filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
Elastic_Search.py
|
from elasticsearch import Elasticsearch, helpers
import os
class ElasticSearch:
def __init__(self, index):
es_host = os.environ.get('es_hostname')
es_port = os.environ.get('es_port')
if es_host is None:
exit('You need to export Elasticsearch hostname')
if es_port is None:
exit('You need to export Elasticsearch port number')
self.es = Elasticsearch([{'host': es_host, 'port': es_port}])
self.index = index
def delete_index(self):
if self.exists():
self._result = self.es.indices.delete(self.index)
return self
def exists(self):
return self.es.indices.exists(self.index)
def create_index(self):
self._result = self.es.indices.create(self.index)
return self
def add_bulk(self, data, vtype):
actions = []
for item in data:
item_data = {
"_index" : self.index,
"_type" : vtype,
"_source": item,
}
actions.append(item_data)
return helpers.bulk(self.es, actions, index=self.index)
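# Minimal usage sketch (index and document names below are illustrative; assumes
# es_hostname and es_port are exported in the environment before running):
#   es = ElasticSearch('my-index')
#   es.delete_index().create_index()
#   es.add_bulk([{'title': 'hello'}], 'doc')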
|
[] |
[] |
[
"es_port",
"es_hostname"
] |
[]
|
["es_port", "es_hostname"]
|
python
| 2 | 0 | |
internal/protoc/downloader.go
|
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package protoc
import (
"archive/zip"
"bytes"
"context"
"crypto/sha512"
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"github.com/gofrs/flock"
"github.com/minish144/prototool-arm64-support/internal/file"
"github.com/minish144/prototool-arm64-support/internal/settings"
"github.com/minish144/prototool-arm64-support/internal/vars"
"go.uber.org/multierr"
"go.uber.org/zap"
)
const (
fileLockRetryDelay = 250 * time.Millisecond
fileLockTimeout = 10 * time.Second
)
type downloader struct {
lock sync.RWMutex
logger *zap.Logger
cachePath string
protocURL string
config settings.Config
// the base path that has been looked up and verified to exist
cachedBasePath string
// If set, Prototool will invoke protoc and include
// the well-known-types, from the configured binPath
// and wktPath.
protocBinPath string
protocWKTPath string
}
func newDownloader(config settings.Config, options ...DownloaderOption) (*downloader, error) {
downloader := &downloader{
config: config,
logger: zap.NewNop(),
}
for _, option := range options {
option(downloader)
}
if downloader.config.Compile.ProtobufVersion == "" {
downloader.config.Compile.ProtobufVersion = vars.DefaultProtocVersion
}
if downloader.protocBinPath != "" || downloader.protocWKTPath != "" {
if downloader.protocURL != "" {
return nil, fmt.Errorf("cannot use protoc-url in combination with either protoc-bin-path or protoc-wkt-path")
}
if downloader.protocBinPath == "" || downloader.protocWKTPath == "" {
return nil, fmt.Errorf("both protoc-bin-path and protoc-wkt-path must be set")
}
cleanBinPath := filepath.Clean(downloader.protocBinPath)
if _, err := os.Stat(cleanBinPath); os.IsNotExist(err) {
return nil, err
}
cleanWKTPath := filepath.Clean(downloader.protocWKTPath)
if _, err := os.Stat(cleanWKTPath); os.IsNotExist(err) {
return nil, err
}
protobufPath := filepath.Join(cleanWKTPath, "google", "protobuf")
info, err := os.Stat(protobufPath)
if os.IsNotExist(err) {
return nil, err
}
if !info.IsDir() {
return nil, fmt.Errorf("%q is not a valid well-known types directory", protobufPath)
}
downloader.protocBinPath = cleanBinPath
downloader.protocWKTPath = cleanWKTPath
}
return downloader, nil
}
func (d *downloader) Download() (string, error) {
d.lock.RLock()
cachedBasePath := d.cachedBasePath
d.lock.RUnlock()
if cachedBasePath != "" {
return cachedBasePath, nil
}
return d.cache()
}
func (d *downloader) ProtocPath() (string, error) {
if d.protocBinPath != "" {
return d.protocBinPath, nil
}
basePath, err := d.Download()
if err != nil {
return "", err
}
return filepath.Join(basePath, "bin", "protoc"), nil
}
func (d *downloader) WellKnownTypesIncludePath() (string, error) {
if d.protocWKTPath != "" {
return d.protocWKTPath, nil
}
basePath, err := d.Download()
if err != nil {
return "", err
}
return filepath.Join(basePath, "include"), nil
}
func (d *downloader) Delete() error {
basePath, err := d.getBasePathNoVersionOSARCH()
if err != nil {
return err
}
d.cachedBasePath = ""
d.logger.Debug("deleting", zap.String("path", basePath))
return os.RemoveAll(basePath)
}
func (d *downloader) cache() (_ string, retErr error) {
if d.protocBinPath != "" {
return d.protocBinPath, nil
}
d.lock.Lock()
defer d.lock.Unlock()
basePath, err := d.getBasePath()
if err != nil {
return "", err
}
lock, err := newFlock(basePath)
if err != nil {
return "", err
}
if err := flockLock(lock); err != nil {
return "", err
}
defer func() { retErr = multierr.Append(retErr, flockUnlock(lock)) }()
if err := d.checkDownloaded(basePath); err != nil {
if err := d.download(basePath); err != nil {
return "", err
}
if err := d.checkDownloaded(basePath); err != nil {
return "", err
}
d.logger.Debug("protobuf downloaded", zap.String("path", basePath))
} else {
d.logger.Debug("protobuf already downloaded", zap.String("path", basePath))
}
d.cachedBasePath = basePath
return basePath, nil
}
func (d *downloader) checkDownloaded(basePath string) error {
buffer := bytes.NewBuffer(nil)
cmd := exec.Command(filepath.Join(basePath, "bin", "protoc"), "--version")
cmd.Stdout = buffer
if err := cmd.Run(); err != nil {
return err
}
if d.protocURL != "" {
// skip version check since we do not know the version
return nil
}
output := strings.TrimSpace(buffer.String())
d.logger.Debug("output from protoc --version", zap.String("output", output))
expected := fmt.Sprintf("libprotoc %s", d.config.Compile.ProtobufVersion)
if output != expected {
return fmt.Errorf("expected %s from protoc --version, got %s", expected, output)
}
return nil
}
func (d *downloader) download(basePath string) (retErr error) {
return d.downloadInternal(basePath, runtime.GOOS, runtime.GOARCH)
}
func (d *downloader) downloadInternal(basePath string, goos string, goarch string) (retErr error) {
data, err := d.getDownloadData(goos, goarch)
if err != nil {
return err
}
// this is a working but hacky unzip
// there must be a library for this
// we don't properly copy directories, modification times, etc
readerAt := bytes.NewReader(data)
zipReader, err := zip.NewReader(readerAt, int64(len(data)))
if err != nil {
return err
}
for _, file := range zipReader.File {
fileMode := file.Mode()
d.logger.Debug("found protobuf file in zip", zap.String("fileName", file.Name), zap.Any("fileMode", fileMode))
if fileMode.IsDir() {
continue
}
readCloser, err := file.Open()
if err != nil {
return err
}
defer func() {
retErr = multierr.Append(retErr, readCloser.Close())
}()
fileData, err := ioutil.ReadAll(readCloser)
if err != nil {
return err
}
writeFilePath := filepath.Join(basePath, file.Name)
if err := os.MkdirAll(filepath.Dir(writeFilePath), 0755); err != nil {
return err
}
writeFile, err := os.OpenFile(writeFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileMode)
if err != nil {
return err
}
defer func() {
retErr = multierr.Append(retErr, writeFile.Close())
}()
if _, err := writeFile.Write(fileData); err != nil {
return err
}
d.logger.Debug("wrote protobuf file", zap.String("path", writeFilePath))
}
return nil
}
func (d *downloader) getDownloadData(goos string, goarch string) (_ []byte, retErr error) {
url, err := d.getProtocURL(goos, goarch)
if err != nil {
return nil, err
}
defer func() {
if retErr == nil {
d.logger.Debug("downloaded protobuf zip file", zap.String("url", url))
}
}()
switch {
case strings.HasPrefix(url, "file://"):
return ioutil.ReadFile(strings.TrimPrefix(url, "file://"))
case strings.HasPrefix(url, "http://"), strings.HasPrefix(url, "https://"):
response, err := http.Get(url)
if err != nil || response.StatusCode != http.StatusOK {
// if no protocURL was given, we tried to download
// the zip from GitHub Releases, so add extra
// context to the error message
if d.protocURL == "" {
return nil, fmt.Errorf("error downloading %s: %v\nMake sure GitHub Releases has a proper protoc zip file of the form protoc-VERSION-OS-ARCH.zip at https://github.com/protocolbuffers/protobuf/releases/v%s\nNote that many micro versions do not have this, and no version before 3.0.0-beta-2 has this", url, err, d.config.Compile.ProtobufVersion)
}
return nil, err
}
defer func() {
if response.Body != nil {
retErr = multierr.Append(retErr, response.Body.Close())
}
}()
return ioutil.ReadAll(response.Body)
default:
return nil, fmt.Errorf("unknown url, can only handle http, https, file: %s", url)
}
}
func (d *downloader) getProtocURL(goos string, goarch string) (string, error) {
if d.protocURL != "" {
return d.protocURL, nil
}
_, unameM, err := getUnameSUnameMPaths(goos, goarch)
if err != nil {
return "", err
}
protocS, err := getProtocSPath(goos)
if err != nil {
return "", err
}
// Protoc download URLs for release candidates don't use the hyphen in the version part of the URL
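// e.g. a configured version "3.12.0-rc-1" becomes release tag "v3.12.0-rc1" in the URL,
// while the zip file name keeps the hyphenated form ("protoc-3.12.0-rc-1-<os>-<arch>.zip").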
re := regexp.MustCompile(`(rc)-(\d+$)`)
version := re.ReplaceAllString(d.config.Compile.ProtobufVersion, "$1$2")
return fmt.Sprintf(
"https://github.com/protocolbuffers/protobuf/releases/download/v%s/protoc-%s-%s-%s.zip",
version,
d.config.Compile.ProtobufVersion,
protocS,
unameM,
), nil
}
func (d *downloader) getBasePath() (string, error) {
basePathNoVersion, err := d.getBasePathNoVersion()
if err != nil {
return "", err
}
return filepath.Join(basePathNoVersion, d.getBasePathVersionPart()), nil
}
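// For illustration: with no explicit cachePath on Linux the resulting base path looks like
// $XDG_CACHE_HOME/prototool/Linux/x86_64/protobuf/<version> (or ~/.cache/prototool/... when
// XDG_CACHE_HOME is unset); on Darwin the prefix is ~/Library/Caches/prototool instead.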
func (d *downloader) getBasePathNoVersionOSARCH() (string, error) {
basePath := d.cachePath
var err error
if basePath == "" {
basePath, err = getDefaultBasePathNoOSARCH()
if err != nil {
return "", err
}
} else {
basePath, err = file.AbsClean(basePath)
if err != nil {
return "", err
}
}
if err := file.CheckAbs(basePath); err != nil {
return "", err
}
return basePath, nil
}
func (d *downloader) getBasePathNoVersion() (string, error) {
basePath := d.cachePath
var err error
if basePath == "" {
basePath, err = getDefaultBasePath()
if err != nil {
return "", err
}
} else {
basePath, err = file.AbsClean(basePath)
if err != nil {
return "", err
}
}
if err := file.CheckAbs(basePath); err != nil {
return "", err
}
return filepath.Join(basePath, "protobuf"), nil
}
func (d *downloader) getBasePathVersionPart() string {
if d.protocURL != "" {
// we don't know the version or what is going on here
hash := sha512.New()
_, _ = hash.Write([]byte(d.protocURL))
return base64.URLEncoding.EncodeToString(hash.Sum(nil))
}
return d.config.Compile.ProtobufVersion
}
func getDefaultBasePath() (string, error) {
return getDefaultBasePathInternal(runtime.GOOS, runtime.GOARCH, os.Getenv)
}
func getDefaultBasePathInternal(goos string, goarch string, getenvFunc func(string) string) (string, error) {
basePathNoOSARCH, err := getDefaultBasePathInternalNoOSARCH(goos, goarch, getenvFunc)
if err != nil {
return "", err
}
unameS, unameM, err := getUnameSUnameMPaths(goos, goarch)
if err != nil {
return "", err
}
return filepath.Join(basePathNoOSARCH, unameS, unameM), nil
}
func getDefaultBasePathNoOSARCH() (string, error) {
return getDefaultBasePathInternalNoOSARCH(runtime.GOOS, runtime.GOARCH, os.Getenv)
}
func getDefaultBasePathInternalNoOSARCH(goos string, goarch string, getenvFunc func(string) string) (string, error) {
unameS, _, err := getUnameSUnameMPaths(goos, goarch)
if err != nil {
return "", err
}
xdgCacheHome := getenvFunc("XDG_CACHE_HOME")
if xdgCacheHome != "" {
return filepath.Join(xdgCacheHome, "prototool"), nil
}
home := getenvFunc("HOME")
if home == "" {
return "", fmt.Errorf("HOME is not set")
}
switch unameS {
case "Darwin":
return filepath.Join(home, "Library", "Caches", "prototool"), nil
case "Linux":
return filepath.Join(home, ".cache", "prototool"), nil
default:
return "", fmt.Errorf("invalid value for uname -s: %v", unameS)
}
}
func getProtocSPath(goos string) (string, error) {
switch goos {
case "darwin":
return "osx", nil
case "linux":
return "linux", nil
default:
return "", fmt.Errorf("unsupported value for runtime.GOOS: %v", goos)
}
}
func getUnameSUnameMPaths(goos string, goarch string) (string, string, error) {
var unameS string
switch goos {
case "darwin":
unameS = "Darwin"
case "linux":
unameS = "Linux"
default:
return "", "", fmt.Errorf("unsupported value for runtime.GOOS: %v", goos)
}
var unameM string
switch goarch {
case "amd64":
unameM = "x86_64"
case "arm64":
unameM = "x86_64"
default:
return "", "", fmt.Errorf("unsupported value for runtime.GOARCH: %v", goarch)
}
return unameS, unameM, nil
}
func newFlock(basePath string) (*flock.Flock, error) {
fileLockPath := basePath + ".lock"
// mkdir is atomic
if err := os.MkdirAll(filepath.Dir(fileLockPath), 0755); err != nil {
return nil, err
}
return flock.New(fileLockPath), nil
}
func flockLock(lock *flock.Flock) error {
ctx, cancel := context.WithTimeout(context.Background(), fileLockTimeout)
defer cancel()
locked, err := lock.TryLockContext(ctx, fileLockRetryDelay)
if err != nil {
return fmt.Errorf("error acquiring file lock at %s - if you think this is in error, remove %s: %v", lock.Path(), lock.Path(), err)
}
if !locked {
return fmt.Errorf("could not acquire file lock at %s after %v - if you think this is in error, remove %s", lock.Path(), fileLockTimeout, lock.Path())
}
return nil
}
func flockUnlock(lock *flock.Flock) error {
if err := lock.Unlock(); err != nil {
return fmt.Errorf("error unlocking file lock at %s: %v", lock.Path(), err)
}
return nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
main.go
|
package main
import (
"bytes"
"fmt"
"os"
"strings"
"text/template"
"github.com/muesli/beehive/bees"
_ "github.com/muesli/beehive/bees/githubbee"
_ "github.com/muesli/beehive/bees/hellobee"
"github.com/muesli/beehive/cfg"
"github.com/muesli/beehive/reactor"
"github.com/muesli/termenv"
_ "github.com/rubiojr/commit-monitor/bees/stdoutbee"
)
// Repositories to monitor (owner, repo name)
var repos = [][]string{
{"rails", "rails"},
{"torvalds", "linux"},
{"kubernetes", "kubernetes"},
{"prometheus", "prometheus"},
{"golang", "go"},
{"LineageOS", "android_frameworks_base"},
{"rubiojr", "hello"},
}
func main() {
// configuration boilerplate using the memory backend
// so we don't touch the filesystem.
config, err := cfg.New("mem://")
if err != nil {
panic(err)
}
config.Bees = []bees.BeeConfig{}
config.Chains = []bees.Chain{}
// the stdout bee prints incoming events to stdout via the 'print' action
stdoutBee := newStdoutBee()
config.Bees = append(config.Bees, stdoutBee)
// Create the Action and add it to the config
// Every chain will re-use the same action to print commits to stdout
// using the 'print' action
action := bees.Action{}
action.ID = "print-to-stdout"
action.Bee = stdoutBee.Name
action.Name = "print"
action.Options = bees.Placeholders{
{
Name: "text",
// prints something like: ** New commit in owner/repo ** for every commit
Value: formatText(`{{ Bold (Foreground "#FF0000" "**") }}`) +
" New commit in {{.repo}} " +
formatText(`{{ Bold (Foreground "#FF0000" "**") }}`) +
"\n" +
"{{.message}}\n",
Type: "string",
},
}
config.Actions = []bees.Action{action}
// Iterate over all the repositories we want to monitor
// and create a new chain that will link the 'commit' event
// to the 'print-to-stdout' action.
for _, repo := range repos {
nwo := strings.Join(repo, "/") // owner/repository
// the GitHub bee is in charge of monitoring events
// for the given repository
bee := newGitHubBee(repo[0], repo[1])
config.Bees = append(config.Bees, bee)
// Create the event
event := bees.Event{}
event.Name = "commit"
event.Bee = bee.Name
// Create the chain and add it to the existing chains
chain := bees.Chain{}
chain.Name = "commits-" + nwo
chain.Description = "Print commits for " + nwo
chain.Actions = []string{action.ID} // Action to print the commit
chain.Event = &event
chain.Filters = []string{}
config.Chains = append(config.Chains, chain)
}
// Debugging level, prints debug messages from bees
// reactor.SetLogLevel(5)
reactor.Run(config)
}
func newGitHubBee(owner, repo string) bees.BeeConfig {
options := bees.BeeOptions{
bees.BeeOption{Name: "accesstoken", Value: os.Getenv("GITHUB_TOKEN")},
bees.BeeOption{Name: "owner", Value: owner},
bees.BeeOption{Name: "repository", Value: repo},
}
bc, err := bees.NewBeeConfig(owner+"-"+repo, "githubbee", fmt.Sprintf("monitor %s/%s commits", owner, repo), options)
if err != nil {
panic(err)
}
return bc
}
func newStdoutBee() bees.BeeConfig {
options := bees.BeeOptions{}
bc, err := bees.NewBeeConfig("stdout", "stdoutbee", "test", options)
if err != nil {
panic(err)
}
return bc
}
func formatText(text string) string {
// load template helpers
f := termenv.TemplateFuncs(termenv.ColorProfile())
tpl := template.New("tpl").Funcs(f)
// parse and render
tpl, err := tpl.Parse(text)
if err != nil {
panic(err)
}
var buf bytes.Buffer
if err := tpl.Execute(&buf, nil); err != nil {
panic(err)
}
return buf.String()
}
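// To try this out (sketch): export a GitHub personal access token and run the program, e.g.
//   GITHUB_TOKEN=<token> go run main.go
// The token is read above via os.Getenv("GITHUB_TOKEN") when configuring the GitHub bee.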
|
[
"\"GITHUB_TOKEN\""
] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
go
| 1 | 0 | |
tests/conftest.py
|
import compileall
import fnmatch
import io
import os
import re
import shutil
import subprocess
import sys
import time
from contextlib import contextmanager
import pytest
import six
from mock import patch
from pip._vendor.contextlib2 import ExitStack, nullcontext
from setuptools.wheel import Wheel
from pip._internal.cli.main import main as pip_entry_point
from pip._internal.utils.temp_dir import global_tempdir_manager
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from tests.lib import DATA_DIR, SRC_DIR, PipTestEnvironment, TestData
from tests.lib.certs import make_tls_cert, serialize_cert, serialize_key
from tests.lib.path import Path
from tests.lib.server import make_mock_server, server_running
from tests.lib.venv import VirtualEnvironment
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterable
from tests.lib.server import MockServer as _MockServer
from tests.lib.server import Responder
def pytest_addoption(parser):
parser.addoption(
"--keep-tmpdir",
action="store_true",
default=False,
help="keep temporary test directories",
)
parser.addoption(
"--new-resolver",
action="store_true",
default=False,
help="use new resolver in tests",
)
parser.addoption(
"--new-resolver-runtests",
action="store_true",
default=False,
help="run the skipped tests for the new resolver",
)
parser.addoption(
"--use-venv",
action="store_true",
default=False,
help="use venv for virtual environment creation",
)
def pytest_collection_modifyitems(config, items):
for item in items:
if not hasattr(item, 'module'): # e.g.: DoctestTextfile
continue
# Mark network tests as flaky
if (item.get_closest_marker('network') is not None and
"CI" in os.environ):
item.add_marker(pytest.mark.flaky(reruns=3))
if (item.get_closest_marker('fails_on_new_resolver') and
config.getoption("--new-resolver") and
not config.getoption("--new-resolver-runtests")):
item.add_marker(pytest.mark.skip(
'This test does not work with the new resolver'))
if six.PY3:
if (item.get_closest_marker('incompatible_with_test_venv') and
config.getoption("--use-venv")):
item.add_marker(pytest.mark.skip(
'Incompatible with test venv'))
if (item.get_closest_marker('incompatible_with_venv') and
sys.prefix != sys.base_prefix):
item.add_marker(pytest.mark.skip(
'Incompatible with venv'))
module_path = os.path.relpath(
item.module.__file__,
os.path.commonprefix([__file__, item.module.__file__]),
)
module_root_dir = module_path.split(os.pathsep)[0]
if (module_root_dir.startswith("functional") or
module_root_dir.startswith("integration") or
module_root_dir.startswith("lib")):
item.add_marker(pytest.mark.integration)
elif module_root_dir.startswith("unit"):
item.add_marker(pytest.mark.unit)
else:
raise RuntimeError(
"Unknown test type (filename = {})".format(module_path)
)
@pytest.fixture(scope="session", autouse=True)
def use_new_resolver(request):
"""Set environment variable to make pip default to the new resolver.
"""
new_resolver = request.config.getoption("--new-resolver")
features = set(os.environ.get("PIP_USE_FEATURE", "").split())
if new_resolver:
features.add("2020-resolver")
else:
features.discard("2020-resolver")
with patch.dict(os.environ, {"PIP_USE_FEATURE": " ".join(features)}):
yield new_resolver
@pytest.fixture(scope='session')
def tmpdir_factory(request, tmpdir_factory):
""" Modified `tmpdir_factory` session fixture
that will automatically cleanup after itself.
"""
yield tmpdir_factory
if not request.config.getoption("--keep-tmpdir"):
# py.path.remove() uses str paths on Python 2 and cannot
# handle non-ASCII file names. This works around the problem by
# passing a unicode object to rmtree().
shutil.rmtree(
six.text_type(tmpdir_factory.getbasetemp()),
ignore_errors=True,
)
@pytest.fixture
def tmpdir(request, tmpdir):
"""
Return a temporary directory path object which is unique to each test
function invocation, created as a sub directory of the base temporary
directory. The returned object is a ``tests.lib.path.Path`` object.
This uses the built-in tmpdir fixture from pytest itself but modified
to return our typical path object instead of py.path.local as well as
deleting the temporary directories at the end of each test case.
"""
assert tmpdir.isdir()
yield Path(str(tmpdir))
# Clear out the temporary directory after the test has finished using it.
# This should prevent us from needing a multiple gigabyte temporary
# directory while running the tests.
if not request.config.getoption("--keep-tmpdir"):
# py.path.remove() uses str paths on Python 2 and cannot
# handle non-ASCII file names. This works around the problem by
# passing a unicode object to rmtree().
shutil.rmtree(six.text_type(tmpdir), ignore_errors=True)
@pytest.fixture(autouse=True)
def isolate(tmpdir, monkeypatch):
"""
Isolate our tests so that things like global configuration files and the
like do not affect our test results.
We use an autouse function scoped fixture because we want to ensure that
every test has its own isolated home directory.
"""
# TODO: Figure out how to isolate from *system* level configuration files
# as well as user level configuration files.
# Create a directory to use as our home location.
home_dir = os.path.join(str(tmpdir), "home")
os.makedirs(home_dir)
# Create a directory to use as a fake root
fake_root = os.path.join(str(tmpdir), "fake-root")
os.makedirs(fake_root)
if sys.platform == 'win32':
# Note: this will only take effect in subprocesses...
home_drive, home_path = os.path.splitdrive(home_dir)
monkeypatch.setenv('USERPROFILE', home_dir)
monkeypatch.setenv('HOMEDRIVE', home_drive)
monkeypatch.setenv('HOMEPATH', home_path)
for env_var, sub_path in (
('APPDATA', 'AppData/Roaming'),
('LOCALAPPDATA', 'AppData/Local'),
):
path = os.path.join(home_dir, *sub_path.split('/'))
monkeypatch.setenv(env_var, path)
os.makedirs(path)
else:
# Set our home directory to our temporary directory, this should force
# all of our relative configuration files to be read from here instead
# of the user's actual $HOME directory.
monkeypatch.setenv("HOME", home_dir)
# Isolate ourselves from XDG directories
monkeypatch.setenv("XDG_DATA_HOME", os.path.join(
home_dir, ".local", "share",
))
monkeypatch.setenv("XDG_CONFIG_HOME", os.path.join(
home_dir, ".config",
))
monkeypatch.setenv("XDG_CACHE_HOME", os.path.join(home_dir, ".cache"))
monkeypatch.setenv("XDG_RUNTIME_DIR", os.path.join(
home_dir, ".runtime",
))
monkeypatch.setenv("XDG_DATA_DIRS", os.pathsep.join([
os.path.join(fake_root, "usr", "local", "share"),
os.path.join(fake_root, "usr", "share"),
]))
monkeypatch.setenv("XDG_CONFIG_DIRS", os.path.join(
fake_root, "etc", "xdg",
))
# Configure git, because without an author name/email git will complain
# and cause test failures.
monkeypatch.setenv("GIT_CONFIG_NOSYSTEM", "1")
monkeypatch.setenv("GIT_AUTHOR_NAME", "pip")
monkeypatch.setenv("GIT_AUTHOR_EMAIL", "[email protected]")
# We want to disable the version check from running in the tests
monkeypatch.setenv("PIP_DISABLE_PIP_VERSION_CHECK", "true")
# Make sure tests don't share a requirements tracker.
monkeypatch.delenv("PIP_REQ_TRACKER", False)
# FIXME: Windows...
os.makedirs(os.path.join(home_dir, ".config", "git"))
with open(os.path.join(home_dir, ".config", "git", "config"), "wb") as fp:
fp.write(
b"[user]\n\tname = pip\n\temail = [email protected]\n"
)
@pytest.fixture(autouse=True)
def scoped_global_tempdir_manager(request):
"""Make unit tests with globally-managed tempdirs easier
Each test function gets its own individual scope for globally-managed
temporary directories in the application.
"""
if "no_auto_tempdir_manager" in request.keywords:
ctx = nullcontext
else:
ctx = global_tempdir_manager
with ctx():
yield
@pytest.fixture(scope='session')
def pip_src(tmpdir_factory):
def not_code_files_and_folders(path, names):
# In the root directory...
if path == SRC_DIR:
# ignore all folders except "src"
folders = {name for name in names if os.path.isdir(path / name)}
to_ignore = folders - {"src"}
# and ignore ".git" if present (which may be a file if in a linked
# worktree).
if ".git" in names:
to_ignore.add(".git")
return to_ignore
# Ignore all compiled files and egg-info.
ignored = set()
for pattern in ("__pycache__", "*.pyc", "pip.egg-info"):
ignored.update(fnmatch.filter(names, pattern))
return ignored
pip_src = Path(str(tmpdir_factory.mktemp('pip_src'))).joinpath('pip_src')
# Copy over our source tree so that each use is self contained
shutil.copytree(
SRC_DIR,
pip_src.resolve(),
ignore=not_code_files_and_folders,
)
return pip_src
def _common_wheel_editable_install(tmpdir_factory, common_wheels, package):
wheel_candidates = list(
common_wheels.glob('{package}-*.whl'.format(**locals())))
assert len(wheel_candidates) == 1, wheel_candidates
install_dir = Path(str(tmpdir_factory.mktemp(package))) / 'install'
Wheel(wheel_candidates[0]).install_as_egg(install_dir)
(install_dir / 'EGG-INFO').rename(
install_dir / '{package}.egg-info'.format(**locals()))
assert compileall.compile_dir(str(install_dir), quiet=1)
return install_dir
@pytest.fixture(scope='session')
def setuptools_install(tmpdir_factory, common_wheels):
return _common_wheel_editable_install(tmpdir_factory,
common_wheels,
'setuptools')
@pytest.fixture(scope='session')
def wheel_install(tmpdir_factory, common_wheels):
return _common_wheel_editable_install(tmpdir_factory,
common_wheels,
'wheel')
@pytest.fixture(scope='session')
def coverage_install(tmpdir_factory, common_wheels):
return _common_wheel_editable_install(tmpdir_factory,
common_wheels,
'coverage')
def install_egg_link(venv, project_name, egg_info_dir):
with open(venv.site / 'easy-install.pth', 'a') as fp:
fp.write(str(egg_info_dir.resolve()) + '\n')
with open(venv.site / (project_name + '.egg-link'), 'w') as fp:
fp.write(str(egg_info_dir) + '\n.')
@pytest.fixture(scope='session')
def virtualenv_template(request, tmpdir_factory, pip_src,
setuptools_install, coverage_install):
if six.PY3 and request.config.getoption('--use-venv'):
venv_type = 'venv'
else:
venv_type = 'virtualenv'
# Create the virtual environment
tmpdir = Path(str(tmpdir_factory.mktemp('virtualenv')))
venv = VirtualEnvironment(
tmpdir.joinpath("venv_orig"), venv_type=venv_type
)
# Install setuptools and pip.
install_egg_link(venv, 'setuptools', setuptools_install)
pip_editable = Path(str(tmpdir_factory.mktemp('pip'))) / 'pip'
shutil.copytree(pip_src, pip_editable, symlinks=True)
# noxfile.py is Python 3 only
assert compileall.compile_dir(
str(pip_editable), quiet=1, rx=re.compile("noxfile.py$"),
)
subprocess.check_call([venv.bin / 'python', 'setup.py', '-q', 'develop'],
cwd=pip_editable)
# Install coverage and pth file for executing it in any spawned processes
# in this virtual environment.
install_egg_link(venv, 'coverage', coverage_install)
# zz prefix ensures the file is after easy-install.pth.
with open(venv.site / 'zz-coverage-helper.pth', 'a') as f:
f.write('import coverage; coverage.process_startup()')
# Drop (non-relocatable) launchers.
for exe in os.listdir(venv.bin):
if not (
exe.startswith('python') or
exe.startswith('libpy') # Don't remove libpypy-c.so...
):
(venv.bin / exe).unlink()
# Enable user site packages.
venv.user_site_packages = True
# Rename original virtualenv directory to make sure
# it's not reused by mistake from one of the copies.
venv_template = tmpdir / "venv_template"
venv.move(venv_template)
yield venv
@pytest.fixture(scope="session")
def virtualenv_factory(virtualenv_template):
def factory(tmpdir):
return VirtualEnvironment(tmpdir, virtualenv_template)
return factory
@pytest.fixture
def virtualenv(virtualenv_factory, tmpdir):
"""
Return a virtual environment which is unique to each test function
invocation created inside of a sub directory of the test function's
temporary directory. The returned object is a
``tests.lib.venv.VirtualEnvironment`` object.
"""
yield virtualenv_factory(tmpdir.joinpath("workspace", "venv"))
@pytest.fixture
def with_wheel(virtualenv, wheel_install):
install_egg_link(virtualenv, 'wheel', wheel_install)
@pytest.fixture(scope="session")
def script_factory(virtualenv_factory, deprecated_python):
def factory(tmpdir, virtualenv=None):
if virtualenv is None:
virtualenv = virtualenv_factory(tmpdir.joinpath("venv"))
return PipTestEnvironment(
# The base location for our test environment
tmpdir,
# Tell the Test Environment where our virtualenv is located
virtualenv=virtualenv,
# Do not ignore hidden files, they need to be checked as well
ignore_hidden=False,
# We are starting with an already empty directory
start_clear=False,
# We want to ensure no temporary files are left behind, so the
# PipTestEnvironment needs to capture and assert against temp
capture_temp=True,
assert_no_temp=True,
# Deprecated python versions produce an extra deprecation warning
pip_expect_warning=deprecated_python,
)
return factory
@pytest.fixture
def script(tmpdir, virtualenv, script_factory):
"""
Return a PipTestEnvironment which is unique to each test function and
will execute all commands inside of the unique virtual environment for this
test function. The returned object is a
``tests.lib.PipTestEnvironment``.
"""
return script_factory(tmpdir.joinpath("workspace"), virtualenv)
@pytest.fixture(scope="session")
def common_wheels():
"""Provide a directory with latest setuptools and wheel wheels"""
return DATA_DIR.joinpath('common_wheels')
@pytest.fixture(scope="session")
def shared_data(tmpdir_factory):
return TestData.copy(Path(str(tmpdir_factory.mktemp("data"))))
@pytest.fixture
def data(tmpdir):
return TestData.copy(tmpdir.joinpath("data"))
class InMemoryPipResult(object):
def __init__(self, returncode, stdout):
self.returncode = returncode
self.stdout = stdout
class InMemoryPip(object):
def pip(self, *args):
orig_stdout = sys.stdout
if six.PY3:
stdout = io.StringIO()
else:
stdout = io.BytesIO()
sys.stdout = stdout
try:
returncode = pip_entry_point(list(args))
except SystemExit as e:
returncode = e.code or 0
finally:
sys.stdout = orig_stdout
return InMemoryPipResult(returncode, stdout.getvalue())
@pytest.fixture
def in_memory_pip():
return InMemoryPip()
@pytest.fixture(scope="session")
def deprecated_python():
"""Used to indicate whether pip deprecated this Python version"""
return sys.version_info[:2] in [(2, 7), (3, 5)]
@pytest.fixture(scope="session")
def cert_factory(tmpdir_factory):
def factory():
# type: () -> str
"""Returns path to cert/key file.
"""
output_path = Path(str(tmpdir_factory.mktemp("certs"))) / "cert.pem"
# Must be Text on PY2.
cert, key = make_tls_cert(u"localhost")
with open(str(output_path), "wb") as f:
f.write(serialize_cert(cert))
f.write(serialize_key(key))
return str(output_path)
return factory
class MockServer(object):
def __init__(self, server):
# type: (_MockServer) -> None
self._server = server
self._running = False
self.context = ExitStack()
@property
def port(self):
return self._server.port
@property
def host(self):
return self._server.host
def set_responses(self, responses):
# type: (Iterable[Responder]) -> None
assert not self._running, "responses cannot be set on running server"
self._server.mock.side_effect = responses
def start(self):
# type: () -> None
assert not self._running, "running server cannot be started"
self.context.enter_context(server_running(self._server))
self.context.enter_context(self._set_running())
@contextmanager
def _set_running(self):
self._running = True
try:
yield
finally:
self._running = False
def stop(self):
# type: () -> None
assert self._running, "idle server cannot be stopped"
self.context.close()
def get_requests(self):
# type: () -> Dict[str, str]
"""Get environ for each received request.
"""
assert not self._running, "cannot get mock from running server"
return [
call.args[0] for call in self._server.mock.call_args_list
]
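# Sketch of driving the MockServer wrapper above in a test (the responder value is
# illustrative; real tests build responders via the tests.lib.server helpers):
#   server.set_responses([responder])
#   server.start()
#   ...  # point pip at http://{server.host}:{server.port} here
#   server.stop()
#   environs = server.get_requests()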
@pytest.fixture
def mock_server():
server = make_mock_server()
test_server = MockServer(server)
with test_server.context:
yield test_server
@pytest.fixture
def utc():
# time.tzset() is not implemented on some platforms, e.g. Windows.
tzset = getattr(time, 'tzset', lambda: None)
with patch.dict(os.environ, {'TZ': 'UTC'}):
tzset()
yield
tzset()
|
[] |
[] |
[
"PIP_USE_FEATURE"
] |
[]
|
["PIP_USE_FEATURE"]
|
python
| 1 | 0 | |
config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SSL_DISABLE = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
MAIL_SERVER = 'smtp.qq.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <[email protected]>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
FLASKY_POSTS_PER_PAGE = 10
FLASKY_FOLLOWERS_PER_PAGE = 70
FLASKY_COMMENTS_PER_PAGE = 30
FLASKY_SLOW_DB_QUERY_TIME = 0.5
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
# email errors to the administrators
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.FLASKY_MAIL_SENDER,
toaddrs=[cls.FLASKY_ADMIN],
subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# handle proxy server headers
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'default': DevelopmentConfig
}
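# Typical application-factory usage (sketch; the 'default' key is defined above):
#   config_class = config.get(config_name, config['default'])
#   app.config.from_object(config_class)
#   config_class.init_app(app)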
|
[] |
[] |
[
"MAIL_PASSWORD",
"DEV_DATABASE_URL",
"DATABASE_URL",
"SECRET_KEY",
"MAIL_USERNAME",
"SSL_DISABLE",
"FLASKY_ADMIN",
"TEST_DATABASE_URL"
] |
[]
|
["MAIL_PASSWORD", "DEV_DATABASE_URL", "DATABASE_URL", "SECRET_KEY", "MAIL_USERNAME", "SSL_DISABLE", "FLASKY_ADMIN", "TEST_DATABASE_URL"]
|
python
| 8 | 0 | |
.github/scripts/store_benchmark.py
|
from __future__ import print_function
import os
import os.path
import json
import sys
import shutil
import time
import subprocess
src_path = os.path.join(os.getcwd(), "src")
sys.path.insert(0, src_path)
from rez.utils._version import _rez_version # noqa
# max number of result artifacts to store
MAX_ARTIFACTS = 100
# behave differently outside of github actions, for testing
in_gh = (os.getenv("GITHUB_ACTIONS") == "true")
benchmarking_dir = os.path.join("metrics", "benchmarking")
artifacts_dir = os.path.join(benchmarking_dir, "artifacts")
gnuplot_error = None
results_md_template = \
"""
# Benchmarking Results
This document contains historical benchmarking results. These measure the speed
of resolution of a list of predetermined requests. Do **NOT** change this file
by hand; the 'benchmark' Github workflow does this automatically.
{gnuplot_image}
| Rez | Python | Platform | CPU | #CPU | Median | Mean | StdDev |
|-----|--------|----------|-----|------|--------|------|--------|
{rows}
""" # noqa
gnuplot_script = \
"""
set xtics rotate
set term png
set border 1
set output 'solvetimes.png'
plot "solvetimes.dat" using 2:xtic(1) title 'Mean' with lines, \
"solvetimes.dat" using 3:xtic(1) title 'Median' with lines lc "gray", \
"solvetimes.dat" using 2:4 title 'Stddev' with errorbars
""" # noqa
def store_result():
# create dated + versioned directory to store benchmark results
# Dir in the form:
#
# YYYY.MM.DD-PYMAJOR.PYMINOR-REZVER
#
destdir = '-'.join((
time.strftime("%Y.%m.%d"),
"%d.%d" % sys.version_info[:2],
_rez_version
))
destpath = os.path.join(artifacts_dir, destdir)
if os.path.exists(destpath):
return
os.makedirs(destpath)
# take the files that the artifact download created, and move them into
# the versioned directory. Note that the GH workflow is currently running
# with cwd=./src, but these artifacts are in the dir above
#
artifact_files = [
"resolves.json",
"summary.json"
]
results_path = os.path.dirname(os.getcwd())
for filename in artifact_files:
os.rename(
os.path.join(results_path, filename),
os.path.join(destpath, filename)
)
def remove_old_results():
path = os.path.join(benchmarking_dir, "artifacts")
dirs = sorted(os.listdir(path))
while len(dirs) > MAX_ARTIFACTS:
shutil.rmtree(os.path.join(path, dirs[0]))
dirs = dirs[1:]
def generate_gnuplot():
global gnuplot_error
# detect latest python in benchmark results
pyvers = set()
for summary in _iter_summaries():
pyver = summary["py_version"]
pyvers.add(tuple(int(x) for x in pyver.split('.')))
latest_pyver = '.'.join(str(x) for x in max(pyvers))
# generate data file for gnuplot to consume. Just use results from latest
# python
#
dat_filepath = os.path.join(benchmarking_dir, "solvetimes.dat")
with open(dat_filepath, 'w') as f:
for summary in _iter_summaries():
if summary["py_version"] != latest_pyver:
continue
f.write(
"%s-py%s %f %f %f\n"
% (
summary["rez_version"],
summary["py_version"],
summary["mean"],
summary["median"],
summary["stddev"]
)
)
# create gnuplot script
script_filepath = os.path.join(benchmarking_dir, "solvetimes.gnuplot")
with open(script_filepath, 'w') as f:
f.write(gnuplot_script)
# run gnuplot
try:
subprocess.check_output(
["gnuplot", "./solvetimes.gnuplot"],
cwd=benchmarking_dir
)
except Exception as e:
gnuplot_error = str(e)
finally:
os.remove(dat_filepath)
os.remove(script_filepath)
def update_markdown():
columns = (
"rez_version",
"py_version",
"platform",
"cpu",
"num_cpu",
"median",
"mean",
"stddev"
)
def _tostr(value):
if isinstance(value, float):
return "%.02f" % value
else:
return str(value)
md_lines = []
variables = {}
# generate table
for summary in _iter_summaries():
line = "| " + " | ".join(_tostr(summary[x]) for x in columns) + " |"
md_lines.append(line)
variables["rows"] = '\n'.join(md_lines)
# insert previously generated gnuplot image
if os.path.exists(os.path.join(benchmarking_dir, "solvetimes.png")):
variables["gnuplot_image"] = (
'<p align="center"><img src="solvetimes.png" /></p>'
)
else:
variables["gnuplot_image"] = (
"Gnuplot failed:\n```%s```" % gnuplot_error
)
# generate and write out markdown
results_md = results_md_template.format(**variables)
md_filepath = os.path.join(benchmarking_dir, "RESULTS.md")
with open(md_filepath, "w") as f:
f.write(results_md)
def _iter_summaries():
def sort_fn(path):
# sort by rez version, then py version
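# e.g. a (hypothetical) artifact dir named "2021.05.01-3.7-2.80.0" yields the key [2, 80, 0, 3, 7]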
parts = path.split('-')
vers_str = parts[-1] + '.' + parts[-2]
return [int(x) for x in vers_str.split('.')]
for name in sorted(os.listdir(artifacts_dir), key=sort_fn):
filepath = os.path.join(artifacts_dir, name, "summary.json")
with open(filepath) as f:
yield json.loads(f.read())
if __name__ == "__main__":
if in_gh:
store_result()
remove_old_results()
generate_gnuplot()
update_markdown()
|
[] |
[] |
[
"GITHUB_ACTIONS"
] |
[]
|
["GITHUB_ACTIONS"]
|
python
| 1 | 0 | |
django_project/django_project/settings.py
|
"""
Django settings for django_project project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pcg7tqq$5s7_@42h17q)ct=ohpc!mui=zt#ni^8oi7xiym&*%s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'blog.apps.BlogConfig',
'users.apps.UsersConfig',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'blog-home'
LOGIN_URL = 'login'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.environ.get('EMAIL_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')
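# Note: the SMTP credentials above come from the EMAIL_USER and EMAIL_PASS
# environment variables; export both before enabling email features, e.g.
#   export EMAIL_USER=<address> EMAIL_PASS=<app password>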
|
[] |
[] |
[
"EMAIL_PASS",
"EMAIL_USER"
] |
[]
|
["EMAIL_PASS", "EMAIL_USER"]
|
python
| 2 | 0 | |
bindings/java/examples/basic-app/src/main/java/org/example/Migration.java
|
package org.example;
import java.nio.file.Paths;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.LinkedList;
import java.util.Collections;
import java.util.Comparator;
import java.util.stream.Collectors;
import org.iota.wallet.Account;
import org.iota.wallet.AccountManager;
import org.iota.wallet.AccountManagerBuilder;
import org.iota.wallet.AccountSignerType;
import org.iota.wallet.ClientOptions;
import org.iota.wallet.ClientOptionsBuilder;
import org.iota.wallet.EventManager;
import org.iota.wallet.InputData;
import org.iota.wallet.MigrationBundle;
import org.iota.wallet.MigrationBundleOptions;
import org.iota.wallet.MigrationData;
import org.iota.wallet.MigrationProgressListener;
import org.iota.wallet.MigrationProgressEvent;
import org.iota.wallet.MigrationProgressType;
import org.iota.wallet.local.*;
import org.iota.wallet.*;
/**
* Needs the following settings from ENV:
*
* process.env.MIGRATION_SEED
* process.env.SH_PASSWORD
*/
public class Migration implements MigrationProgressListener {
// Address security level
public static final byte ADDRESS_SECURITY_LEVEL = 2;
// Minimum balance that is required for a migration bundle, because of the dust protection in the new network
public static final int MINIMUM_MIGRATION_BALANCE = 1000000;
// This value shouldn't be too high, because then the PoW could take too long to get it confirmed
public static final int MAX_INPUTS_PER_BUNDLE = 10;
// Wallet.rs database storage path. Stronghold and database file would be stored in this path.
public static final String DB_STORAGE_PATH = "./migration-database";
// Legacy network nodes. Mainnet: https://nodes.iota.org
public static final String[] LEGACY_NETWORK_NODES = new String[] { "https://nodes-legacy.iotatestmigration6.net/" };
// Legacy permanode. Mainnet: https://chronicle.iota.org/api
public static final String LEGACY_PERMANODE = "https://nodes-legacy.iotatestmigration6.net/";
// Chrysalis node. Mainnet: https://chrysalis-nodes.iota.cafe
public static final String CHRYSALIS_NODE = "https://api.thin-hornet-0.h.migration6.iotatestmigration6.net";
// ------------------------------------------
// We store all bundle hashes here and check later if the bundles got confirmed
private List<String> migrationBundleHashes;
// Did we start the sending/checking process
private boolean started = false;
private Account account;
public Migration(){
this.migrationBundleHashes = new LinkedList<>();
this.account = null;
}
/**
* Displays information about the Migration account to which we send our funds.
* Requires Migration.run() to be called first
*
* @return The total balance on the new account
*/
public long displayMigration(){
if (this.account == null) return -1;
this.account.sync().execute();
System.out.println("= Migration Account =");
System.out.println("last synced: " + this.account.lastSyncedAt().get().getTime());
System.out.println("balance: " + this.account.balance());
System.out.println("latest address: " + this.account.latestAddress());
System.out.println("unused address: " + this.account.getUnusedAddress());
return this.account.balance().getTotal();
}
// Log migration events
@Override
public void onMigrationProgress(MigrationProgressEvent event) {
// After a successful broadcast of this bundle, the library will automatically reattach the bundle to
// speed up the confirmation process. An event with type "TransactionConfirmed" (with corresponding bundle hash)
// will be emitted as soon as the bundle is confirmed.
if (event.getType().equals(MigrationProgressType.TRANSACTION_CONFIRMED)) {
System.out.println("MigrationProgress: " + event);
migrationBundleHashes.remove(event.asTransactionConfirmed().getBundleHash());
if (migrationBundleHashes.size() == 0 && started) {
System.out.println("Migration done! ");
System.out.println("funds migrated to: " + this.account.latestAddress());
this.started = false;
this.account = null;
return;
}
System.out.println("Still unconfirmed bundles: " + Arrays.toString(migrationBundleHashes.toArray(new String[0])));
}
}
public boolean finished(){
return started && migrationBundleHashes.size() == 0;
}
public String run(){
try {
// Attach an event listener to keep track of the migration process
EventManager.subscribeMigrationProgress(this);
AccountManagerBuilder builder = AccountManager.Builder().withStorage(DB_STORAGE_PATH, null);
// Set stronghold password
AccountManager manager = builder.finish();
manager.setStrongholdPassword(System.getenv("SH_PASSWORD"));
// IMPORTANT: SAVE THIS MNEMONIC SECURELY. IF YOU LOSE IT, YOU POTENTIALLY LOSE EVERYTHING.
String mnemonic = manager.generateMnemonic();
System.out.println("IMPORTANT: SAVE THIS MNEMONIC SECURELY. IF YOU LOSE IT, YOU POTENTIALLY LOSE EVERYTHING."
+ System.lineSeparator() + mnemonic);
manager.storeMnemonic(AccountSignerType.STRONGHOLD, mnemonic);
// network migration6 for the migration testnet, otherwise leave out the network option for mainnet
ClientOptions clientOptions = new ClientOptionsBuilder()
.withNode(CHRYSALIS_NODE)
.withNetwork("migration6")
.build();
this.account = manager
.createAccount(clientOptions)
.alias("Migration")
.initialise();
System.out.println("Account created: " + account.alias());
// Nodes for the legacy network
String[] nodes = LEGACY_NETWORK_NODES;
String seed = System.getenv("MIGRATION_SEED");
// 0 is starting index
// 50 is gap limit (address range we check)
MigrationData migrationData = manager.getMigrationData(nodes, seed, LEGACY_PERMANODE,
ADDRESS_SECURITY_LEVEL, 0, 50);
if (migrationData.balance() > 0) {
List<List<InputData>> input_batches = getMigrationBundles(migrationData.inputs());
// create bundles with the inputs
for (List<InputData> batch : input_batches) {
try {
MigrationBundleOptions options = new MigrationBundleOptions();
// This will appear in DB_STORAGE_PATH/iota-migration.log
// and contains information about old and new addresses
options.setLogFileName("iota-migration.log");
options.setMine(batch.get(0).spent());
long[] indexes = batch.stream().map(i -> i.index()).mapToLong(x -> x).toArray();
MigrationBundle bundle = manager.createMigrationBundle(seed, indexes, options);
System.out.println("bundle: " + bundle);
this.migrationBundleHashes.add(bundle.getBundleHash());
} catch (Exception e) {
e.printStackTrace();
}
}
System.out.println("bundle hashes: " + Arrays.toString(migrationBundleHashes.toArray()));
// Send all bundles to the Tangle and reattach them until they are confirmed
for (String bundleHash : new LinkedList<>(migrationBundleHashes)) {
try {
// 0 for default mwm
manager.sendMigrationBundle(nodes, bundleHash, (short) 0);
} catch (Exception e) {
e.printStackTrace();
}
}
started = true;
return mnemonic;
} else {
System.out.println("Detected 0 balance. Exiting.");
}
} catch (Exception e) {
e.printStackTrace();
}
this.account = null;
return null;
}
private List<List<InputData>> getMigrationBundles(InputData[] inputs){
List<InputData> spent = new LinkedList<>();
List<InputData> unspent = new LinkedList<>();
for (InputData input : inputs){
if (input.spent()) {
spent.add(input);
} else {
unspent.add(input);
}
}
List<List<InputData>> unspentInputChunks = selectInputsForUnspentAddresses(unspent);
List<InputData> spentInputs = spent.stream()
.filter(input -> input.balance() >= MINIMUM_MIGRATION_BALANCE)
.collect(Collectors.toList());
List<List<InputData>> totalList = new LinkedList<>();
spentInputs.stream().forEach(i -> totalList.add( Arrays.asList(i) ) );
unspentInputChunks.stream().forEach(iList -> totalList.add( iList ) );
return totalList;
}
/**
* Prepares inputs (as bundles) for unspent addresses.
* Steps:
* - Categorises inputs in two groups 1) inputs with balance >= MINIMUM_MIGRATION_BALANCE 2) inputs with balance < MINIMUM_MIGRATION_BALANCE
* - Creates chunks of category 1 input addresses such that length of each chunk should not exceed MAX_INPUTS_PER_BUNDLE
* - For category 2:
* - Sort the inputs in descending order based on balance;
* - Pick first N inputs (where N = MAX_INPUTS_PER_BUNDLE) and see if their accumulative balance >= MINIMUM_MIGRATION_BALANCE
 * - If yes, then repeat the process for the next N inputs. Otherwise, iterate over the remaining inputs and add them to a chunk that has space for more inputs
* - If there's no chunk with space left, then ignore these funds. NOTE THAT THESE FUNDS WILL ESSENTIALLY BE LOST!
*
 * NOTE: If the total sum of the provided inputs is less than MINIMUM_MIGRATION_BALANCE, then this method will just return an empty array as those funds can't be migrated.
*
 * This method gives precedence to max inputs over funds. It ensures that a bundle has at most MAX_INPUTS_PER_BUNDLE inputs and that their accumulative balance is >= MINIMUM_MIGRATION_BALANCE.
*
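 * Worked example (hypothetical balances; MINIMUM_MIGRATION_BALANCE = 1000000, MAX_INPUTS_PER_BUNDLE = 10):
 * for unspent inputs with balances [1500000, 300000, 200000], the single input >= 1000000 forms the
 * chunk [1500000]; the remaining 500000 total is below the minimum, so fill() packs those two inputs
 * into that chunk, giving [[1500000, 300000, 200000]].
 *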
* @method selectInputsForUnspentAddresses
*
* @param {List<InputData>} inputs
*
* @return {List<List<InputData>>}
*/
private List<List<InputData>> selectInputsForUnspentAddresses(List<InputData> inputs){
long totalInputsBalance = inputs.stream().map(i -> i.balance())
.reduce(0l, (total, input) -> total + input);
// If the total sum of unspent addresses is less than MINIMUM MIGRATION BALANCE,
// just return an empty array as these funds cannot be migrated
if (totalInputsBalance < MINIMUM_MIGRATION_BALANCE) {
return new LinkedList<>();
}
List<InputData> inputsWithEnoughBalance = new LinkedList<>();
List<InputData> inputsWithLowBalance = new LinkedList<>();
for (InputData input : inputs) {
if (input.balance() >= MINIMUM_MIGRATION_BALANCE) {
inputsWithEnoughBalance.add(input);
} else {
inputsWithLowBalance.add(input);
}
}
List<List<InputData>> chunks = new LinkedList<>();
chunks.add(new LinkedList<>());
for (int index=0; index < inputsWithEnoughBalance.size(); index++){
int chunkIndex = (int) Math.floor(index / MAX_INPUTS_PER_BUNDLE);
if (chunkIndex >= chunks.size()) {
chunks.add(new LinkedList<>());
}
chunks.get(chunkIndex).add(inputsWithEnoughBalance.get(index));
}
long totalBalanceOnInputsWithLowBalance = inputsWithLowBalance.stream().map(i -> i.balance())
.reduce(0l, (total, input) -> total + input);
// The remaining low-balance inputs are sorted in descending order; if their accumulative
// balance is below the minimum migration balance, they are packed into chunks that still have space
Collections.sort(inputsWithLowBalance, Collections.reverseOrder(Comparator.comparingLong(InputData::balance)));
if (totalBalanceOnInputsWithLowBalance < MINIMUM_MIGRATION_BALANCE) {
this.fill(chunks, inputsWithLowBalance);
} else {
int startIndex = 0;
int max = (int) java.lang.Math.ceil((double) inputsWithLowBalance.size() / MAX_INPUTS_PER_BUNDLE);
while (startIndex < max) {
List<InputData> inputsSubset = inputsWithLowBalance.subList(startIndex * MAX_INPUTS_PER_BUNDLE, Math.min((startIndex + 1) * MAX_INPUTS_PER_BUNDLE, inputsWithLowBalance.size()));
long balanceOnInputsSubset = inputsSubset.stream().map(i -> i.balance())
.reduce(0l, (total, input) -> total + input);
if (balanceOnInputsSubset >= MINIMUM_MIGRATION_BALANCE) {
chunks.add(inputsSubset);
} else {
this.fill(chunks, inputsSubset);
}
startIndex++;
}
}
return chunks;
}
private void fill(List<List<InputData>> chunks, List<InputData> _inputs) {
int chunkIndexWithSpaceForInput = 0;
for (InputData input : _inputs){
// Remember the old index so we don't check earlier (already full) chunks again
int oldIndex = chunkIndexWithSpaceForInput;
chunkIndexWithSpaceForInput = -1;
for (int index=oldIndex; index < chunks.size(); index++){
if (chunks.get(index).size() < MAX_INPUTS_PER_BUNDLE){
chunks.get(index).add(input);
// Update new latest index
chunkIndexWithSpaceForInput = index;
break;
}
}
if (chunkIndexWithSpaceForInput == -1){
return;
}
}
}
}
|
[
"\"SH_PASSWORD\"",
"\"MIGRATION_SEED\""
] |
[] |
[
"MIGRATION_SEED",
"SH_PASSWORD"
] |
[]
|
["MIGRATION_SEED", "SH_PASSWORD"]
|
java
| 2 | 0 | |
pkg/server/metrics.go
|
package server
import (
"encoding/json"
"fmt"
"net/http"
"os"
"time"
"github.com/Shopify/goose/srvutil"
"github.com/cds-snc/covid-alert-server/pkg/keyclaim"
"github.com/cds-snc/covid-alert-server/pkg/persistence"
"github.com/gorilla/mux"
"context"
)
const ISODATE string = "2006-01-02"
func NewMetricsServlet(db persistence.Conn, auth keyclaim.Authenticator) srvutil.Servlet {
log(nil, nil).Info("registering metrics servlet")
return &metricsServlet{db: db, auth: auth}
}
type metricsServlet struct {
db persistence.Conn
auth keyclaim.Authenticator
}
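// DATEFORMAT constrains the {startDate} path parameter to an ISO-8601 calendar date
// (YYYY-MM-DD), matching the ISODATE layout used to parse it in getEvents.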
const DATEFORMAT string = "\\d{4}-\\d{2}-\\d{2}"
func (m metricsServlet) RegisterRouting(r *mux.Router) {
log(nil, nil).Info("registering metrics route")
r.HandleFunc(fmt.Sprintf("/events/{startDate:%s}", DATEFORMAT), m.handleEventRequest)
}
func authorizeRequest(r *http.Request) error {
uname, pword, ok := r.BasicAuth()
if !ok {
return fmt.Errorf("basic auth required for access")
}
metricUsername := os.Getenv("METRICS_USERNAME")
metricPassword := os.Getenv("METRICS_PASSWORD")
if uname != metricUsername || pword != metricPassword {
return fmt.Errorf("invalid username or password")
}
return nil
}
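// Example request (illustrative host; the credentials come from the METRICS_USERNAME and
// METRICS_PASSWORD environment variables checked above):
//   curl -u "$METRICS_USERNAME:$METRICS_PASSWORD" https://example.org/events/2021-01-01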
func (m *metricsServlet) handleEventRequest(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
if err := authorizeRequest(r); err != nil {
log(ctx, err).Info("Unauthorized BasicAuth")
http.Error(w, "unauthorized", http.StatusUnauthorized)
return
}
if r.Method != "GET" {
log(ctx, nil).WithField("method", r.Method).Info("disallowed method")
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}
m.getEvents(ctx, w, r)
return
}
func (m *metricsServlet) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
startDateVal := vars["startDate"]
_, err := time.Parse(ISODATE, startDateVal)
if err != nil {
log(ctx, err).Errorf("issue parsing %s", startDateVal)
http.Error(w, "error parsing start date", http.StatusBadRequest)
return
}
events, err := m.db.GetServerEvents(startDateVal)
if err != nil {
log(ctx, err).Errorf("issue getting events")
http.Error(w, "error retrieving events", http.StatusBadRequest)
return
}
w.Header().Add("Content-Type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
js, err := json.Marshal(events)
if err != nil {
log(ctx, err).WithField("EventResults", events).Errorf("error marshaling events")
http.Error(w, "error building json object", http.StatusInternalServerError)
return
}
_, err = w.Write(js)
if err != nil {
log(ctx, err).Errorf("error writing json")
http.Error(w, "error retrieving results", http.StatusInternalServerError)
}
return
}
|
[
"\"METRICS_USERNAME\"",
"\"METRICS_PASSWORD\""
] |
[] |
[
"METRICS_PASSWORD",
"METRICS_USERNAME"
] |
[]
|
["METRICS_PASSWORD", "METRICS_USERNAME"]
|
go
| 2 | 0 | |
runsc/main.go
|
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Binary runsc is an implementation of the Open Container Initiative Runtime
// that runs applications inside a sandbox.
package main
import (
"io"
"os"
"path/filepath"
"strings"
"syscall"
"context"
"flag"
"github.com/google/subcommands"
"gvisor.googlesource.com/gvisor/pkg/log"
"gvisor.googlesource.com/gvisor/runsc/boot"
"gvisor.googlesource.com/gvisor/runsc/cmd"
"gvisor.googlesource.com/gvisor/runsc/specutils"
)
var (
// Although these flags are not part of the OCI spec, they are used by
// Docker, and thus should not be changed.
rootDir = flag.String("root", "", "root directory for storage of container state")
logFilename = flag.String("log", "", "file path where internal debug information is written, default is stdout")
logFormat = flag.String("log-format", "text", "log format: text (default) or json")
debug = flag.Bool("debug", false, "enable debug logging")
// These flags are unique to runsc, and are used to configure parts of the
// system that are not covered by the runtime spec.
// Debugging flags.
debugLogDir = flag.String("debug-log-dir", "", "additional location for logs. It creates individual log files per command")
logPackets = flag.Bool("log-packets", false, "enable network packet logging")
logFD = flag.Int("log-fd", -1, "file descriptor to log to. If set, the 'log' flag is ignored.")
debugLogFD = flag.Int("debug-log-fd", -1, "file descriptor to write debug logs to. If set, the 'debug-log-dir' flag is ignored.")
// Debugging flags: strace related
strace = flag.Bool("strace", false, "enable strace")
straceSyscalls = flag.String("strace-syscalls", "", "comma-separated list of syscalls to trace. If --strace is true and this list is empty, then all syscalls will be traced.")
straceLogSize = flag.Uint("strace-log-size", 1024, "default size (in bytes) to log data argument blobs")
// Flags that control sandbox runtime behavior.
platform = flag.String("platform", "ptrace", "specifies which platform to use: ptrace (default), kvm")
network = flag.String("network", "sandbox", "specifies which network to use: sandbox (default), host, none. Using network inside the sandbox is more secure because it's isolated from the host network.")
fileAccess = flag.String("file-access", "exclusive", "specifies which filesystem to use for the root mount: exclusive (default), shared. Volume mounts are always shared.")
overlay = flag.Bool("overlay", false, "wrap filesystem mounts with writable overlay. All modifications are stored in memory inside the sandbox.")
multiContainer = flag.Bool("multi-container", false, "enable *experimental* multi-container support.")
watchdogAction = flag.String("watchdog-action", "log", "sets what action the watchdog takes when triggered: log (default), panic.")
panicSignal = flag.Int("panic-signal", -1, "register signal handling that panics. Usually set to SIGUSR2(12) to troubleshoot hangs. -1 disables it.")
)
// gitRevision is set during linking.
var gitRevision = ""
func main() {
// Help and flags commands are generated automatically.
subcommands.Register(subcommands.HelpCommand(), "")
subcommands.Register(subcommands.FlagsCommand(), "")
// Register user-facing runsc commands.
subcommands.Register(new(cmd.Checkpoint), "")
subcommands.Register(new(cmd.Create), "")
subcommands.Register(new(cmd.Delete), "")
subcommands.Register(new(cmd.Events), "")
subcommands.Register(new(cmd.Exec), "")
subcommands.Register(new(cmd.Gofer), "")
subcommands.Register(new(cmd.Kill), "")
subcommands.Register(new(cmd.List), "")
subcommands.Register(new(cmd.Pause), "")
subcommands.Register(new(cmd.PS), "")
subcommands.Register(new(cmd.Restore), "")
subcommands.Register(new(cmd.Resume), "")
subcommands.Register(new(cmd.Run), "")
subcommands.Register(new(cmd.Start), "")
subcommands.Register(new(cmd.State), "")
subcommands.Register(new(cmd.Wait), "")
// Register internal commands with the internal group name. This causes
// them to be sorted below the user-facing commands with empty group.
// The string below will be printed above the commands.
const internalGroup = "internal use only"
subcommands.Register(new(cmd.Boot), internalGroup)
subcommands.Register(new(cmd.Debug), internalGroup)
subcommands.Register(new(cmd.Gofer), internalGroup)
// All subcommands must be registered before flag parsing.
flag.Parse()
platformType, err := boot.MakePlatformType(*platform)
if err != nil {
cmd.Fatalf("%v", err)
}
fsAccess, err := boot.MakeFileAccessType(*fileAccess)
if err != nil {
cmd.Fatalf("%v", err)
}
if fsAccess == boot.FileAccessShared && *overlay {
cmd.Fatalf("overlay flag is incompatible with shared file access")
}
netType, err := boot.MakeNetworkType(*network)
if err != nil {
cmd.Fatalf("%v", err)
}
wa, err := boot.MakeWatchdogAction(*watchdogAction)
if err != nil {
cmd.Fatalf("%v", err)
}
// Create a new Config from the flags.
conf := &boot.Config{
RootDir: *rootDir,
Debug: *debug,
LogFilename: *logFilename,
LogFormat: *logFormat,
DebugLogDir: *debugLogDir,
FileAccess: fsAccess,
Overlay: *overlay,
Network: netType,
LogPackets: *logPackets,
Platform: platformType,
Strace: *strace,
StraceLogSize: *straceLogSize,
MultiContainer: *multiContainer,
WatchdogAction: wa,
PanicSignal: *panicSignal,
}
if len(*straceSyscalls) != 0 {
conf.StraceSyscalls = strings.Split(*straceSyscalls, ",")
}
// Set up logging.
if *debug {
log.SetLevel(log.Debug)
}
var logFile io.Writer = os.Stderr
if *logFD > -1 {
logFile = os.NewFile(uintptr(*logFD), "log file")
} else if *logFilename != "" {
// We must set O_APPEND and not O_TRUNC because Docker passes
// the same log file for all commands (and also parses these
// log files), so we can't destroy them on each command.
f, err := os.OpenFile(*logFilename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
cmd.Fatalf("error opening log file %q: %v", *logFilename, err)
}
logFile = f
}
var e log.Emitter
switch *logFormat {
case "text":
e = log.GoogleEmitter{&log.Writer{Next: logFile}}
case "json":
e = log.JSONEmitter{log.Writer{Next: logFile}}
default:
cmd.Fatalf("invalid log format %q, must be 'json' or 'text'", *logFormat)
}
if *debugLogFD > -1 {
f := os.NewFile(uintptr(*debugLogFD), "debug log file")
e = log.MultiEmitter{e, log.GoogleEmitter{&log.Writer{Next: f}}}
} else if *debugLogDir != "" {
if err := os.MkdirAll(*debugLogDir, 0775); err != nil {
cmd.Fatalf("error creating dir %q: %v", *debugLogDir, err)
}
subcommand := flag.CommandLine.Arg(0)
f, err := specutils.DebugLogFile(*debugLogDir, subcommand)
if err != nil {
cmd.Fatalf("error opening debug log file in %q: %v", *debugLogDir, err)
}
e = log.MultiEmitter{e, log.GoogleEmitter{&log.Writer{Next: f}}}
}
log.SetTarget(e)
log.Infof("***************************")
log.Infof("Args: %s", os.Args)
log.Infof("Git Revision: %s", gitRevision)
log.Infof("PID: %d", os.Getpid())
log.Infof("UID: %d, GID: %d", os.Getuid(), os.Getgid())
log.Infof("Configuration:")
log.Infof("\t\tRootDir: %s", conf.RootDir)
log.Infof("\t\tPlatform: %v", conf.Platform)
log.Infof("\t\tFileAccess: %v, overlay: %t", conf.FileAccess, conf.Overlay)
log.Infof("\t\tNetwork: %v, logging: %t", conf.Network, conf.LogPackets)
log.Infof("\t\tStrace: %t, max size: %d, syscalls: %s", conf.Strace, conf.StraceLogSize, conf.StraceSyscalls)
log.Infof("***************************")
// Call the subcommand and pass in the configuration.
var ws syscall.WaitStatus
subcmdCode := subcommands.Execute(context.Background(), conf, &ws)
if subcmdCode == subcommands.ExitSuccess {
log.Infof("Exiting with status: %v", ws)
if ws.Signaled() {
// No good way to return it, emulate what the shell does. Maybe raise
// signal to self?
os.Exit(128 + int(ws.Signal()))
}
os.Exit(ws.ExitStatus())
}
// Return an error that is unlikely to be used by the application.
log.Warningf("Failure to execute command, err: %v", subcmdCode)
os.Exit(128)
}
func init() {
// Set default root dir to something (hopefully) user-writeable.
*rootDir = "/var/run/runsc"
if runtimeDir := os.Getenv("XDG_RUNTIME_DIR"); runtimeDir != "" {
*rootDir = filepath.Join(runtimeDir, "runsc")
}
}
|
[
"\"XDG_RUNTIME_DIR\""
] |
[] |
[
"XDG_RUNTIME_DIR"
] |
[]
|
["XDG_RUNTIME_DIR"]
|
go
| 1 | 0 | |
main.py
|
#!/usr/local/bin/python3.4
"""
## Copyright (c) 2015 SONATA-NFV, 2017 5GTANGO [, ANY ADDITIONAL AFFILIATION]
## ALL RIGHTS RESERVED.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Neither the name of the SONATA-NFV, 5GTANGO [, ANY ADDITIONAL AFFILIATION]
## nor the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## This work has been performed in the framework of the SONATA project,
## funded by the European Commission under Grant number 671517 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the SONATA
## partner consortium (www.sonata-nfv.eu).
##
## This work has been performed in the framework of the 5GTANGO project,
## funded by the European Commission under Grant number 761493 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the 5GTANGO
## partner consortium (www.5gtango.eu).
"""
import os, logging, json
from tornado import websocket, web, ioloop, httpserver
from interfaces.nbi import app
from slice_manager.script_ssm_slice import app_ssm
from logger import TangoLogger
import threading
import asyncio
# Log definition to make the slice logs identifiable among the other possible 5GTango components.
LOG = TangoLogger.getLogger(__name__, log_level=logging.DEBUG, log_json=True)
TangoLogger.getLogger("resistoApi:main", logging.DEBUG, log_json=True)
LOG.setLevel(logging.DEBUG)
def app_ssm_thread():
asyncio.set_event_loop(asyncio.new_event_loop())
http_server = httpserver.HTTPServer(app_ssm)
http_server.listen(4001)
ioloop.IOLoop.instance().start()
########################################### MAIN SERVER FUNCTION ############################################
if __name__ == '__main__':
# RUN APP SLICE SSM THREAD
#http_server = httpserver.HTTPServer(app_ssm)
#http_server.listen(4001)
#ioloop.IOLoop.instance().start()
threading.Thread(target=app_ssm_thread).start()
# RUN API MAIN SERVER THREAD
app.run(debug=True, host='0.0.0.0', port=os.environ.get("RESISTO_API_PORT"))
|
[] |
[] |
[
"RESISTO_API_PORT"
] |
[]
|
["RESISTO_API_PORT"]
|
python
| 1 | 0 | |
example/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
from mezzanine.utils.conf import real_project_name
settings_module = "%s.settings" % real_project_name("example")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_module)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/wb/main.go
|
package main
import (
"log"
"os"
"github.com/himetani/workbook/cmd"
)
var (
logger = log.New(os.Stdout, "Info: ", log.LstdFlags)
addr = ":8080"
)
func main() {
cmd.Execute()
}
/*
func main() {
var (
svrStartUp sync.WaitGroup
authCode sync.WaitGroup
consumerKey = os.Getenv("WB_CONSUMER_KEY")
addr = ":8080"
)
client := pocket.NewClient("http://localhost:8080/pocket/redirected", consumerKey, logger)
srv := http.NewServer(client, logger)
ctx, cancel := context.WithCancel(context.Background())
svrStartUp.Add(1)
authCode.Add(1)
go func() {
svrStartUp.Wait()
time.Sleep(1 * time.Second)
if err := client.GetRequestCode(); err != nil {
logger.Panic(err)
}
fmt.Println("")
fmt.Println("Access to https://getpocket.com/auth/authorize?request_token=" + client.RequestCode + "&redirect_uri=" + client.RedirectURL)
authCode.Wait()
cancel()
}()
srv.Serve(addr, &svrStartUp, &authCode, ctx)
authCode.Wait()
}
*/
|
[
"\"WB_CONSUMER_KEY\""
] |
[] |
[
"WB_CONSUMER_KEY"
] |
[]
|
["WB_CONSUMER_KEY"]
|
go
| 1 | 0 | |
spotipy/util.py
|
# -*- coding: utf-8 -*-
""" Shows a user's playlists (need to be authenticated via oauth) """
__all__ = ["CLIENT_CREDS_ENV_VARS", "prompt_for_user_token"]
import logging
import os
import warnings
import spotipy
LOGGER = logging.getLogger(__name__)
CLIENT_CREDS_ENV_VARS = {
"client_id": "SPOTIPY_CLIENT_ID",
"client_secret": "SPOTIPY_CLIENT_SECRET",
"client_username": "SPOTIPY_CLIENT_USERNAME",
"redirect_uri": "SPOTIPY_REDIRECT_URI",
}
def prompt_for_user_token(
username=None,
scope=None,
client_id=None,
client_secret=None,
redirect_uri=None,
cache_path=None,
oauth_manager=None,
show_dialog=False
):
warnings.warn(
"'prompt_for_user_token' is deprecated."
"Use the following instead: "
" auth_manager=SpotifyOAuth(scope=scope)"
" spotipy.Spotify(auth_manager=auth_manager)",
DeprecationWarning
)
""" prompts the user to login if necessary and returns
the user token suitable for use with the spotipy.Spotify
constructor
Parameters:
- username - the Spotify username (optional)
- scope - the desired scope of the request (optional)
- client_id - the client id of your app (required)
- client_secret - the client secret of your app (required)
- redirect_uri - the redirect URI of your app (required)
- cache_path - path to location to save tokens (optional)
- oauth_manager - Oauth manager object (optional)
- show_dialog - If true, a login prompt always shows (optional, defaults to False)
"""
if not oauth_manager:
if not client_id:
client_id = os.getenv("SPOTIPY_CLIENT_ID")
if not client_secret:
client_secret = os.getenv("SPOTIPY_CLIENT_SECRET")
if not redirect_uri:
redirect_uri = os.getenv("SPOTIPY_REDIRECT_URI")
if not client_id:
LOGGER.warning(
"""
You need to set your Spotify API credentials.
You can do this by setting environment variables like so:
export SPOTIPY_CLIENT_ID='your-spotify-client-id'
export SPOTIPY_CLIENT_SECRET='your-spotify-client-secret'
export SPOTIPY_REDIRECT_URI='your-app-redirect-url'
Get your credentials at
https://developer.spotify.com/my-applications
"""
)
raise spotipy.SpotifyException(550, -1, "no credentials set")
sp_oauth = oauth_manager or spotipy.SpotifyOAuth(
client_id,
client_secret,
redirect_uri,
scope=scope,
cache_path=cache_path,
username=username,
show_dialog=show_dialog
)
# try to get a valid token for this user, from the cache,
# if not in the cache, then create a new one (this will send
# the user to a web page where they can authorize this app)
token_info = sp_oauth.get_cached_token()
if not token_info:
code = sp_oauth.get_auth_response()
token = sp_oauth.get_access_token(code, as_dict=False)
else:
return token_info["access_token"]
# Auth'ed API request
if token:
return token
else:
return None
def get_host_port(netloc):
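"""Split a netloc of the form ``host[:port]`` into ``(host, port)``; port is None when absent."""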
if ":" in netloc:
host, port = netloc.split(":", 1)
port = int(port)
else:
host = netloc
port = None
return host, port
|
[] |
[] |
[
"SPOTIPY_CLIENT_SECRET",
"SPOTIPY_CLIENT_ID",
"SPOTIPY_REDIRECT_URI"
] |
[]
|
["SPOTIPY_CLIENT_SECRET", "SPOTIPY_CLIENT_ID", "SPOTIPY_REDIRECT_URI"]
|
python
| 3 | 0 | |
docshtest.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Shell Doctest
"""
from __future__ import print_function
import re
import sys
import os.path
import difflib
import threading
import locale
from io import open
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
PY3 = sys.version_info[0] >= 3
WIN32 = sys.platform == 'win32'
EXNAME = os.path.basename(__file__ if WIN32 else sys.argv[0])
if WIN32:
import tempfile
## Note that locale.getpreferredencoding() does NOT follow
## PYTHONIOENCODING by default, but ``sys.stdout.encoding`` does. In
## PY2, ``sys.stdout.encoding`` without PYTHONIOENCODING set does not
## get any values set in subshells. However, if _preferred_encoding
## is not set to utf-8, it leads to encoding errors.
_preferred_encoding = os.environ.get("PYTHONIOENCODING") or \
locale.getpreferredencoding()
for ext in (".py", ".pyc", ".exe", "-script.py", "-script.pyc"):
if EXNAME.endswith(ext):
EXNAME = EXNAME[:-len(ext)]
break
##
## Python 2 and WIN32 bug correction
##
if WIN32 and not PY3: ## noqa: C901
## Sorry about the following, all this code is to ensure full
## compatibility with python 2.7 under windows about sending unicode
## command-line
import ctypes
import subprocess
import _subprocess
from ctypes import byref, windll, c_char_p, c_wchar_p, c_void_p, \
Structure, sizeof, c_wchar, WinError
from ctypes.wintypes import BYTE, WORD, LPWSTR, BOOL, DWORD, LPVOID, \
HANDLE
##
## Types
##
CREATE_UNICODE_ENVIRONMENT = 0x00000400
LPCTSTR = c_char_p
LPTSTR = c_wchar_p
LPSECURITY_ATTRIBUTES = c_void_p
LPBYTE = ctypes.POINTER(BYTE)
class STARTUPINFOW(Structure):
_fields_ = [
("cb", DWORD), ("lpReserved", LPWSTR),
("lpDesktop", LPWSTR), ("lpTitle", LPWSTR),
("dwX", DWORD), ("dwY", DWORD),
("dwXSize", DWORD), ("dwYSize", DWORD),
("dwXCountChars", DWORD), ("dwYCountChars", DWORD),
("dwFillAtrribute", DWORD), ("dwFlags", DWORD),
("wShowWindow", WORD), ("cbReserved2", WORD),
("lpReserved2", LPBYTE), ("hStdInput", HANDLE),
("hStdOutput", HANDLE), ("hStdError", HANDLE),
]
LPSTARTUPINFOW = ctypes.POINTER(STARTUPINFOW)
class PROCESS_INFORMATION(Structure):
_fields_ = [
("hProcess", HANDLE), ("hThread", HANDLE),
("dwProcessId", DWORD), ("dwThreadId", DWORD),
]
LPPROCESS_INFORMATION = ctypes.POINTER(PROCESS_INFORMATION)
class DUMMY_HANDLE(ctypes.c_void_p):
def __init__(self, *a, **kw):
super(DUMMY_HANDLE, self).__init__(*a, **kw)
self.closed = False
def Close(self):
if not self.closed:
windll.kernel32.CloseHandle(self)
self.closed = True
def __int__(self):
return self.value
CreateProcessW = windll.kernel32.CreateProcessW
CreateProcessW.argtypes = [
LPCTSTR, LPTSTR, LPSECURITY_ATTRIBUTES,
LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCTSTR,
LPSTARTUPINFOW, LPPROCESS_INFORMATION,
]
CreateProcessW.restype = BOOL
##
## Patched functions/classes
##
def CreateProcess(executable, args, _p_attr, _t_attr,
inherit_handles, creation_flags, env, cwd,
startup_info):
"""Create a process supporting unicode executable and args for win32
Python implementation of CreateProcess using CreateProcessW for Win32
"""
si = STARTUPINFOW(
dwFlags=startup_info.dwFlags,
wShowWindow=startup_info.wShowWindow,
cb=sizeof(STARTUPINFOW),
## XXXvlab: not sure of the casting here to ints.
hStdInput=int(startup_info.hStdInput),
hStdOutput=int(startup_info.hStdOutput),
hStdError=int(startup_info.hStdError),
)
wenv = None
if env is not None:
## LPCWSTR seems to be c_wchar_p, so let's say CWSTR is c_wchar
env = (unicode("").join([
unicode("%s=%s\0") % (k, v)
for k, v in env.items()])) + unicode("\0")
wenv = (c_wchar * len(env))()
wenv.value = env
pi = PROCESS_INFORMATION()
creation_flags |= CREATE_UNICODE_ENVIRONMENT
if CreateProcessW(executable, args, None, None,
inherit_handles, creation_flags,
wenv, cwd, byref(si), byref(pi)):
return (DUMMY_HANDLE(pi.hProcess), DUMMY_HANDLE(pi.hThread),
pi.dwProcessId, pi.dwThreadId)
raise WinError()
class Popen(subprocess.Popen):
"""This superseeds Popen and corrects a bug in cPython 2.7 implem"""
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Code from part of _execute_child from Python 2.7 (9fbb65e)
There are only two small changes, both concerning the construction of
the final string in shell mode: we preempt the creation of
the command string when shell is True, because the original function
would try to encode unicode args, which we avoid so that the string
can be sent as-is to ``CreateProcess``.
"""
if not isinstance(args, subprocess.types.StringTypes):
args = subprocess.list2cmdline(args)
if startupinfo is None:
startupinfo = subprocess.STARTUPINFO()
if shell:
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _subprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", unicode("cmd.exe"))
args = unicode('{} /c "{}"').format(comspec, args)
if (_subprocess.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
w9xpopen = self._find_w9xpopen()
args = unicode('"%s" %s') % (w9xpopen, args)
creationflags |= _subprocess.CREATE_NEW_CONSOLE
super(Popen, self)._execute_child(
args, executable,
preexec_fn, close_fds, cwd, env, universal_newlines,
startupinfo, creationflags, False, to_close, p2cread,
p2cwrite, c2pread, c2pwrite, errread, errwrite)
_subprocess.CreateProcess = CreateProcess
from subprocess import PIPE
else:
from subprocess import Popen, PIPE
class Phile(object):
"""File like API to read fields separated by any delimiters
It'll take care of file decoding to unicode.
This is an adaptor on a file object.
>>> if PY3:
... from io import BytesIO
... def File(s):
... _obj = BytesIO()
... _obj.write(s.encode(_preferred_encoding))
... _obj.seek(0)
... return _obj
... else:
... from cStringIO import StringIO as File
>>> f = Phile(File("a-b-c-d"))
Read provides an iterator:
>>> def show(l):
... print(", ".join(l))
>>> show(f.read(delimiter="-"))
a, b, c, d
You can change the buffersize loaded into memory before outputing
your changes. It should not change the iterator output:
>>> f = Phile(File("é-à-ü-d"), buffersize=3)
>>> len(list(f.read(delimiter="-")))
4
>>> f = Phile(File("foo-bang-yummy"), buffersize=3)
>>> show(f.read(delimiter="-"))
foo, bang, yummy
>>> f = Phile(File("foo-bang-yummy"), buffersize=1)
>>> show(f.read(delimiter="-"))
foo, bang, yummy
Empty file is considered one empty field::
>>> f = Phile(File(""))
>>> len(list(f.read(delimiter="-")))
1
"""
def __init__(self, filename, buffersize=4096, encoding=_preferred_encoding):
self._file = filename
self._buffersize = buffersize
self._encoding = encoding
def read(self, delimiter="\n"):
buf = ""
if PY3:
delimiter = delimiter.encode(_preferred_encoding)
buf = buf.encode(_preferred_encoding)
while True:
chunk = self._file.read(self._buffersize)
if not chunk:
yield buf.decode(self._encoding)
return
records = chunk.split(delimiter)
records[0] = buf + records[0]
for record in records[:-1]:
yield record.decode(self._encoding)
buf = records[-1]
def write(self, buf):
if PY3:
buf = buf.encode(self._encoding)
return self._file.write(buf)
def close(self):
return self._file.close()
class Proc(Popen):
def __init__(self, command, env=None, encoding=_preferred_encoding):
super(Proc, self).__init__(
command, stdin=PIPE, stdout=PIPE, stderr=PIPE,
close_fds=ON_POSIX, env=env,
universal_newlines=False)
self.stdin = Phile(self.stdin, encoding=encoding)
self.stdout = Phile(self.stdout, encoding=encoding)
self.stderr = Phile(self.stderr, encoding=encoding)
USAGE = """\
Usage:
%(exname)s (-h|--help)
%(exname)s [[-r|--regex REGEX] ...] DOCSHTEST_FILE
""" % {"exname": EXNAME}
HELP = """\
%(exname)s - parse file and run shell doctests
%(usage)s
Options:
-r REGEX, --regex REGEX
Will apply this regex to the lines to be executed. You
can have more than one patterns by re-using this options
as many times as wanted. Regexps will be applied one by one
in the same order than they are provided on the command line.
Examples:
## run tests but replace executable on-the-fly for coverage support
docshtest README.rst -r '/\\bdocshtest\\b/coverage run docshtest.py/'
""" % {"exname": EXNAME, "usage": USAGE}
## command line quoting
cmd_line_quote = (lambda e: e.replace('\\', '\\\\')) if WIN32 else (lambda e: e)
##
## Helpers coming from other projects
##
## XXXvlab: code comes from kids.txt.diff
def udiff(a, b, fa="", fb=""):
if not a.endswith("\n"):
a += "\n"
if not b.endswith("\n"):
b += "\n"
return "".join(
difflib.unified_diff(
a.splitlines(1), b.splitlines(1),
fa, fb))
## XXXvlab: code comes from ``kids.sh``
ON_POSIX = 'posix' in sys.builtin_module_names
__ENV__ = {}
## XXXvlab: code comes from ``kids.txt``
## Note that a quite equivalent function was added to textwrap in python 3.3
def indent(text, prefix=" ", first=None):
if first is not None:
first_line = text.split("\n")[0]
rest = '\n'.join(text.split("\n")[1:])
return '\n'.join([first + first_line,
indent(rest, prefix=prefix)])
return '\n'.join([prefix + line
for line in text.split('\n')])
## XXXvlab: consider for inclusion in ``kids.sh``
def cmd_iter(cmd):
"""Asynchrone subprocess driver
returns an iterator that yields events of the life of the
process.
"""
def thread_enqueue(label, f, q):
t = threading.Thread(target=enqueue_output, args=(label, f, q))
t.daemon = True ## thread dies with the program
t.start()
return t
def enqueue_output(label, out, queue):
prev_line = None
for line in out.read():
if prev_line is not None:
queue.put((label, "%s\n" % prev_line))
prev_line = line
# print("%s: %r" % (label, line))
# print("END of %s" % (label, ))
if prev_line:
queue.put((label, prev_line))
out.close()
proc = Proc(cmd)
proc.stdin.close()
q = Queue()
t1 = thread_enqueue("out", proc.stdout, q)
t2 = thread_enqueue("err", proc.stderr, q)
running = True
while True:
try:
yield q.get(True, 0.001)
except Empty:
if not running:
break
proc.poll()
running = proc.returncode is None or \
any(t.is_alive() for t in (t1, t2))
# print("%s: %r" % ("errlvl", proc.returncode))
yield "errorlevel", proc.returncode
## XXXvlab: consider for inclusion in ``kids.txt``
def chomp(s):
if len(s):
lines = s.splitlines(True)
last = lines.pop()
return ''.join(lines + last.splitlines())
else:
return ''
def get_docshtest_blocks(lines):
"""Returns an iterator of shelltest blocks from an iterator of lines"""
block = []
consecutive_empty = 0
for line_nb, line in enumerate(lines):
is_empty_line = not line.strip()
if not is_empty_line:
if not line.startswith(" "):
if block:
yield block[:-consecutive_empty] \
if consecutive_empty else block
block = []
continue
else:
line = line[4:]
if line.startswith("$ ") or block:
if line.startswith("$ "):
line = line[2:]
if block:
yield block[:-consecutive_empty] \
if consecutive_empty else block
block = []
if is_empty_line:
consecutive_empty += 1
else:
consecutive_empty = 0
block.append((line_nb + 1, line))
if block:
yield block[:-consecutive_empty] \
if consecutive_empty else block
def bash_iter(cmd, syntax_check=False):
cmd_seq = ["bash", ]
if syntax_check:
cmd_seq.append("-n")
if WIN32:
## Encoding on windows command line is complicated, and
## it seems bash doesn't know how to handle this complexity
## as :
## bash -c "echo é" ## bash: $'echo \303\251': command not found
## bash -c "echo ok" ## ok
with tempfile.TemporaryFile() as tf:
tf.write(cmd.encode("utf-8"))
tf.flush()
cmd_seq.append(tf.name)
for ev, value in cmd_iter(cmd_seq):
yield ev, value
else:
cmd_seq.extend(["-c", cmd])
for ev, value in cmd_iter(cmd_seq):
yield ev, value
def valid_syntax(command):
"""Check if shell command if complete"""
for ev, value in bash_iter(command, syntax_check=True):
if ev == "err":
if value.endswith("syntax error: unexpected end of file"):
return False
if "unexpected EOF while looking for matching" in value:
return False
if "here-document at line" in value:
return False
return value == 0
class UnmatchedLine(Exception):
def __init__(self, *args):
self.args = args
class Ignored(Exception):
def __init__(self, *args):
self.args = args
def run_and_check(command, expected_output): ## noqa: C901
global __ENV__
meta_commands = list(get_meta_commands(command))
for meta_command in meta_commands:
if meta_command[0] == "ignore-if":
if meta_command[1] in __ENV__:
raise Ignored(*meta_command)
if meta_command[0] == "ignore-if-not":
if meta_command[1] not in __ENV__:
raise Ignored(*meta_command)
expected_output = expected_output.replace("<BLANKLINE>\n", "\n")
orig_expected_output = expected_output
output = ""
diff = False
for ev, value in bash_iter(command):
if ev in ("err", "out"):
if WIN32:
value = value.replace("\r\n", "\n")
output += value
if not diff and expected_output.startswith(value):
expected_output = expected_output[len(value):]
else:
diff = True
if not diff and len(chomp(expected_output)):
diff = True
for meta_command in meta_commands:
if meta_command[0] == "if-success-set":
if not diff:
__ENV__[meta_command[1]] = 1
raise Ignored(*meta_command)
else:
raise Ignored(*meta_command)
if diff:
raise UnmatchedLine(output, orig_expected_output)
return value == 0
def format_failed_test(message, command, output, expected):
formatted = []
formatted.append("command:\n%s" % indent(command, "| "))
formatted.append("expected:\n%s" % indent(expected, "| ").strip())
formatted.append("output:\n%s" % indent(output, "| ").strip())
if len(expected.splitlines() + output.splitlines()) > 10:
formatted.append(
"diff:\n%s"
% udiff(expected, output, "expected", "output").strip())
formatted = '\n'.join(formatted)
return "%s\n%s" % (message, indent(formatted, prefix=" "))
def apply_regex(patterns, s):
for p in patterns:
s = re.sub(p[0], p[1], s)
return s
META_COMMAND_REGEX = '##? docshtest: (?P<cmd>.*)$'
def get_meta_commands(command):
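"""Yield docshtest meta-commands parsed from ``command``.
Meta-commands are written as ``## docshtest: <directive> [args]`` and are yielded
as token lists, e.g. ``['ignore-if', 'MYVAR']``; see ``run_and_check`` for the
directives that are acted upon.
"""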
for m in re.finditer(META_COMMAND_REGEX, command):
raw_cmd = m.groupdict()["cmd"]
cmd = raw_cmd.strip()
cmd = re.sub(' +', ' ', cmd)
yield cmd.split(' ')
def shtest_runner(lines, regex_patterns):
def _lines(start_line_nb, stop_line_nb):
return (("lines %9s" % ("%s-%s" % (start_line_nb, stop_line_nb)))
if start_line_nb != stop_line_nb else
("line %10s" % start_line_nb))
for block_nb, block in enumerate(get_docshtest_blocks(lines)):
lines = iter(block)
command_block = ""
start_line_nb = None
stop_line_nb = None
for line_nb, line in lines:
start_line_nb = start_line_nb or line_nb
command_block += line
if valid_syntax(apply_regex(regex_patterns,
command_block)):
stop_line_nb = line_nb
break
else:
raise ValueError("Invalid Block:\n%s"
% (indent(command_block, " | ")))
command_block = command_block.rstrip("\n\r")
command_block = apply_regex(regex_patterns, command_block)
try:
run_and_check(command_block, "".join(line for _, line in lines))
except UnmatchedLine as e:
safe_print(format_failed_test(
"#%04d - failure (%15s):"
% (block_nb + 1, _lines(start_line_nb, stop_line_nb)),
command_block,
e.args[0],
e.args[1]))
exit(1)
except Ignored as e:
print("#%04d - ignored (%15s): %s"
% (block_nb + 1,
_lines(start_line_nb, stop_line_nb),
" ".join(e.args)))
else:
print("#%04d - success (%15s)"
% (block_nb + 1, _lines(start_line_nb, stop_line_nb)))
sys.stdout.flush()
def split_quote(s, split_char='/', quote='\\'):
r"""Split args separated by char, possibily quoted with quote char
>>> tuple(split_quote('/pattern/replace/'))
('', 'pattern', 'replace', '')
>>> tuple(split_quote('/pat\/tern/replace/'))
('', 'pat/tern', 'replace', '')
>>> tuple(split_quote('/pat\/ter\n/replace/'))
('', 'pat/ter\n', 'replace', '')
"""
buf = ""
parse_str = iter(s)
for char in parse_str:
if char == split_char:
yield buf
buf = ""
continue
if char == quote:
char = next(parse_str)
if char != split_char:
buf += quote
buf += char
yield buf
def safe_print(content):
if not PY3:
if isinstance(content, unicode):
content = content.encode(_preferred_encoding)
print(content, end='')
sys.stdout.flush()
def main(args):
pattern = None
if any(arg in args for arg in ["-h", "--help"]):
print(HELP)
exit(0)
patterns = []
for arg in ["-r", "--regex"]:
while arg in args:
idx = args.index(arg)
pattern = args[idx + 1]
del args[idx + 1]
del args[idx]
if re.match('^[a-zA-Z0-9]$', pattern[0]):
print("Error: regex %s should start with a delimiter char, "
"not an alphanumerical char." % pattern)
print(USAGE)
exit(1)
parts = tuple(split_quote(pattern, split_char=pattern[0]))
if not (parts[0] == parts[-1] == ''):
print("Error: regex should start and"
"end with a delimiter char.")
exit(1)
parts = parts[1:-1]
if len(parts) > 2:
print("Error: Found too many delimiter char.")
exit(1)
patterns.append(parts)
if len(args) == 0:
print("Error: please provide a rst filename as argument."
" (use '--help' option to get usage info)")
exit(1)
filename = args[0]
if not os.path.exists(filename):
print("Error: file %r not found." % filename)
exit(1)
shtest_runner(open(filename, encoding=_preferred_encoding),
regex_patterns=patterns)
def entrypoint():
sys.exit(main(sys.argv[1:]))
if __name__ == "__main__":
entrypoint()
|
[] |
[] |
[
"PYTHONIOENCODING",
"COMSPEC"
] |
[]
|
["PYTHONIOENCODING", "COMSPEC"]
|
python
| 2 | 0 | |
config/config.go
|
package config
import (
"os"
"github.com/jinzhu/configor"
amazonpay "github.com/qor/amazon-pay-sdk-go"
"github.com/qor/auth/providers/facebook"
"github.com/qor/auth/providers/github"
"github.com/qor/auth/providers/google"
"github.com/qor/auth/providers/twitter"
"github.com/qor/gomerchant"
"github.com/qor/location"
"github.com/qor/mailer"
"github.com/qor/mailer/logger"
"github.com/qor/media/oss"
"github.com/qor/oss/s3"
"github.com/qor/redirect_back"
"github.com/qor/session/manager"
"github.com/unrolled/render"
)
type SMTPConfig struct {
Host string
Port string
User string
Password string
}
var Config = struct {
HTTPS bool `default:"false" env:"HTTPS"`
Port uint `default:"7000" env:"PORT"`
DB struct {
Name string `env:"DBName" default:"qor_example"`
Adapter string `env:"DBAdapter" default:"mysql"`
Host string `env:"DBHost" default:"localhost"`
Port string `env:"DBPort" default:"3306"`
User string `env:"DBUser"`
Password string `env:"DBPassword"`
}
S3 struct {
AccessKeyID string `env:"AWS_ACCESS_KEY_ID"`
SecretAccessKey string `env:"AWS_SECRET_ACCESS_KEY"`
Region string `env:"AWS_Region"`
S3Bucket string `env:"AWS_Bucket"`
}
AmazonPay struct {
MerchantID string `env:"AmazonPayMerchantID"`
AccessKey string `env:"AmazonPayAccessKey"`
SecretKey string `env:"AmazonPaySecretKey"`
ClientID string `env:"AmazonPayClientID"`
ClientSecret string `env:"AmazonPayClientSecret"`
Sandbox bool `env:"AmazonPaySandbox"`
CurrencyCode string `env:"AmazonPayCurrencyCode" default:"JPY"`
}
SMTP SMTPConfig
Github github.Config
Google google.Config
Facebook facebook.Config
Twitter twitter.Config
GoogleAPIKey string `env:"GoogleAPIKey"`
BaiduAPIKey string `env:"BaiduAPIKey"`
}{}
var (
Root = os.Getenv("GOPATH") + "/src/github.com/qor/qor-example"
Mailer *mailer.Mailer
Render = render.New()
AmazonPay *amazonpay.AmazonPay
PaymentGateway gomerchant.PaymentGateway
RedirectBack = redirect_back.New(&redirect_back.Config{
SessionManager: manager.SessionManager,
IgnoredPrefixes: []string{"/auth"},
})
)
func init() {
if err := configor.Load(&Config, "config/database.yml", "config/smtp.yml", "config/application.yml"); err != nil {
panic(err)
}
location.GoogleAPIKey = Config.GoogleAPIKey
location.BaiduAPIKey = Config.BaiduAPIKey
if Config.S3.AccessKeyID != "" {
oss.Storage = s3.New(&s3.Config{
AccessID: Config.S3.AccessKeyID,
AccessKey: Config.S3.SecretAccessKey,
Region: Config.S3.Region,
Bucket: Config.S3.S3Bucket,
})
}
AmazonPay = amazonpay.New(&amazonpay.Config{
MerchantID: Config.AmazonPay.MerchantID,
AccessKey: Config.AmazonPay.AccessKey,
SecretKey: Config.AmazonPay.SecretKey,
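// NOTE: sandbox mode and region are hardcoded here; Config.AmazonPay.Sandbox is not consulted.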
Sandbox: true,
Region: "jp",
})
// dialer := gomail.NewDialer(Config.SMTP.Host, Config.SMTP.Port, Config.SMTP.User, Config.SMTP.Password)
// sender, err := dialer.Dial()
// Mailer = mailer.New(&mailer.Config{
// Sender: gomailer.New(&gomailer.Config{Sender: sender}),
// })
Mailer = mailer.New(&mailer.Config{
Sender: logger.New(&logger.Config{}),
})
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
test/helpers/ssh_command.go
|
// Copyright 2017-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helpers
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strconv"
"time"
"github.com/kevinburke/ssh_config"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
// SSHCommand stores the data associated with executing a command.
// TODO: this is poorly named in that it's not related to a command only
// ran over SSH - rename this.
type SSHCommand struct {
// TODO: path is not a clear name - rename to something more clear.
Path string
Env []string
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
// SSHClient stores the information needed to SSH into a remote location for
// running tests.
type SSHClient struct {
Config *ssh.ClientConfig // ssh client configuration information.
Host string // Ip/Host from the target virtualserver
Port int // Port to connect to the target server
client *ssh.Client // Client implements a traditional SSH client that supports shells,
// subprocesses, TCP port/streamlocal forwarding and tunneled dialing.
}
// GetHostPort returns the host port representation of the ssh client
func (cli *SSHClient) GetHostPort() string {
return net.JoinHostPort(cli.Host, strconv.Itoa(cli.Port))
}
// SSHConfig contains metadata for an SSH session.
type SSHConfig struct {
target string
host string
user string
port int
identityFile string
}
// SSHConfigs maps the name of a host (VM) to its corresponding SSHConfiguration
type SSHConfigs map[string]*SSHConfig
// GetSSHClient initializes an SSHClient based on the provided SSHConfig
func (cfg *SSHConfig) GetSSHClient() *SSHClient {
sshConfig := &ssh.ClientConfig{
User: cfg.user,
Auth: []ssh.AuthMethod{
cfg.GetSSHAgent(),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: 15 * time.Second,
}
return &SSHClient{
Config: sshConfig,
Host: cfg.host,
Port: cfg.port,
}
}
func (client *SSHClient) String() string {
return fmt.Sprintf("host: %s, port: %d, user: %s", client.Host, client.Port, client.Config.User)
}
func (cfg *SSHConfig) String() string {
return fmt.Sprintf("target: %s, host: %s, port %d, user, %s, identityFile: %s", cfg.target, cfg.host, cfg.port, cfg.user, cfg.identityFile)
}
// GetSSHAgent returns the ssh.AuthMethod corresponding to SSHConfig cfg.
func (cfg *SSHConfig) GetSSHAgent() ssh.AuthMethod {
key, err := ioutil.ReadFile(cfg.identityFile)
if err != nil {
log.Fatalf("unable to retrieve ssh-key on target '%s': %s", cfg.target, err)
}
signer, err := ssh.ParsePrivateKey(key)
if err != nil {
log.Fatalf("unable to parse private key on target '%s': %s", cfg.target, err)
}
return ssh.PublicKeys(signer)
}
// ImportSSHconfig imports the SSH configuration stored at the provided path.
// Returns an error if the SSH configuration could not be instantiated.
func ImportSSHconfig(config []byte) (SSHConfigs, error) {
result := make(SSHConfigs)
cfg, err := ssh_config.Decode(bytes.NewBuffer(config))
if err != nil {
return nil, err
}
for _, host := range cfg.Hosts {
key := host.Patterns[0].String()
if key == "*" {
continue
}
port, _ := cfg.Get(key, "Port")
hostConfig := SSHConfig{target: key}
hostConfig.host, _ = cfg.Get(key, "Hostname")
hostConfig.identityFile, _ = cfg.Get(key, "identityFile")
hostConfig.user, _ = cfg.Get(key, "User")
hostConfig.port, _ = strconv.Atoi(port)
result[key] = &hostConfig
}
return result, nil
}
// copyWait runs an instance of io.Copy() in a goroutine, and returns a channel
// to receive the error result.
func copyWait(dst io.Writer, src io.Reader) chan error {
c := make(chan error, 1)
go func() {
_, err := io.Copy(dst, src)
c <- err
}()
return c
}
// runCommand runs the specified command on the provided SSH session, and
// gathers both of the sterr and stdout output into the writers provided by
// cmd. Returns whether the command was run and an optional error.
// Returns nil when the command completes successfully and all stderr,
// stdout output has been written. Returns an error otherwise.
func runCommand(session *ssh.Session, cmd *SSHCommand) (bool, error) {
stderr, err := session.StderrPipe()
if err != nil {
return false, fmt.Errorf("Unable to setup stderr for session: %v", err)
}
errChan := copyWait(cmd.Stderr, stderr)
stdout, err := session.StdoutPipe()
if err != nil {
return false, fmt.Errorf("Unable to setup stdout for session: %v", err)
}
outChan := copyWait(cmd.Stdout, stdout)
if err = session.Run(cmd.Path); err != nil {
return false, err
}
if err = <-errChan; err != nil {
return true, err
}
if err = <-outChan; err != nil {
return true, err
}
return true, nil
}
// RunCommand runs a SSHCommand using SSHClient client. The returned error is
// nil if the command runs, has no problems copying stdin, stdout, and stderr,
// and exits with a zero exit status.
func (client *SSHClient) RunCommand(cmd *SSHCommand) error {
session, err := client.newSession()
if err != nil {
return err
}
defer session.Close()
_, err = runCommand(session, cmd)
return err
}
// RunCommandInBackground runs an SSH command in a similar way to
// RunCommandContext, but with a context which allows the command to be
// cancelled at any time. When cancel is called the error of the command is
// returned instead the context error.
func (client *SSHClient) RunCommandInBackground(ctx context.Context, cmd *SSHCommand) error {
if ctx == nil {
panic("nil context provided to RunCommandInBackground()")
}
session, err := client.newSession()
if err != nil {
return err
}
defer session.Close()
modes := ssh.TerminalModes{
ssh.ECHO: 1, // enable echoing
ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
}
session.RequestPty("xterm-256color", 80, 80, modes)
stdin, err := session.StdinPipe()
if err != nil {
log.Errorf("Could not get stdin: %s", err)
}
go func() {
select {
case <-ctx.Done():
_, err := stdin.Write([]byte{3})
if err != nil {
log.Errorf("write ^C error: %s", err)
}
err = session.Wait()
if err != nil {
log.Errorf("wait error: %s", err)
}
if err = session.Signal(ssh.SIGHUP); err != nil {
log.Errorf("failed to kill command: %s", err)
}
if err = session.Close(); err != nil {
log.Errorf("failed to close session: %s", err)
}
}
}()
_, err = runCommand(session, cmd)
return err
}
// RunCommandContext runs an SSH command in a similar way to RunCommand but with
// a context. If context is canceled it will return the error of that given
// context.
func (client *SSHClient) RunCommandContext(ctx context.Context, cmd *SSHCommand) error {
if ctx == nil {
panic("nil context provided to RunCommandContext()")
}
var (
session *ssh.Session
sessionErrChan = make(chan error, 1)
)
go func() {
var sessionErr error
// This may block depending on the state of the setup tests are being
// ran against. As a result, these goroutines may leak, but the logic
// below will fail and propagate to the rest of the CI framework, which
// will error out anyway. It's better to leak in really bad cases since
// the CI will fail anyway. Unfortunately, the golang SSH library does
// not provide a way to propagate context through to creating sessions.
// Note that this is a closure on the session variable!
session, sessionErr = client.newSession()
if sessionErr != nil {
log.Infof("error creating session: %s", sessionErr)
sessionErrChan <- sessionErr
return
}
_, runErr := runCommand(session, cmd)
sessionErrChan <- runErr
}()
select {
case asyncErr := <-sessionErrChan:
return asyncErr
case <-ctx.Done():
if session != nil {
log.Warning("sending SIGHUP to session due to canceled context")
if err := session.Signal(ssh.SIGHUP); err != nil {
log.Errorf("failed to kill command when context is canceled: %s", err)
}
if closeErr := session.Close(); closeErr != nil {
log.WithError(closeErr).Error("failed to close session")
}
} else {
log.Error("timeout reached; no session was able to be created")
}
return ctx.Err()
}
}
func (client *SSHClient) newSession() (*ssh.Session, error) {
var connection *ssh.Client
var err error
if client.client != nil {
connection = client.client
} else {
connection, err = ssh.Dial(
"tcp",
net.JoinHostPort(client.Host, fmt.Sprintf("%d", client.Port)),
client.Config)
if err != nil {
return nil, fmt.Errorf("failed to dial: %s", err)
}
client.client = connection
}
session, err := connection.NewSession()
if err != nil {
return nil, fmt.Errorf("failed to create session: %s", err)
}
return session, nil
}
// SSHAgent returns the ssh.AuthMethod using the public keys. Returns nil if
// a connection to SSH_AUTH_SOCK does not succeed.
func SSHAgent() ssh.AuthMethod {
if sshAgent, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil {
return ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)
}
return nil
}
// GetSSHClient initializes an SSHClient for the specified host/port/user
// combination.
func GetSSHClient(host string, port int, user string) *SSHClient {
sshConfig := &ssh.ClientConfig{
User: user,
Auth: []ssh.AuthMethod{
SSHAgent(),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: 15 * time.Second,
}
return &SSHClient{
Config: sshConfig,
Host: host,
Port: port,
}
}
|
[
"\"SSH_AUTH_SOCK\""
] |
[] |
[
"SSH_AUTH_SOCK"
] |
[]
|
["SSH_AUTH_SOCK"]
|
go
| 1 | 0 | |
Lib/site-packages/hackedit/__init__.py
|
"""
The hackable editor for the desktop, built with Python3 and PyQt5.
"""
import os
os.environ['QT_API'] = 'pyqt5'
__version__ = '1.0a2'
|
[] |
[] |
[
"QT_API"
] |
[]
|
["QT_API"]
|
python
| 1 | 0 | |
tests/tests.py
|
import sys, os
sys.path.append(os.path.abspath("."))
import unittest
from bot import Bot
from vk.plus import asyncio, Wait
from vk.data import Message, MAX_LENGHT
from vk.helpers import upload_doc, upload_photo
try:
from settings_real import BotSettings
except ImportError:
from settings import BotSettings
user_token = os.environ.get('SKETAL_USER_TOKEN', '')
if user_token:
BotSettings.USERS = (
("user", user_token,),
)
class TestBot(unittest.TestCase):
bot = Bot(BotSettings)
def test_loading(self):
with self.assertLogs(self.bot.logger, level='INFO') as cm:
self.bot = Bot(BotSettings, logger=self.bot.logger)
self.assertIn(f'INFO:{self.bot.logger.name}:Initializing bot', cm.output)
self.assertIn(f'INFO:{self.bot.logger.name}:Initializing vk clients', cm.output)
self.assertIn(f'INFO:{self.bot.logger.name}:Loading plugins', cm.output)
def test_longpoll(self):
task = self.bot.longpoll_run(True)
async def bot_killer():
await asyncio.sleep(5)
self.bot.stop_bot()
asyncio.ensure_future(bot_killer(), loop=self.bot.loop)
with self.assertLogs(self.bot.logger, level='INFO') as cm:
with self.assertRaises(asyncio.CancelledError):
self.bot.loop.run_until_complete(task)
self.assertEqual(cm.output, [f'INFO:{self.bot.logger.name}:Attempting to turn bot off'])
def test_callback(self):
task = self.bot.callback_run(True)
async def bot_killer():
await asyncio.sleep(5)
self.bot.stop_bot()
asyncio.ensure_future(bot_killer(), loop=self.bot.loop)
with self.assertLogs(self.bot.logger, level='INFO') as cm:
with self.assertRaises(asyncio.CancelledError):
self.bot.loop.run_until_complete(task)
self.assertEqual(cm.output, [f'INFO:{self.bot.logger.name}:Attempting to turn bot off'])
def test_errors(self):
with self.assertLogs(self.bot.logger, level='ERROR') as cm:
self.bot.do(self.bot.api.messages.send())
self.assertIn(r"ERROR:sketal:Errors while executing vk method: {'code': 100, 'method': 'messages.send', 'error_msg': 'One of the parameters specified was missing or invalid: you should specify peer_id, user_id, domain, chat_id or user_ids param'}, {'code': 100, 'method': 'execute', 'error_msg': 'One of the parameters specified was missing or invalid: you should specify peer_id, user_id, domain, chat_id or user_ids param'}", cm.output)
def test_upload(self):
with open("tests/simple_image.png", "rb") as f:
result = self.bot.do(upload_photo(self.bot.api, f.read()))
self.assertIsNotNone(result)
self.assertNotEqual(result.url, "")
self.bot.do(self.bot.api.photos.delete(owner_id=result.owner_id, photo_id=result.id))
with open("tests/simple_image.png", "rb") as f:
result = self.bot.do(upload_doc(self.bot.api, f.read(), "image.png"))
self.assertIsNotNone(result)
self.assertNotEqual(result.url, "")
self.bot.do(self.bot.api.docs.delete(owner_id=result.owner_id, doc_id=result.id))
def test_accumulative_methods(self):
async def work():
sender = self.bot.api.get_default_sender("wall.getById")
with self.bot.api.mass_request():
tas1 = await self.bot.api.method_accumulative("wall.getById", {}, {"posts": "-145935681_515"},
sender=sender, wait=Wait.CUSTOM)
tas2 = await self.bot.api.method_accumulative("wall.getById", {}, {"posts": "-145935681_512"},
sender=sender, wait=Wait.CUSTOM)
tas3 = await self.bot.api.method_accumulative("wall.getById", {}, {"posts": "-145935681_511"},
sender=sender, wait=Wait.CUSTOM)
if tas1 is False and tas2 is False and tas3 is False:
return
await asyncio.gather(tas1, tas2, tas3, loop=self.bot.loop, return_exceptions=True)
self.assertEqual(tas1.result()["id"], 515)
self.assertEqual(tas2.result()["id"], 512)
self.assertEqual(tas3.result()["id"], 511)
self.bot.do(work())
class TestVkUtils(unittest.TestCase):
def check(self, message):
self.assertLessEqual(len(message), MAX_LENGHT)
self.assertGreaterEqual(len(message), 1)
def test_simple_message(self):
result = Message.prepare_message("hi")
size = 0
for r in result:
self.check(r)
size += 1
self.assertEqual(size, 1)
def test_long_messages(self):
result = Message.prepare_message("a" * MAX_LENGHT)
for r in result:
self.check(r)
result = Message.prepare_message(("a" * (MAX_LENGHT - 1) + "\n") * 2)
for r in result:
self.check(r)
result = Message.prepare_message(("a" * MAX_LENGHT + "\n") * 2)
for r in result:
self.check(r)
def test_bad_messages(self):
result = list(Message.prepare_message("a\n" * (MAX_LENGHT // 2)))
for r in result:
self.check(r)
self.assertEqual(len(result), 1)
result = Message.prepare_message("a" * (MAX_LENGHT * 3))
for r in result:
self.check(r)
result = list(Message.prepare_message("a " * int(MAX_LENGHT * 2.9)))
for r in result:
self.check(r)
self.assertEqual(len(result), 6)
result = list(Message.prepare_message("a" * MAX_LENGHT + " a"))
for r in result:
self.check(r)
self.assertEqual(len(result), 2)
self.assertEqual(len(result[-1]), 1)
result = list(Message.prepare_message("a" * MAX_LENGHT + " aaaa"))
for r in result:
self.check(r)
self.assertEqual(len(result), 2)
self.assertEqual(len(result[-1]), 4)
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"SKETAL_USER_TOKEN"
] |
[]
|
["SKETAL_USER_TOKEN"]
|
python
| 1 | 0 | |
pandaclient/PdbUtils.py
|
import os
import re
import sys
import time
import datetime
from .MiscUtils import commands_get_status_output
try:
long()
except Exception:
long = int
from . import PLogger
from .LocalJobSpec import LocalJobSpec
from .LocalJobsetSpec import LocalJobsetSpec
class PdbProxy:
# constructor
def __init__(self,verbose=False):
# database engine
self.engine = 'sqlite3'
# version of database schema
self.version = '0_0_1'
# database file name
self.filename = 'pandajob.db'
# database dir
self.database_dir = os.path.expanduser(os.environ['PANDA_CONFIG_ROOT'])
# full path of database file
self.database = '%s/%s' % (self.database_dir,self.filename)
# table name
self.tablename = 'jobtable_%s' % self.version
# verbose
self.verbose = verbose
# connection
self.con = None
# logger
self.log = PLogger.getPandaLogger()
# set verbose
def setVerbose(self,verbose):
# verbose
self.verbose = verbose
# execute SQL
def execute(self,sql,var={}):
# logger
tmpLog = PLogger.getPandaLogger()
# expand variables
for tmpKey in var:
tmpVal = var[tmpKey]
sql = sql.replace(tmpKey, str(tmpVal))
# construct command
com = '%s %s "%s"' % (self.engine,self.database,sql)
if self.verbose:
tmpLog.debug("DB Req : " + com)
# execute
nTry = 5
status = 0
for iTry in range(nTry):
if self.verbose:
tmpLog.debug(" Try : %s/%s" % (iTry,nTry))
status,output = commands_get_status_output(com)
status %= 255
if status == 0:
break
if iTry+1 < nTry:
time.sleep(2)
# return
if status != 0:
tmpLog.error(status)
tmpLog.error(output)
return False,output
else:
if self.verbose:
tmpLog.debug(" Ret : " + output)
outList = output.split('\n')
# remove ''
try:
outList.remove('')
except Exception:
pass
# remove junk messages
ngStrings = ['Loading resources from']
for tmpStr in tuple(outList):
# look for NG strings
flagNG = False
for ngStr in ngStrings:
match = re.search(ngStr,tmpStr,re.I)
if match is not None:
flagNG = True
break
# remove
if flagNG:
try:
outList.remove(tmpStr)
except Exception:
pass
return True,outList
# execute SQL
def execute_direct(self, sql, var=None, fetch=False):
if self.con is None:
import sqlite3
self.con = sqlite3.connect(self.database, check_same_thread=False)
if self.verbose:
self.log.debug("DB Req : {0} var={1}".format(sql, str(var)))
cur = self.con.cursor()
try:
if var is None:
var = {}
cur.execute(sql, var)
retVal = True
except Exception:
retVal = False
if not self.verbose:
self.log.error("DB Req : {0} var={1}".format(sql, str(var)))
err_type, err_value = sys.exc_info()[:2]
err_str = "{0} {1}".format(err_type.__name__, err_value)
self.log.error(err_str)
if self.verbose:
self.log.debug(retVal)
outList = []
if retVal:
if fetch:
outList = cur.fetchall()
if self.verbose:
for item in outList:
self.log.debug(" Ret : " + str(item))
self.con.commit()
return retVal, outList
# remove old database
def deleteDatabase(self):
commands_get_status_output('rm -f %s' % self.database)
# initialize database
def initialize(self):
# import sqlite3
# check if sqlite3 is available
com = 'which %s' % self.engine
status,output = commands_get_status_output(com)
if status != 0:
errstr = "\n\n"
errstr += "ERROR : %s is not available in PATH\n\n" % self.engine
errstr += "There are some possible solutions\n"
errstr += " * run this application under Athena runtime with Release 14 or higher. e.g.,\n"
errstr += " $ source setup.sh -tag=14.2.24,32,setup\n"
errstr += " $ source .../etc/panda/panda_setup.sh\n\n"
errstr += " * set PATH and LD_LIBRARY_PATH to include %s. e.g., at CERN\n" % self.engine
errstr += " $ export PATH=/afs/cern.ch/sw/lcg/external/sqlite/3.4.0/slc3_ia32_gcc323/bin:$PATH\n"
errstr += " $ export LD_LIBRARY_PATH=/afs/cern.ch/sw/lcg/external/sqlite/3.4.0/slc3_ia32_gcc323/lib:$LD_LIBRARY_PATH\n"
errstr += " $ source .../etc/panda/panda_setup.sh\n\n"
errstr += " * install %s from the standard SL4 repository. e.g.,\n" % self.engine
errstr += " $ yum install %s\n\n" % self.engine
errstr += " * use SLC5\n"
raise RuntimeError(errstr)
# create dir for DB
if not os.path.exists(self.database_dir):
os.makedirs(self.database_dir)
# the table already exist
if self.checkTable():
return
# create table
self.createTable()
return
# check table
def checkTable(self):
# get tables
retS,retV = self.execute('.table')
if not retS:
raise RuntimeError("cannot get tables")
# the table already exist or not
if retV == []:
return False
if self.tablename not in retV[-1].split():
return False
# check schema
self.checkSchema()
return True
# check schema
def checkSchema(self,noAdd=False):
        # get column names
retS,retV = self.execute('PRAGMA table_info(%s)' % self.tablename)
if not retS:
raise RuntimeError("cannot get table_info")
# parse
columns = []
for line in retV:
items = line.split('|')
if len(items) > 1:
columns.append(items[1])
# check
for tmpC in LocalJobSpec.appended:
tmpA = LocalJobSpec.appended[tmpC]
if tmpC not in columns:
if noAdd:
raise RuntimeError("%s not found in database schema" % tmpC)
# add column
retS,retV = self.execute("ALTER TABLE %s ADD COLUMN '%s' %s" % \
(self.tablename,tmpC,tmpA))
if not retS:
raise RuntimeError("cannot add %s to database schema" % tmpC)
if noAdd:
return
# check whole schema just in case
self.checkSchema(noAdd=True)
# create table
def createTable(self):
# ver 0_1_1
sql = "CREATE TABLE %s (" % self.tablename
sql += "'id' INTEGER PRIMARY KEY,"
sql += "'JobID' INTEGER,"
sql += "'PandaID' TEXT,"
sql += "'jobStatus' TEXT,"
sql += "'site' VARCHAR(128),"
sql += "'cloud' VARCHAR(20),"
sql += "'jobType' VARCHAR(20),"
sql += "'jobName' VARCHAR(128),"
sql += "'inDS' TEXT,"
sql += "'outDS' TEXT,"
sql += "'libDS' VARCHAR(255),"
sql += "'jobParams' TEXT,"
sql += "'retryID' INTEGER,"
sql += "'provenanceID' INTEGER,"
sql += "'creationTime' TIMESTAMP,"
sql += "'lastUpdate' TIMESTAMP,"
sql += "'dbStatus' VARCHAR(20),"
sql += "'buildStatus' VARCHAR(20),"
sql += "'commandToPilot' VARCHAR(20),"
for tmpC in LocalJobSpec.appended:
tmpA = LocalJobSpec.appended[tmpC]
sql += "'%s' %s," % (tmpC,tmpA)
sql = sql[:-1]
sql += ")"
# execute
retS,retV = self.execute(sql)
if not retS:
raise RuntimeError("failed to create %s" % self.tablename)
# confirm
if not self.checkTable():
raise RuntimeError("failed to confirm %s" % self.tablename)
# convert Panda jobs to DB representation
def convertPtoD(pandaJobList,pandaIDstatus,localJob=None,fileInfo={},pandaJobForSiteID=None):
statusOnly = False
if localJob is not None:
# update status only
ddata = localJob
statusOnly = True
else:
# create new spec
ddata = LocalJobSpec()
# sort by PandaID
pandIDs = list(pandaIDstatus)
pandIDs.sort()
pStr = ''
sStr = ''
ddata.commandToPilot = ''
for tmpID in pandIDs:
# PandaID
pStr += '%s,' % tmpID
# status
sStr += '%s,' % pandaIDstatus[tmpID][0]
# commandToPilot
if pandaIDstatus[tmpID][1] == 'tobekilled':
ddata.commandToPilot = 'tobekilled'
pStr = pStr[:-1]
sStr = sStr[:-1]
# job status
ddata.jobStatus = sStr
# PandaID
ddata.PandaID = pStr
# get panda Job
pandaJob = None
if pandaJobList != []:
# look for buildJob since it doesn't have the first PandaID when retried
for pandaJob in pandaJobList:
if pandaJob.prodSourceLabel == 'panda':
break
elif pandaJobForSiteID is not None:
pandaJob = pandaJobForSiteID
# extract libDS
if pandaJob is not None:
if pandaJob.prodSourceLabel == 'panda':
# build Jobs
ddata.buildStatus = pandaJob.jobStatus
for tmpFile in pandaJob.Files:
if tmpFile.type == 'output':
ddata.libDS = tmpFile.dataset
break
else:
# noBuild or libDS
ddata.buildStatus = ''
for tmpFile in pandaJob.Files:
if tmpFile.type == 'input' and tmpFile.lfn.endswith('.lib.tgz'):
ddata.libDS = tmpFile.dataset
break
# release
ddata.releaseVar = pandaJob.AtlasRelease
# cache
tmpCache = re.sub('^[^-]+-*','',pandaJob.homepackage)
tmpCache = re.sub('_','-',tmpCache)
ddata.cacheVar = tmpCache
# return if update status only
if statusOnly:
# build job
if ddata.buildStatus != '':
ddata.buildStatus = sStr.split(',')[0]
# set computingSite mainly for rebrokerage
if pandaJobForSiteID is not None:
ddata.site = pandaJobForSiteID.computingSite
ddata.nRebro = pandaJobForSiteID.specialHandling.split(',').count('rebro') + \
pandaJobForSiteID.specialHandling.split(',').count('sretry')
# return
return ddata
# job parameters
ddata.jobParams = pandaJob.metadata
# extract datasets
iDSlist = []
oDSlist = []
if fileInfo != {}:
if 'inDS' in fileInfo:
iDSlist = fileInfo['inDS']
if 'outDS' in fileInfo:
oDSlist = fileInfo['outDS']
else:
for pandaJob in pandaJobList:
for tmpFile in pandaJob.Files:
if tmpFile.type == 'input' and not tmpFile.lfn.endswith('.lib.tgz'):
if tmpFile.dataset not in iDSlist:
iDSlist.append(tmpFile.dataset)
elif tmpFile.type == 'output' and not tmpFile.lfn.endswith('.lib.tgz'):
if tmpFile.dataset not in oDSlist:
oDSlist.append(tmpFile.dataset)
# convert to string
ddata.inDS = ''
for iDS in iDSlist:
ddata.inDS += '%s,' % iDS
ddata.inDS = ddata.inDS[:-1]
ddata.outDS = ''
for oDS in oDSlist:
ddata.outDS += '%s,' % oDS
ddata.outDS = ddata.outDS[:-1]
# job name
ddata.jobName = pandaJob.jobName
# creation time
ddata.creationTime = pandaJob.creationTime
# job type
ddata.jobType = pandaJob.prodSeriesLabel
# site
ddata.site = pandaJob.computingSite
# cloud
ddata.cloud = pandaJob.cloud
# job ID
ddata.JobID = pandaJob.jobDefinitionID
# retry ID
ddata.retryID = 0
# provenance ID
ddata.provenanceID = pandaJob.jobExecutionID
# groupID
ddata.groupID = pandaJob.jobsetID
ddata.retryJobsetID = -1
if pandaJob.sourceSite not in ['NULL',None,'']:
ddata.parentJobsetID = long(pandaJob.sourceSite)
else:
ddata.parentJobsetID = -1
# job type
ddata.jobType = pandaJob.processingType
# the number of rebrokerage actions
ddata.nRebro = pandaJob.specialHandling.split(',').count('rebro')
# jediTaskID
ddata.jediTaskID = -1
# return
return ddata
# convert JediTask to DB representation
def convertJTtoD(jediTaskDict,localJob=None):
statusOnly = False
if localJob is not None:
# update status only
ddata = localJob
statusOnly = True
else:
# create new spec
ddata = LocalJobSpec()
# max IDs
maxIDs = 20
# task status
ddata.taskStatus = jediTaskDict['status']
# statistic
ddata.jobStatus = jediTaskDict['statistics']
# PandaID
ddata.PandaID = ''
for tmpPandaID in jediTaskDict['PandaID'][:maxIDs]:
ddata.PandaID += '%s,' % tmpPandaID
ddata.PandaID = ddata.PandaID[:-1]
if len(jediTaskDict['PandaID']) > maxIDs:
ddata.PandaID += ',+%sIDs' % (len(jediTaskDict['PandaID'])-maxIDs)
# merge status
if 'mergeStatus' not in jediTaskDict or jediTaskDict['mergeStatus'] is None:
ddata.mergeJobStatus = 'NA'
else:
ddata.mergeJobStatus = jediTaskDict['mergeStatus']
# merge PandaID
ddata.mergeJobID = ''
for tmpPandaID in jediTaskDict['mergePandaID'][:maxIDs]:
ddata.mergeJobID += '%s,' % tmpPandaID
ddata.mergeJobID = ddata.mergeJobID[:-1]
if len(jediTaskDict['mergePandaID']) > maxIDs:
ddata.mergeJobID += ',+%sIDs' % (len(jediTaskDict['mergePandaID'])-maxIDs)
# return if update status only
if statusOnly:
return ddata
# release
ddata.releaseVar = jediTaskDict['transUses']
# cache
if jediTaskDict['transHome'] is None:
tmpCache = ''
else:
tmpCache = re.sub('^[^-]+-*','',jediTaskDict['transHome'])
tmpCache = re.sub('_','-',tmpCache)
ddata.cacheVar = tmpCache
# job parameters
try:
if isinstance(jediTaskDict['cliParams'],unicode):
ddata.jobParams = jediTaskDict['cliParams'].encode('utf_8')
else:
ddata.jobParams = jediTaskDict['cliParams']
# truncate
ddata.jobParams = ddata.jobParams[:1024]
except Exception:
pass
# input datasets
try:
# max number of datasets to show
maxDS = 20
inDSs = jediTaskDict['inDS'].split(',')
strInDS = ''
# concatenate
for tmpInDS in inDSs[:maxDS]:
strInDS += "%s," % tmpInDS
strInDS = strInDS[:-1]
# truncate
if len(inDSs) > maxDS:
strInDS += ',+{0}DSs'.format(len(inDSs)-maxDS)
ddata.inDS = strInDS
except Exception:
ddata.inDS = jediTaskDict['inDS']
# output datasets
ddata.outDS = jediTaskDict['outDS']
# job name
ddata.jobName = jediTaskDict['taskName']
# creation time
ddata.creationTime = jediTaskDict['creationDate']
# job type
ddata.jobType = jediTaskDict['processingType']
# site
ddata.site = jediTaskDict['site']
# cloud
ddata.cloud = jediTaskDict['cloud']
# job ID
ddata.JobID = jediTaskDict['reqID']
# retry ID
ddata.retryID = 0
# provenance ID
ddata.provenanceID = 0
# groupID
ddata.groupID = jediTaskDict['reqID']
# jediTaskID
ddata.jediTaskID = jediTaskDict['jediTaskID']
# IDs for retry
ddata.retryJobsetID = -1
ddata.parentJobsetID = -1
# the number of rebrokerage actions
ddata.nRebro = 0
# return
return ddata
# instantiate database proxy
pdbProxy = PdbProxy()
# just initialize DB
def initialzieDB(verbose=False,restoreDB=False):
if restoreDB:
pdbProxy.deleteDatabase()
pdbProxy.initialize()
pdbProxy.setVerbose(verbose)
# insert job info to DB
def insertJobDB(job,verbose=False):
tmpLog = PLogger.getPandaLogger()
# set update time
job.lastUpdate = datetime.datetime.utcnow()
# make sql
sql1 = "INSERT INTO %s (%s) " % (pdbProxy.tablename,LocalJobSpec.columnNames())
sql1+= "VALUES " + job.values()
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to insert job")
# update job info in DB
def updateJobDB(job,verbose=False,updateTime=None):
# make sql
sql1 = "UPDATE %s SET " % pdbProxy.tablename
sql1 += job.values(forUpdate=True)
sql1 += " WHERE JobID=%s " % job.JobID
# set update time
if updateTime is not None:
job.lastUpdate = updateTime
sql1 += " AND lastUpdate<'%s' " % updateTime.strftime('%Y-%m-%d %H:%M:%S')
else:
job.lastUpdate = datetime.datetime.utcnow()
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to update job")
# set retryID
def setRetryID(job,verbose=False):
# make sql
sql1 = "UPDATE %s SET " % pdbProxy.tablename
sql1 += "retryID=%s,retryJobsetID=%s " % (job.JobID,job.groupID)
sql1 += " WHERE JobID=%s AND (nRebro IS NULL OR nRebro=%s)" % (job.provenanceID,job.nRebro)
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to set retryID")
# delete old jobs
def deleteOldJobs(days,verbose=False):
# time limit
limit = datetime.datetime.utcnow() - datetime.timedelta(days=days)
# make sql
sql1 = "DELETE FROM %s " % pdbProxy.tablename
sql1 += " WHERE creationTime<'%s' " % limit.strftime('%Y-%m-%d %H:%M:%S')
status,out = pdbProxy.execute_direct(sql1)
if not status:
raise RuntimeError("failed to delete old jobs")
# read job info from DB
def readJobDB(JobID,verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE JobID=%s" % JobID
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get JobID=%s" % JobID)
if len(out) == 0:
return None
# instantiate LocalJobSpec
for values in out:
job = LocalJobSpec()
job.pack(values)
# return frozen job if exists
if job.dbStatus == 'frozen':
return job
# return any
return job
# read jobset info from DB
def readJobsetDB(JobsetID,verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE groupID=%s" % JobsetID
# execute
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to get JobsetID=%s" % JobsetID)
if len(out) == 0:
return None
# instantiate LocalJobSpec
tmpJobMap = {}
for tmpStr in out:
values = tmpStr.split('|')
job = LocalJobSpec()
job.pack(values)
# return frozen job if exists
if job.dbStatus == 'frozen' or job.JobID not in tmpJobMap:
tmpJobMap[job.JobID] = job
# make jobset
jobset = LocalJobsetSpec()
# set jobs
jobset.setJobs(tmpJobMap.values())
# return any
return jobset
# check jobset status in DB
def checkJobsetStatus(JobsetID,verbose=False):
# logger
tmpLog = PLogger.getPandaLogger()
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
sql1+= "WHERE groupID=%s" % JobsetID
failedRet = False,None
# execute
status,out = pdbProxy.execute(sql1)
if not status:
tmpLog.error(out)
tmpLog.error("failed to access local DB")
return failedRet
if len(out) == 0:
tmpLog.error("failed to get JobsetID=%s from local DB" % JobsetID)
        return failedRet
# instantiate LocalJobSpec
jobMap = {}
for tmpStr in out:
values = tmpStr.split('|')
job = LocalJobSpec()
job.pack(values)
# use frozen job if exists
if job.JobID not in jobMap or job.dbStatus == 'frozen':
jobMap[job.JobID] = job
# check all job status
for tmpJobID in jobMap:
tmpJobSpec = jobMap[tmpJobID]
        if tmpJobSpec.dbStatus != 'frozen':
return True,'running'
# return
return True,'frozen'
# bulk read job info from DB
def bulkReadJobDB(verbose=False):
# make sql
sql1 = "SELECT %s FROM %s " % (LocalJobSpec.columnNames(),pdbProxy.tablename)
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get jobs")
if len(out) == 0:
return []
# instantiate LocalJobSpec
retMap = {}
jobsetMap = {}
for values in out:
job = LocalJobSpec()
job.pack(values)
# use frozen job if exists
if job.JobID not in retMap or job.dbStatus == 'frozen':
if job.groupID in [0,'0','NULL',-1,'-1']:
retMap[long(job.JobID)] = job
else:
# add jobset
tmpJobsetID = long(job.groupID)
if tmpJobsetID not in retMap or tmpJobsetID not in jobsetMap:
jobsetMap[tmpJobsetID] = []
jobset = LocalJobsetSpec()
retMap[tmpJobsetID] = jobset
# add job
jobsetMap[tmpJobsetID].append(job)
# add jobs to jobset
for tmpJobsetID in jobsetMap:
tmpJobList = jobsetMap[tmpJobsetID]
retMap[tmpJobsetID].setJobs(tmpJobList)
# sort
ids = list(retMap)
ids.sort()
retVal = []
for id in ids:
retVal.append(retMap[id])
# return
return retVal
# get list of JobID
def getListOfJobIDs(nonFrozen=False,verbose=False):
# make sql
sql1 = "SELECT JobID,dbStatus FROM %s " % pdbProxy.tablename
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allList = []
frozenList = []
for item in out:
# extract JobID
tmpID = long(item[0])
# status in DB
tmpStatus = item[-1]
# keep all jobs
if tmpID not in allList:
allList.append(tmpID)
# keep frozen jobs
if nonFrozen and tmpStatus == 'frozen':
if tmpID not in frozenList:
frozenList.append(tmpID)
# remove redundant jobs
retVal = []
for item in allList:
if item not in frozenList:
retVal.append(item)
# sort
retVal.sort()
# return
return retVal
# get map of jobsetID and JobIDs
def getMapJobsetIDJobIDs(verbose=False):
# make sql
sql1 = "SELECT groupID,JobID FROM %s WHERE groupID is not NULL and groupID != 0 and groupID != ''" % pdbProxy.tablename
# execute
status,out = pdbProxy.execute(sql1)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allMap = {}
for item in out:
# JobsetID
tmpJobsetID = long(item.split('|')[0])
# JobID
tmpJobID = long(item.split('|')[-1])
# append
if tmpJobsetID not in allMap:
allMap[tmpJobsetID] = []
if tmpJobID not in allMap[tmpJobsetID]:
allMap[tmpJobsetID].append(tmpJobID)
# sort
for tmpKey in allMap.keys():
allMap[tmpKey].sort()
# return
return allMap
# make JobSetSpec
def makeJobsetSpec(jobList):
jobset = LocalJobsetSpec()
jobset.setJobs(jobList)
return jobset
# get map of jobsetID and jediTaskID
def getJobsetTaskMap(verbose=False):
# make sql
sql1 = "SELECT groupID,jediTaskID FROM %s WHERE groupID is not NULL and groupID != 0 and groupID != '' and jediTaskID is not null and jediTaskID != ''" % pdbProxy.tablename
# execute
status,out = pdbProxy.execute_direct(sql1, fetch=True)
if not status:
raise RuntimeError("failed to get list of JobIDs")
allMap = {}
for item in out:
# JobsetID
tmpJobsetID = long(item[0])
# JobID
jediTaskID = long(item[-1])
# append
allMap[jediTaskID] = tmpJobsetID
# return
return allMap
|
[] |
[] |
[
"PANDA_CONFIG_ROOT"
] |
[]
|
["PANDA_CONFIG_ROOT"]
|
python
| 1 | 0 | |
fintrans/1.9/influx-ingest/main.go
|
// Copyright 2016 Mesosphere. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/Shopify/sarama"
log "github.com/Sirupsen/logrus"
"github.com/influxdata/influxdb/client/v2"
)
const (
onversion string = "0.1.0"
prodInfluxAPI string = "http://influxdb.marathon.l4lb.thisdcos.directory:8086"
)
var (
version bool
wg sync.WaitGroup
cities []string
// FQDN/IP + port of a Kafka broker:
broker string
// the URL for the InfluxDB HTTP API:
influxAPI string
// into which InfluxDB database to ingest transactions:
targetdb string
// how many seconds to wait between ingesting transactions:
ingestwaitsec time.Duration
// the ingestion queue
iqueue chan Transaction
)
// Transaction defines a single transaction:
// in which City it originated, the Source
// account the money came from as well as
// the Target account the money goes to and
// last but not least the Amount that was
// transferred in the transaction.
type Transaction struct {
City string
Source string
Target string
Amount int
}
func about() {
fmt.Printf("\nThis is the fintrans InfluxDB ingestion consumer in version %s\n", onversion)
}
func init() {
cities = []string{
"London",
"NYC",
"SF",
"Moscow",
"Tokyo",
}
    // the command line parameters:
flag.BoolVar(&version, "version", false, "Display version information")
flag.StringVar(&broker, "broker", "", "The FQDN or IP address and port of a Kafka broker. Example: broker-1.kafka.mesos:9382 or 10.0.3.178:9398")
flag.Usage = func() {
fmt.Printf("Usage: %s [args]\n\n", os.Args[0])
fmt.Println("Arguments:")
flag.PrintDefaults()
}
flag.Parse()
// the optional environment variables:
influxAPI = prodInfluxAPI
if ia := os.Getenv("INFLUX_API"); ia != "" {
influxAPI = ia
}
targetdb = "fintrans"
if td := os.Getenv("INFLUX_TARGET_DB"); td != "" {
targetdb = td
}
ingestwaitsec = 1
if iw := os.Getenv("INGEST_WAIT_SEC"); iw != "" {
if iwi, err := strconv.Atoi(iw); err == nil {
ingestwaitsec = time.Duration(iwi)
}
}
// creating the buffered channel holding up to 100 transactions:
iqueue = make(chan Transaction, 100)
}
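// ingest connects to InfluxDB and drains the ingestion queue, writing each
// dequeued transaction as a single point into the target database.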
func ingest() {
if c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: influxAPI,
Username: "root",
Password: "root",
}); err != nil {
log.WithFields(log.Fields{"func": "ingest"}).Error(err)
} else {
defer c.Close()
log.WithFields(log.Fields{"func": "ingest"}).Info("Connected to ", fmt.Sprintf("%#v", c))
for {
t := <-iqueue
log.WithFields(log.Fields{"func": "ingest"}).Info(fmt.Sprintf("Dequeued %#v", t))
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: targetdb,
Precision: "s", // second resultion
})
log.WithFields(log.Fields{"func": "ingest"}).Info(fmt.Sprintf("Preparing batch %#v", bp))
tags := map[string]string{
"source": t.Source,
"target": t.Target,
}
fields := map[string]interface{}{
"amount": t.Amount,
}
pt, _ := client.NewPoint(t.City, tags, fields, time.Now())
bp.AddPoint(pt)
log.WithFields(log.Fields{"func": "ingest"}).Info(fmt.Sprintf("Added point %#v", pt))
if err := c.Write(bp); err != nil {
log.WithFields(log.Fields{"func": "ingest"}).Error("Could not ingest transaction: ", err.Error())
} else {
log.WithFields(log.Fields{"func": "ingest"}).Info(fmt.Sprintf("Ingested %#v", bp))
}
time.Sleep(ingestwaitsec * time.Second)
log.WithFields(log.Fields{"func": "ingest"}).Info(fmt.Sprintf("Current queue length: %d", len(iqueue)))
}
}
}
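// consume reads raw transaction messages from the given Kafka topic (single
// partition, newest offset), parses them and queues them for ingestion.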
func consume(topic string) {
var consumer sarama.Consumer
defer wg.Done()
if c, err := sarama.NewConsumer([]string{broker}, nil); err != nil {
log.WithFields(log.Fields{"func": "consume"}).Error(err)
return
} else {
consumer = c
}
defer func() {
if err := consumer.Close(); err != nil {
log.WithFields(log.Fields{"func": "consume"}).Error(err)
}
}()
if partitionConsumer, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest); err != nil {
log.WithFields(log.Fields{"func": "consume"}).Error(err)
return
} else {
defer func() {
if err := partitionConsumer.Close(); err != nil {
log.WithFields(log.Fields{"func": "consume"}).Error(err)
}
}()
for {
msg := <-partitionConsumer.Messages()
traw := strings.Split(string(msg.Value), " ")
amount := 0
if a, err := strconv.Atoi(traw[2]); err == nil {
amount = a
}
t := Transaction{City: msg.Topic, Source: traw[0], Target: traw[1], Amount: amount}
iqueue <- t
log.WithFields(log.Fields{"func": "consume"}).Info(fmt.Sprintf("Queued %#v", t))
}
}
}
func main() {
if version {
about()
os.Exit(0)
}
if broker == "" {
flag.Usage()
os.Exit(1)
}
wg.Add(len(cities))
go ingest()
for _, city := range cities {
go consume(city)
}
wg.Wait()
}
|
[
"\"INFLUX_API\"",
"\"INFLUX_TARGET_DB\"",
"\"INGEST_WAIT_SEC\""
] |
[] |
[
"INFLUX_TARGET_DB",
"INGEST_WAIT_SEC",
"INFLUX_API"
] |
[]
|
["INFLUX_TARGET_DB", "INGEST_WAIT_SEC", "INFLUX_API"]
|
go
| 3 | 0 | |
tests/msg_hz_test.py
|
#!/usr/bin/env python3
# Set MAVLink protocol to 2.
import os
os.environ["MAVLINK20"] = "1"
import time
import threading
import traceback
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from mycelium.components import RedisBridge, Connector
from mycelium_utils import Scripter, utils, DefaultConfig
from pymavlink import mavutil
class RedisToAPScripterExt:
instance = None
i=0
def __init__(self, **kwargs):
if not RedisToAPScripterExt.instance:
RedisToAPScripterExt.instance = RedisToAPScripterExt.__RedisToAPScripterExt(**kwargs)
def __getattr__(self, name):
return getattr(self.instance, name)
class __RedisToAPScripterExt(Scripter):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.rb = RedisBridge(db=self.rd_cfg.databases['instruments'])
self.keys = self.rd_cfg.generate_flat_keys('instruments')
self.conn = mavutil.mavlink_connection(
self.cfg.redis_to_ap,
autoreconnect = True,
source_system = 1,
source_component = 93,
baud=self.cfg.connection_baudrate,
force_connected=True
)
self.lock = threading.Lock()
default_msg_hz = 30.0
msg_hz = {
'send_vision_position_estimate': 30.0,
'send_obstacle_distance': 15.0
}
self.mavlink_thread = threading.Thread(target=self.mavlink_loop, args=[self.conn])
self.mavlink_thread.start()
self.sched = BackgroundScheduler()
logging.getLogger('apscheduler').setLevel(logging.ERROR)
self.data = {}
for k, v in self.keys.items():
try:
if v in msg_hz.keys():
seconds = 1.0/msg_hz[v]
else:
seconds = 1.0/default_msg_hz
func = getattr(self, v)
self.sched.add_job(self.send_message,
'interval',
seconds=seconds,
args=[func, k],
max_instances=1
)
                except Exception:
                    utils.progress(traceback.format_exc())
else:
self.data[k] = None
# https://mavlink.io/en/messages/common.html#VISION_POSITION_ESTIMATE
def send_vision_position_estimate(self, current_time_us, x, y, z,
roll, pitch, yaw, covariance, reset_counter):
# self.connect(self.connection_string, self.connection_baudrate, self.source_system, self.source_component)
self.conn.mav.vision_position_estimate_send(
current_time_us, # us Timestamp (UNIX time or time since system boot)
x, # Local X position
y, # Local Y position
z, # Local Z position
roll, # Roll angle
pitch, # Pitch angle
yaw, # Yaw angle
covariance, # Row-major representation of pose 6x6 cross-covariance matrix
reset_counter # Estimate reset counter. Increment every time pose estimate jumps.
)
# def send_vision_position_delta_message(self, current_time_us, delta_time_us, delta_angle_rad, delta_position_m, current_confidence_level):
# conn.mav.vision_position_delta_send(
# current_time_us, # us: Timestamp (UNIX time or time since system boot)
# delta_time_us, # us: Time since last reported camera frame
# delta_angle_rad, # float[3] in radian: Defines a rotation vector in body frame that rotates the vehicle from the previous to the current orientation
# delta_position_m, # float[3] in m: Change in position from previous to current frame rotated into body frame (0=forward, 1=right, 2=down)
# current_confidence_level # Normalized confidence value from 0 to 100.
# )
# def send_vision_speed_estimate(self, current):
# self.conn.mav.vision_speed_estimate_send(
# current_time_us, # us Timestamp (UNIX time or time since system boot)
# V_aeroRef_aeroBody[0][3], # Global X speed
# V_aeroRef_aeroBody[1][3], # Global Y speed
# V_aeroRef_aeroBody[2][3], # Global Z speed
# covariance, # covariance
# reset_counter # Estimate reset counter. Increment every time pose estimate jumps.
# )
# https://mavlink.io/en/messages/common.html#OBSTACLE_DISTANCE
def send_obstacle_distance(self, current_time_us, sensor_type, distances, increment,
min_distance, max_distance, increment_f, angle_offset, mav_frame):
# self.connect(self.connection_string, self.connection_baudrate, self.source_system, self.source_component)
self.conn.mav.obstacle_distance_send(
current_time_us, # us Timestamp (UNIX time or time since system boot)
sensor_type, # sensor_type, defined here: https://mavlink.io/en/messages/common.html#MAV_DISTANCE_SENSOR
distances, # distances, uint16_t[72], cm
increment, # increment, uint8_t, deg
min_distance, # min_distance, uint16_t, cm
max_distance, # max_distance, uint16_t, cm
increment_f, # increment_f, float, deg
angle_offset, # angle_offset, float, deg
mav_frame # MAV_FRAME, vehicle-front aligned: https://mavlink.io/en/messages/common.html#MAV_FRAME_BODY_FRD
)
def run_main(self):
self.sched.start()
while not self.exit_threads:
with self.lock:
for k, _ in self.keys.items():
self.data[k] = self.rb.get_key_by_string(k)
# time.sleep(0.3)
# self.conn.send_heartbeat()
# m = self.conn.get_callbacks(['HEARTBEAT'])
# if m is None:
# continue
# self.logger.log_debug("Received callback: %s" % m)
# # utils.progress(m)
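        # Keep the MAVLink link alive: send periodic heartbeats and log any
        # HEARTBEAT messages received back from the autopilot.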
def mavlink_loop(self, conn, callbacks=['HEARTBEAT']):
while not self.exit_threads:
self.conn.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_ONBOARD_CONTROLLER,
mavutil.mavlink.MAV_AUTOPILOT_GENERIC,
0,
0,
0)
m = self.conn.recv_match(type=callbacks, timeout=1, blocking=True)
if m is None:
continue
self.logger.log_debug("Received callback: %s" % m)
def send_message(self, func, key):
while not self.exit_threads:
with self.lock:
try:
value = self.data[key]
if value is not None:
func(*value)
except Exception as e:
self.logger.log_error("Could not send %s"%e)
def close_script(self):
try:
self.sched.shutdown()
self.mavlink_thread.join()
self.conn.close()
except:
pass
scripter = RedisToAPScripterExt(log_source="redis_to_ap")
scripter.run()
|
[] |
[] |
[
"MAVLINK20"
] |
[]
|
["MAVLINK20"]
|
python
| 1 | 0 | |
tests/download_helpers/sourceforge.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the download helper object implementations."""
import os
import unittest
from l2tdevtools.download_helpers import sourceforge
from tests import test_lib
@unittest.skipIf(
os.environ.get('APPVEYOR', ''), 'Test is flaky for Windows on AppVeyor')
class SourceForgeDownloadHelperTest(test_lib.BaseTestCase):
"""Tests for the Source Forge download helper."""
_DOWNLOAD_URL = 'https://sourceforge.net/projects/pyparsing/files'
_PROJECT_NAME = 'pyparsing'
# Hard-coded version to check parsing of SourceForge page.
_PROJECT_VERSION = '2.2.0'
def testGetLatestVersion(self):
"""Tests the GetLatestVersion functions."""
download_helper = sourceforge.SourceForgeDownloadHelper(self._DOWNLOAD_URL)
latest_version = download_helper.GetLatestVersion(self._PROJECT_NAME, None)
self.assertEqual(latest_version, self._PROJECT_VERSION)
def testGetDownloadURL(self):
"""Tests the GetDownloadURL functions."""
download_helper = sourceforge.SourceForgeDownloadHelper(self._DOWNLOAD_URL)
download_url = download_helper.GetDownloadURL(
self._PROJECT_NAME, self._PROJECT_VERSION)
expected_download_url = (
'https://downloads.sourceforge.net/project/{0:s}/{0:s}/{0:s}-{1:s}'
'/{0:s}-{1:s}.tar.gz').format(
self._PROJECT_NAME, self._PROJECT_VERSION)
self.assertEqual(download_url, expected_download_url)
def testGetProjectIdentifier(self):
"""Tests the GetProjectIdentifier functions."""
download_helper = sourceforge.SourceForgeDownloadHelper(self._DOWNLOAD_URL)
project_identifier = download_helper.GetProjectIdentifier()
expected_project_identifier = 'net.sourceforge.projects.{0:s}'.format(
self._PROJECT_NAME)
self.assertEqual(project_identifier, expected_project_identifier)
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"APPVEYOR"
] |
[]
|
["APPVEYOR"]
|
python
| 1 | 0 | |
src/functions/send_webhook/main.go
|
package main
import (
"bytes"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"github.com/aws/aws-lambda-go/lambda"
)
type MyAttribute struct {
Room string `json:"room"`
}
type MyPlacementInfo struct {
Attributes MyAttribute `json:"attributes"`
}
type MyEvent struct {
PlacementInfo MyPlacementInfo `json:"placementInfo"`
}
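// Payload is the JSON body posted to the webhook: a formatted message plus the
// original event as metadata.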
type Payload struct {
Message string `json:"message"`
Meta MyEvent `json:"meta"`
}
// HandleRequest formats a message for the room from the placement event and posts it to the configured webhook URL.
func HandleRequest(ctx context.Context, event MyEvent) (string, error) {
log.Print(event)
var webhookURL = os.Getenv("WEBHOOK_URL")
var message = os.Getenv("MESSAGE")
log.Print(message)
log.Print(fmt.Sprintf(message, event.PlacementInfo.Attributes.Room))
var payload = Payload{
Message: fmt.Sprintf(message, event.PlacementInfo.Attributes.Room),
Meta: event}
params, err := json.Marshal(payload)
if err != nil {
return "", err
}
req, err := http.NewRequest(
"POST",
webhookURL,
bytes.NewBuffer(params),
)
if err != nil {
return "", err
}
    // set the Content-Type header
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
return "normal end", nil
}
func main() {
lambda.Start(HandleRequest)
}
|
[
"\"WEBHOOK_URL\"",
"\"MESSAGE\""
] |
[] |
[
"WEBHOOK_URL",
"MESSAGE"
] |
[]
|
["WEBHOOK_URL", "MESSAGE"]
|
go
| 2 | 0 | |
libvirt/helpers_test.go
|
package libvirt
import (
"fmt"
"log"
"os"
"reflect"
"strings"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/libvirt/libvirt-go"
"github.com/libvirt/libvirt-go-xml"
"github.com/terraform-providers/terraform-provider-ignition/ignition"
)
// This file contain function helpers used for testsuite/testacc
var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *schema.Provider
func init() {
testAccProvider = Provider().(*schema.Provider)
testAccProviders = map[string]terraform.ResourceProvider{
"libvirt": testAccProvider,
"ignition": ignition.Provider(),
}
}
func TestProvider(t *testing.T) {
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestProvider_impl(t *testing.T) {
var _ terraform.ResourceProvider = Provider()
}
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("LIBVIRT_DEFAULT_URI"); v == "" {
t.Fatal("LIBVIRT_DEFAULT_URI must be set for acceptance tests")
}
}
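// testAccEnabled reports whether acceptance tests are enabled via the TF_ACC
// environment variable.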
func testAccEnabled() bool {
v := os.Getenv("TF_ACC")
return v == "1" || strings.ToLower(v) == "true"
}
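// skipIfPrivilegedDisabled skips tests that require privileged access when
// TF_LIBVIRT_DISABLE_PRIVILEGED_TESTS is set.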
func skipIfPrivilegedDisabled(t *testing.T) {
if os.Getenv("TF_LIBVIRT_DISABLE_PRIVILEGED_TESTS") != "" {
        t.Skip("skipping test; Environment variable `TF_LIBVIRT_DISABLE_PRIVILEGED_TESTS` is set")
}
}
// //////////////////////////////////////////////////////////////////
// general
// //////////////////////////////////////////////////////////////////
// getResourceFromTerraformState gets a resource by name
// from terraform states produced during testacc
// and return the resource
func getResourceFromTerraformState(resourceName string, state *terraform.State) (*terraform.ResourceState, error) {
rs, ok := state.RootModule().Resources[resourceName]
if !ok {
return nil, fmt.Errorf("Not found: %s", resourceName)
}
if rs.Primary.ID == "" {
return nil, fmt.Errorf("No libvirt resource key ID is set")
}
return rs, nil
}
// ** resource specifics helpers **
// getPoolFromTerraformState lookup pool by name and return the libvirt pool from a terraform state
func getPoolFromTerraformState(name string, state *terraform.State, virConn libvirt.Connect) (*libvirt.StoragePool, error) {
rs, err := getResourceFromTerraformState(name, state)
if err != nil {
return nil, err
}
pool, err := virConn.LookupStoragePoolByUUIDString(rs.Primary.ID)
if err != nil {
return nil, err
}
log.Printf("[DEBUG]:The ID is %s", rs.Primary.ID)
return pool, nil
}
// getVolumeFromTerraformState lookup volume by name and return the libvirt volume from a terraform state
func getVolumeFromTerraformState(name string, state *terraform.State, virConn libvirt.Connect) (*libvirt.StorageVol, error) {
rs, err := getResourceFromTerraformState(name, state)
if err != nil {
return nil, err
}
vol, err := virConn.LookupStorageVolByKey(rs.Primary.ID)
if err != nil {
return nil, err
}
log.Printf("[DEBUG]:The ID is %s", rs.Primary.ID)
return vol, nil
}
// helper used in network tests for retrieve xml network definition.
func getNetworkDef(state *terraform.State, name string, virConn libvirt.Connect) (*libvirtxml.Network, error) {
var network *libvirt.Network
rs, err := getResourceFromTerraformState(name, state)
if err != nil {
return nil, err
}
network, err = virConn.LookupNetworkByUUIDString(rs.Primary.ID)
if err != nil {
return nil, err
}
networkDef, err := getXMLNetworkDefFromLibvirt(network)
if err != nil {
return nil, fmt.Errorf("Error reading libvirt network XML description: %s", err)
}
return &networkDef, nil
}
// //////////////////////////////////////////////////////////////////
// network
// //////////////////////////////////////////////////////////////////
// testAccCheckNetworkExists checks that the network exists
func testAccCheckNetworkExists(name string, network *libvirt.Network) resource.TestCheckFunc {
return func(state *terraform.State) error {
rs, err := getResourceFromTerraformState(name, state)
if err != nil {
return err
}
virConn := testAccProvider.Meta().(*Client).libvirt
networkRetrived, err := virConn.LookupNetworkByUUIDString(rs.Primary.ID)
if err != nil {
return err
}
realID, err := networkRetrived.GetUUIDString()
if err != nil {
return err
}
if realID != rs.Primary.ID {
return fmt.Errorf("Libvirt network not found")
}
*network = *networkRetrived
return nil
}
}
// testAccCheckLibvirtNetworkDestroy checks that the network has been destroyed
func testAccCheckLibvirtNetworkDestroy(s *terraform.State) error {
virtConn := testAccProvider.Meta().(*Client).libvirt
for _, rs := range s.RootModule().Resources {
if rs.Type != "libvirt_network" {
continue
}
_, err := virtConn.LookupNetworkByUUIDString(rs.Primary.ID)
if err == nil {
return fmt.Errorf(
"Error waiting for network (%s) to be destroyed: %s",
rs.Primary.ID, err)
}
}
return nil
}
// testAccCheckDNSHosts checks the expected DNS hosts in a network
func testAccCheckDNSHosts(name string, expected []libvirtxml.NetworkDNSHost) resource.TestCheckFunc {
return func(s *terraform.State) error {
virConn := testAccProvider.Meta().(*Client).libvirt
networkDef, err := getNetworkDef(s, name, *virConn)
if err != nil {
return err
}
if networkDef.DNS == nil {
return fmt.Errorf("DNS block not found in networkDef")
}
actual := networkDef.DNS.Host
if len(expected) != len(actual) {
return fmt.Errorf("len(expected): %d != len(actual): %d", len(expected), len(actual))
}
for _, e := range expected {
found := false
for _, a := range actual {
if reflect.DeepEqual(a.IP, e.IP) && reflect.DeepEqual(a.Hostnames, e.Hostnames) {
found = true
break
}
}
if !found {
return fmt.Errorf("Unable to find:%v in: %v", e, actual)
}
}
return nil
}
}
// testAccCheckLibvirtNetworkDhcpStatus checks the expected DHCP status
func testAccCheckLibvirtNetworkDhcpStatus(name string, expectedDhcpStatus string) resource.TestCheckFunc {
return func(s *terraform.State) error {
virConn := testAccProvider.Meta().(*Client).libvirt
networkDef, err := getNetworkDef(s, name, *virConn)
if err != nil {
return err
}
if expectedDhcpStatus == "disabled" {
for _, ips := range networkDef.IPs {
// &libvirtxml.NetworkDHCP{..} should be nil when dhcp is disabled
if ips.DHCP != nil {
fmt.Printf("%#v", ips.DHCP)
return fmt.Errorf("the network should have DHCP disabled")
}
}
}
if expectedDhcpStatus == "enabled" {
for _, ips := range networkDef.IPs {
if ips.DHCP == nil {
return fmt.Errorf("the network should have DHCP enabled")
}
}
}
return nil
}
}
// testAccCheckLibvirtNetworkBridge checks the bridge exists and has the expected properties
func testAccCheckLibvirtNetworkBridge(resourceName string, bridgeName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
virConn := testAccProvider.Meta().(*Client).libvirt
networkDef, err := getNetworkDef(s, resourceName, *virConn)
if err != nil {
return err
}
if networkDef.Bridge == nil {
return fmt.Errorf("Bridge type of network should be not nil")
}
if networkDef.Bridge.Name != bridgeName {
fmt.Printf("%#v", networkDef)
return fmt.Errorf("fail: network brigde property were not set correctly")
}
return nil
}
}
// testAccCheckLibvirtNetworkDNSForwarders checks the DNS forwarders in the libvirt network
func testAccCheckLibvirtNetworkDNSForwarders(name string, expected []libvirtxml.NetworkDNSForwarder) resource.TestCheckFunc {
return func(s *terraform.State) error {
virConn := testAccProvider.Meta().(*Client).libvirt
networkDef, err := getNetworkDef(s, name, *virConn)
if err != nil {
return err
}
if networkDef.DNS == nil {
return fmt.Errorf("DNS block not found in networkDef")
}
actual := networkDef.DNS.Forwarders
if len(expected) != len(actual) {
return fmt.Errorf("len(expected): %d != len(actual): %d", len(expected), len(actual))
}
for _, e := range expected {
found := false
for _, a := range actual {
if reflect.DeepEqual(a, e) {
found = true
break
}
}
if !found {
return fmt.Errorf("Unable to find %v in %v", e, actual)
}
}
return nil
}
}
// testAccCheckLibvirtNetworkLocalOnly checks the local-only property of the Domain
func testAccCheckLibvirtNetworkLocalOnly(name string, expectLocalOnly bool) resource.TestCheckFunc {
return func(s *terraform.State) error {
virConn := testAccProvider.Meta().(*Client).libvirt
networkDef, err := getNetworkDef(s, name, *virConn)
if err != nil {
return err
}
if expectLocalOnly {
if networkDef.Domain == nil || networkDef.Domain.LocalOnly != "yes" {
return fmt.Errorf("networkDef.Domain.LocalOnly is not true")
}
} else {
if networkDef.Domain != nil && networkDef.Domain.LocalOnly != "no" {
return fmt.Errorf("networkDef.Domain.LocalOnly is true")
}
}
return nil
}
}
// testAccCheckLibvirtNetworkDNSEnable checks the dns-enable property of the Domain
func testAccCheckLibvirtNetworkDNSEnableOrDisable(name string, expectDNS bool) resource.TestCheckFunc {
return func(s *terraform.State) error {
virConn := testAccProvider.Meta().(*Client).libvirt
networkDef, err := getNetworkDef(s, name, *virConn)
if err != nil {
return err
}
if expectDNS {
if networkDef.DNS == nil || networkDef.DNS.Enable != "yes" {
return fmt.Errorf("networkDef.DNS.Enable is not true")
}
}
if !expectDNS {
if networkDef.DNS != nil && networkDef.DNS.Enable != "no" {
return fmt.Errorf("networkDef.DNS.Enable is true")
}
}
return nil
}
}
// testAccCheckDnsmasqOptions checks the expected Dnsmasq options in a network
func testAccCheckDnsmasqOptions(name string, expected []libvirtxml.NetworkDnsmasqOption) resource.TestCheckFunc {
return func(s *terraform.State) error {
virConn := testAccProvider.Meta().(*Client).libvirt
networkDef, err := getNetworkDef(s, name, *virConn)
if err != nil {
return err
}
if networkDef.DnsmasqOptions == nil {
return fmt.Errorf("DnsmasqOptions block not found in networkDef")
}
actual := networkDef.DnsmasqOptions.Option
if len(expected) != len(actual) {
return fmt.Errorf("len(expected): %d != len(actual): %d", len(expected), len(actual))
}
for _, e := range expected {
found := false
for _, a := range actual {
if reflect.DeepEqual(a.Value, e.Value) {
found = true
break
}
}
if !found {
return fmt.Errorf("Unable to find:%v in: %v", e, actual)
}
}
return nil
}
}
|
[
"\"LIBVIRT_DEFAULT_URI\"",
"\"TF_ACC\"",
"\"TF_LIBVIRT_DISABLE_PRIVILEGED_TESTS\""
] |
[] |
[
"TF_LIBVIRT_DISABLE_PRIVILEGED_TESTS",
"LIBVIRT_DEFAULT_URI",
"TF_ACC"
] |
[]
|
["TF_LIBVIRT_DISABLE_PRIVILEGED_TESTS", "LIBVIRT_DEFAULT_URI", "TF_ACC"]
|
go
| 3 | 0 | |
testing/deploy_utils.py
|
import argparse
import datetime
import json
import logging
import os
import shutil
import ssl
import tempfile
import time
import uuid
import requests
import yaml
from googleapiclient import discovery, errors
from kubernetes import client as k8s_client
from kubernetes.client import rest
from kubernetes.config import kube_config
from oauth2client.client import GoogleCredentials
from kubeflow.testing import test_util, util # pylint: disable=no-name-in-module
from testing import vm_util
def get_gcp_identity():
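  """Return the GCP account that gcloud is currently configured to use."""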
identity = util.run(["gcloud", "config", "get-value", "account"])
logging.info("Current GCP account: %s", identity)
return identity
def create_k8s_client():
# We need to load the kube config so that we can have credentials to
# talk to the APIServer.
util.load_kube_config(persist_config=False)
# Create an API client object to talk to the K8s master.
api_client = k8s_client.ApiClient()
return api_client
def _setup_test(api_client, run_label):
"""Create the namespace for the test.
Returns:
test_dir: The local test directory.
"""
api = k8s_client.CoreV1Api(api_client)
namespace = k8s_client.V1Namespace()
namespace.api_version = "v1"
namespace.kind = "Namespace"
namespace.metadata = k8s_client.V1ObjectMeta(
name=run_label, labels={
"app": "kubeflow-e2e-test",
})
try:
logging.info("Creating namespace %s", namespace.metadata.name)
namespace = api.create_namespace(namespace)
logging.info("Namespace %s created.", namespace.metadata.name)
except rest.ApiException as e:
if e.status == 409:
logging.info("Namespace %s already exists.", namespace.metadata.name)
else:
raise
return namespace
def setup_kubeflow_ks_app(dir, namespace, github_token, api_client):
"""Create a ksonnet app for Kubeflow"""
util.makedirs(dir)
logging.info("Using test directory: %s", dir)
namespace_name = namespace
namespace = _setup_test(api_client, namespace_name)
logging.info("Using namespace: %s", namespace)
if github_token:
logging.info("Setting GITHUB_TOKEN to %s.", github_token)
# Set a GITHUB_TOKEN so that we don't rate limited by GitHub;
# see: https://github.com/ksonnet/ksonnet/issues/233
os.environ["GITHUB_TOKEN"] = github_token
if not os.getenv("GITHUB_TOKEN"):
logging.warning("GITHUB_TOKEN not set; you will probably hit Github API "
"limits.")
# Initialize a ksonnet app.
app_name = "kubeflow-test-" + uuid.uuid4().hex[0:4]
util.run(
[
"ks",
"init",
app_name,
], cwd=dir)
app_dir = os.path.join(dir, app_name)
kubeflow_registry = "github.com/kubeflow/kubeflow/tree/master/kubeflow"
util.run(
["ks", "registry", "add", "kubeflow", kubeflow_registry], cwd=app_dir)
# Install required packages
packages = ["kubeflow/core", "kubeflow/tf-serving", "kubeflow/tf-job", "kubeflow/pytorch-job", "kubeflow/argo"]
# Instead of installing packages we edit the app.yaml file directly
#for p in packages:
# util.run(["ks", "pkg", "install", p], cwd=app_dir)
app_file = os.path.join(app_dir,"app.yaml")
with open(app_file) as f:
    app_yaml = yaml.safe_load(f)
libraries = {}
for pkg in packages:
pkg = pkg.split("/")[1]
libraries[pkg] = {'gitVersion':{'commitSha': 'fake', 'refSpec': 'fake'}, 'name': pkg, 'registry': "kubeflow"}
app_yaml['libraries'] = libraries
with open(app_file, "w") as f:
yaml.dump(app_yaml, f)
# Create vendor directory with a symlink to the src
# so that we use the code at the desired commit.
target_dir = os.path.join(app_dir, "vendor", "kubeflow")
REPO_ORG = "kubeflow"
REPO_NAME = "kubeflow"
REGISTRY_PATH = "kubeflow"
source = os.path.join(dir, "src", REPO_ORG, REPO_NAME,
REGISTRY_PATH)
logging.info("Creating link %s -> %s", target_dir, source)
os.symlink(source, target_dir)
return app_dir
def log_operation_status(operation):
"""A callback to use with wait_for_operation."""
name = operation.get("name", "")
status = operation.get("status", "")
logging.info("Operation %s status %s", name, status)
def wait_for_operation(client,
project,
op_id,
timeout=datetime.timedelta(hours=1),
polling_interval=datetime.timedelta(seconds=5),
status_callback=log_operation_status):
"""Wait for the specified operation to complete.
Args:
client: Client for the API that owns the operation.
project: project
op_id: Operation id.
timeout: A datetime.timedelta expressing the amount of time to wait before
giving up.
polling_interval: A datetime.timedelta to represent the amount of time to
wait between requests polling for the operation status.
Returns:
op: The final operation.
Raises:
TimeoutError: if we timeout waiting for the operation to complete.
"""
endtime = datetime.datetime.now() + timeout
while True:
try:
op = client.operations().get(
project=project, operation=op_id).execute()
if status_callback:
status_callback(op)
status = op.get("status", "")
# Need to handle other status's
if status == "DONE":
return op
except ssl.SSLError as e:
logging.error("Ignoring error %s", e)
if datetime.datetime.now() > endtime:
raise TimeoutError(
"Timed out waiting for op: {0} to complete.".format(op_id))
time.sleep(polling_interval.total_seconds())
# Linter complains if we don't have a return here even though its unreachable.
return None
|
[] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
python
| 1 | 0 | |
frontend/app.py
|
import os
import requests
import json
from flask import Flask, render_template
app = Flask(__name__)
app.secret_key = '3c0716f88780d6d642330dfa3c96dbca' # md5 -s incremental-istio
if os.environ.get('ENABLE_TRACING', None) is not None:
from opencensus.ext.stackdriver import trace_exporter as stackdriver_exporter
from opencensus.ext.flask.flask_middleware import FlaskMiddleware
from opencensus.trace import config_integration
project = os.environ.get('PROJECT_ID')
    exporter = stackdriver_exporter.StackdriverExporter(project_id=project)
middleware = FlaskMiddleware(app, exporter=exporter)
config_integration.trace_integrations(["requests"])
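# Base URL of the weather backend; configurable via BACKEND_HOSTPORT (defaults to localhost:5000).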
BASE_URL = 'http://{hostport}'.format(
hostport=os.environ.get('BACKEND_HOSTPORT', 'localhost:5000')
)
@app.route('/', methods=['GET'])
def index():
resp = requests.get(BASE_URL + '/api/weather')
    if resp.status_code != 200:
return render_template('index.html', cities=[], code=resp.status_code, msg=resp.text)
else:
return render_template('index.html', cities=resp.json(), msg=None)
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, debug=True)
|
[] |
[] |
[
"ENABLE_TRACING",
"PORT",
"BACKEND_HOSTPORT",
"PROJECT_ID"
] |
[]
|
["ENABLE_TRACING", "PORT", "BACKEND_HOSTPORT", "PROJECT_ID"]
|
python
| 4 | 0 | |
vendor/github.com/docker/cli/cli/command/cli.go
|
package command
import (
"io"
"net"
"net/http"
"os"
"runtime"
"time"
"github.com/docker/cli/cli"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
cliflags "github.com/docker/cli/cli/flags"
"github.com/docker/cli/cli/trust"
dopts "github.com/docker/cli/opts"
"github.com/docker/docker/api"
"github.com/docker/docker/client"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/theupdateframework/notary"
notaryclient "github.com/theupdateframework/notary/client"
"github.com/theupdateframework/notary/passphrase"
"golang.org/x/net/context"
)
// Streams is an interface which exposes the standard input and output streams
type Streams interface {
In() *InStream
Out() *OutStream
Err() io.Writer
}
// Cli represents the docker command line client.
type Cli interface {
Client() client.APIClient
Out() *OutStream
Err() io.Writer
In() *InStream
SetIn(in *InStream)
ConfigFile() *configfile.ConfigFile
ServerInfo() ServerInfo
NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
}
// DockerCli is an instance the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
configFile *configfile.ConfigFile
in *InStream
out *OutStream
err io.Writer
client client.APIClient
defaultVersion string
server ServerInfo
}
// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
func (cli *DockerCli) DefaultVersion() string {
return cli.defaultVersion
}
// Client returns the APIClient
func (cli *DockerCli) Client() client.APIClient {
return cli.client
}
// Out returns the writer used for stdout
func (cli *DockerCli) Out() *OutStream {
return cli.out
}
// Err returns the writer used for stderr
func (cli *DockerCli) Err() io.Writer {
return cli.err
}
// SetIn sets the reader used for stdin
func (cli *DockerCli) SetIn(in *InStream) {
cli.in = in
}
// In returns the reader used for stdin
func (cli *DockerCli) In() *InStream {
return cli.in
}
// ShowHelp shows the command help.
func ShowHelp(err io.Writer) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
cmd.SetOutput(err)
cmd.HelpFunc()(cmd, args)
return nil
}
}
// ConfigFile returns the ConfigFile
func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
return cli.configFile
}
// ServerInfo returns the server version details for the host this client is
// connected to
func (cli *DockerCli) ServerInfo() ServerInfo {
return cli.server
}
// Initialize the dockerCli runs initialization that must happen after command
// line flags are parsed.
func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {
cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err)
var err error
cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
if tlsconfig.IsErrEncryptedKey(err) {
passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)
newClient := func(password string) (client.APIClient, error) {
opts.Common.TLSOptions.Passphrase = password
return NewAPIClientFromFlags(opts.Common, cli.configFile)
}
cli.client, err = getClientWithPassword(passRetriever, newClient)
}
if err != nil {
return err
}
cli.initializeFromClient()
return nil
}
func (cli *DockerCli) initializeFromClient() {
cli.defaultVersion = cli.client.ClientVersion()
ping, err := cli.client.Ping(context.Background())
if err != nil {
// Default to true if we fail to connect to daemon
cli.server = ServerInfo{HasExperimental: true}
if ping.APIVersion != "" {
cli.client.NegotiateAPIVersionPing(ping)
}
return
}
cli.server = ServerInfo{
HasExperimental: ping.Experimental,
OSType: ping.OSType,
}
cli.client.NegotiateAPIVersionPing(ping)
}
func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) (client.APIClient, error)) (client.APIClient, error) {
for attempts := 0; ; attempts++ {
passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts)
if giveup || err != nil {
return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase")
}
apiclient, err := newClient(passwd)
if !tlsconfig.IsErrEncryptedKey(err) {
return apiclient, err
}
}
}
// NotaryClient provides a Notary Repository to interact with signed metadata for an image
func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) {
return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
}
// ServerInfo stores details about the supported features and platform of the
// server
type ServerInfo struct {
HasExperimental bool
OSType string
}
// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli {
return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err}
}
// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
host, err := getServerHost(opts.Hosts, opts.TLSOptions)
if err != nil {
return &client.Client{}, err
}
customHeaders := configFile.HTTPHeaders
if customHeaders == nil {
customHeaders = map[string]string{}
}
customHeaders["User-Agent"] = UserAgent()
verStr := api.DefaultVersion
if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" {
verStr = tmpStr
}
httpClient, err := newHTTPClient(host, opts.TLSOptions)
if err != nil {
return &client.Client{}, err
}
return client.NewClient(host, verStr, httpClient, customHeaders)
}
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {
var host string
switch len(hosts) {
case 0:
host = os.Getenv("DOCKER_HOST")
case 1:
host = hosts[0]
default:
return "", errors.New("Please specify only one -H")
}
return dopts.ParseHost(tlsOptions != nil, host)
}
func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) {
if tlsOptions == nil {
// let the api client configure the default transport.
return nil, nil
}
opts := *tlsOptions
opts.ExclusiveRootPools = true
config, err := tlsconfig.Client(opts)
if err != nil {
return nil, err
}
tr := &http.Transport{
TLSClientConfig: config,
DialContext: (&net.Dialer{
KeepAlive: 30 * time.Second,
Timeout: 30 * time.Second,
}).DialContext,
}
proto, addr, _, err := client.ParseHost(host)
if err != nil {
return nil, err
}
sockets.ConfigureTransport(tr, proto, addr)
return &http.Client{
Transport: tr,
CheckRedirect: client.CheckRedirect,
}, nil
}
// UserAgent returns the user agent string used for making API requests
func UserAgent() string {
return "Docker-Client/" + cli.Version + " (" + runtime.GOOS + ")"
}
|
[
"\"DOCKER_API_VERSION\"",
"\"DOCKER_HOST\""
] |
[] |
[
"DOCKER_HOST",
"DOCKER_API_VERSION"
] |
[]
|
["DOCKER_HOST", "DOCKER_API_VERSION"]
|
go
| 2 | 0 | |
server/routes/callback/callback.go
|
package callback
import (
"encoding/json"
"io/ioutil"
"net/http"
"os"
"github.com/pbtrung/Skypiea/server/app"
"golang.org/x/oauth2"
)
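// CallbackHandler completes the Auth0 authorization-code flow: it exchanges the
// code for tokens, fetches the user profile and stores the tokens in the session
// before redirecting to the logged-in page.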
func CallbackHandler(w http.ResponseWriter, r *http.Request) {
domain := os.Getenv("AUTH0_DOMAIN")
conf := &oauth2.Config{
ClientID: os.Getenv("AUTH0_CLIENT_ID"),
ClientSecret: os.Getenv("AUTH0_CLIENT_SECRET"),
RedirectURL: os.Getenv("AUTH0_CALLBACK_URL"),
Scopes: []string{"openid", "profile"},
Endpoint: oauth2.Endpoint{
AuthURL: "https://" + domain + "/authorize",
TokenURL: "https://" + domain + "/oauth/token",
},
}
code := r.URL.Query().Get("code")
token, err := conf.Exchange(oauth2.NoContext, code)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Getting now the userInfo
client := conf.Client(oauth2.NoContext, token)
resp, err := client.Get("https://" + domain + "/userinfo")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
raw, err := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var profile map[string]interface{}
if err = json.Unmarshal(raw, &profile); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session, err := app.Store.Get(r, "auth0-session")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session.Values["id_token"] = token.Extra("id_token")
session.Values["access_token"] = token.AccessToken
session.Values["is_authenticated"] = true
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Redirect to logged in page
http.Redirect(w, r, "/libraries", http.StatusSeeOther)
}
|
[
"\"AUTH0_DOMAIN\"",
"\"AUTH0_CLIENT_ID\"",
"\"AUTH0_CLIENT_SECRET\"",
"\"AUTH0_CALLBACK_URL\""
] |
[] |
[
"AUTH0_DOMAIN",
"AUTH0_CALLBACK_URL",
"AUTH0_CLIENT_SECRET",
"AUTH0_CLIENT_ID"
] |
[]
|
["AUTH0_DOMAIN", "AUTH0_CALLBACK_URL", "AUTH0_CLIENT_SECRET", "AUTH0_CLIENT_ID"]
|
go
| 4 | 0 | |
python_modules/libraries/dagster-aws/dagster_aws_tests/emr_tests/test_pyspark.py
|
import os
import pytest
from dagster_aws.emr import EmrJobRunner, emr_pyspark_resource
from dagster_pyspark import pyspark_resource, pyspark_solid
from moto import mock_emr
from dagster import DagsterInvalidDefinitionError, ModeDefinition, execute_pipeline, pipeline
from dagster.seven import mock
from dagster.utils.test import create_test_pipeline_execution_context
@pyspark_solid
def example_solid(context):
list_p = [('John', 19), ('Jennifer', 29), ('Adam', 35), ('Henry', 50)]
rdd = context.resources.pyspark.spark_context.parallelize(list_p)
res = rdd.take(2)
for name, age in res:
context.log.info('%s: %d' % (name, age))
@pyspark_solid(name='blah', description='this is a test', config={'foo': str, 'bar': int})
def other_example_solid(context):
list_p = [('John', 19), ('Jennifer', 29), ('Adam', 35), ('Henry', 50)]
rdd = context.resources.pyspark.spark_context.parallelize(list_p)
res = rdd.take(2)
for name, age in res:
context.log.info('%s: %d' % (name, age))
@pipeline(
mode_defs=[
ModeDefinition('prod', resource_defs={'pyspark': emr_pyspark_resource}),
ModeDefinition('local', resource_defs={'pyspark': pyspark_resource}),
]
)
def example_pipe():
example_solid()
other_example_solid()
def test_local():
result = execute_pipeline(
pipeline=example_pipe,
mode='local',
environment_dict={'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},},
)
assert result.success
@mock_emr
@mock.patch('dagster_aws.emr.emr.EmrJobRunner.wait_for_steps_to_complete')
def test_pyspark_emr(mock_wait):
run_job_flow_args = dict(
Instances={
'InstanceCount': 1,
'KeepJobFlowAliveWhenNoSteps': True,
'MasterInstanceType': 'c3.medium',
'Placement': {'AvailabilityZone': 'us-west-1a'},
'SlaveInstanceType': 'c3.xlarge',
},
JobFlowRole='EMR_EC2_DefaultRole',
LogUri='s3://mybucket/log',
Name='cluster',
ServiceRole='EMR_DefaultRole',
VisibleToAllUsers=True,
)
# Doing cluster setup outside of a solid here, because run_job_flow is not yet plumbed through
# to the pyspark EMR resource.
job_runner = EmrJobRunner(region='us-west-1')
context = create_test_pipeline_execution_context()
cluster_id = job_runner.run_job_flow(context, run_job_flow_args)
result = execute_pipeline(
pipeline=example_pipe,
mode='prod',
environment_dict={
'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
'resources': {
'pyspark': {
'config': {
'pipeline_file': __file__,
'pipeline_fn_name': 'example_pipe',
'cluster_id': cluster_id,
'staging_bucket': 'dagster-scratch-80542c2',
'region_name': 'us-west-1',
}
}
},
},
)
assert result.success
assert mock_wait.call_count == 1  # 'called_once' is not a Mock attribute and is always truthy; check call_count instead
def test_bad_requirements_txt():
with pytest.raises(DagsterInvalidDefinitionError) as exc_info:
execute_pipeline(
pipeline=example_pipe,
mode='prod',
environment_dict={
'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
'resources': {
'pyspark': {
'config': {
'requirements_file_path': 'DOES_NOT_EXIST',
'pipeline_file': __file__,
'pipeline_fn_name': 'example_pipe',
'cluster_id': 'some_cluster_id',
'staging_bucket': 'dagster-scratch-80542c2',
'region_name': 'us-west-1',
}
}
},
},
)
assert 'The requirements.txt file that was specified does not exist' in str(exc_info.value)
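# Illustrative invocation for the live test below (an assumption, not part of the
# original suite); valid AWS credentials and a running cluster are also required:
#   AWS_EMR_TEST_DO_IT_LIVE=1 AWS_EMR_JOB_FLOW_ID=j-XXXXXXXXXXXXX pytest -k do_it_live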
@pytest.mark.skipif(
'AWS_EMR_TEST_DO_IT_LIVE' not in os.environ,
reason='This test is slow and requires a live EMR cluster; run only upon explicit request',
)
def test_do_it_live_emr():
result = execute_pipeline(
pipeline=example_pipe,
mode='prod',
environment_dict={
'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
'resources': {
'pyspark': {
'config': {
'pipeline_file': __file__,
'pipeline_fn_name': 'example_pipe',
'cluster_id': os.environ.get('AWS_EMR_JOB_FLOW_ID'),
'staging_bucket': 'dagster-scratch-80542c2',
'region_name': 'us-west-1',
'wait_for_logs': True,
}
}
},
},
)
assert result.success
|
[] |
[] |
[
"AWS_EMR_JOB_FLOW_ID"
] |
[]
|
["AWS_EMR_JOB_FLOW_ID"]
|
python
| 1 | 0 | |
src/cmd/compile/internal/gc/ssa.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"bytes"
"fmt"
"html"
"os"
"strings"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/sys"
)
var ssaEnabled = true
var ssaConfig *ssa.Config
var ssaExp ssaExport
func initssa() *ssa.Config {
ssaExp.unimplemented = false
ssaExp.mustImplement = true
if ssaConfig == nil {
ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
if Thearch.LinkArch.Name == "386" {
ssaConfig.Set387(Thearch.Use387)
}
}
return ssaConfig
}
func shouldssa(fn *Node) bool {
switch Thearch.LinkArch.Name {
default:
// Only available for testing.
if os.Getenv("SSATEST") == "" {
return false
}
case "amd64", "amd64p32", "arm", "386", "arm64":
// Generally available.
}
if !ssaEnabled {
return false
}
// Environment variable control of SSA CG
// 1. IF GOSSAFUNC == current function name THEN
// compile this function with SSA and log output to ssa.html
// 2. IF GOSSAHASH == "" THEN
// compile this function (and everything else) with SSA
// 3. IF GOSSAHASH == "n" or "N"
// IF GOSSAPKG == current package name THEN
// compile this function (and everything in this package) with SSA
// ELSE
// use the old back end for this function.
// This is for compatibility with existing test harness and should go away.
// 4. IF GOSSAHASH is a suffix of the binary-rendered SHA1 hash of the function name THEN
// compile this function with SSA
// ELSE
// compile this function with the old back end.
// Plan is for 3 to be removed when the tests are revised.
// SSA is now default, and is disabled by setting
// GOSSAHASH to n or N, or selectively with strings of
// 0 and 1.
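// Illustrative invocations (the build commands are assumptions, not part of this file):
//   GOSSAFUNC=Foo go build pkg       - SSA-compile only Foo and write ssa.html
//   GOSSAHASH=n GOSSAPKG=gc go build - SSA-compile only package gc, old back end elsewhere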
name := fn.Func.Nname.Sym.Name
funcname := os.Getenv("GOSSAFUNC")
if funcname != "" {
// If GOSSAFUNC is set, compile only that function.
return name == funcname
}
pkg := os.Getenv("GOSSAPKG")
if pkg != "" {
// If GOSSAPKG is set, compile only that package.
return localpkg.Name == pkg
}
return initssa().DebugHashMatch("GOSSAHASH", name)
}
// buildssa builds an SSA function.
func buildssa(fn *Node) *ssa.Func {
name := fn.Func.Nname.Sym.Name
printssa := name == os.Getenv("GOSSAFUNC")
if printssa {
fmt.Println("generating SSA for", name)
dumplist("buildssa-enter", fn.Func.Enter)
dumplist("buildssa-body", fn.Nbody)
dumplist("buildssa-exit", fn.Func.Exit)
}
var s state
s.pushLine(fn.Lineno)
defer s.popLine()
if fn.Func.Pragma&CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
if fn.Func.Pragma&Nowritebarrier != 0 {
s.noWB = true
}
defer func() {
if s.WBLineno != 0 {
fn.Func.WBLineno = s.WBLineno
}
}()
// TODO(khr): build config just once at the start of the compiler binary
ssaExp.log = printssa
s.config = initssa()
s.f = s.config.NewFunc()
s.f.Name = name
s.exitCode = fn.Func.Exit
s.panics = map[funcLine]*ssa.Block{}
if name == os.Getenv("GOSSAFUNC") {
// TODO: tempfile? it is handy to have the location
// of this file be stable, so you can just reload in the browser.
s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
// TODO: generate and print a mapping from nodes to values and blocks
}
defer func() {
if !printssa {
s.config.HTML.Close()
}
}()
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
// Allocate starting values
s.labels = map[string]*ssaLabel{}
s.labeledNodes = map[*Node]*ssaLabel{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])
s.startBlock(s.f.Entry)
s.vars[&memVar] = s.startmem
s.varsyms = map[*Node]interface{}{}
// Generate addresses of local declarations
s.decladdrs = map[*Node]*ssa.Value{}
for _, n := range fn.Func.Dcl {
switch n.Class {
case PPARAM, PPARAMOUT:
aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
if n.Class == PPARAMOUT && s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
// the function.
s.returns = append(s.returns, n)
}
if n.Class == PPARAM && s.canSSA(n) && n.Type.IsPtrShaped() {
s.ptrargs = append(s.ptrargs, n)
n.SetNotLiveAtEnd(true) // SSA takes care of this explicitly
}
case PAUTO:
// processed at each use, to prevent Addr coming
// before the decl.
case PAUTOHEAP:
// moved to heap - already handled by frontend
case PFUNC:
// local function - already handled by frontend
default:
s.Unimplementedf("local variable with class %s unimplemented", classnames[n.Class])
}
}
// Convert the AST-based IR to the SSA-based IR
s.stmts(fn.Func.Enter)
s.stmts(fn.Nbody)
// fallthrough to exit
if s.curBlock != nil {
s.pushLine(fn.Func.Endlineno)
s.exit()
s.popLine()
}
// Check that we used all labels
for name, lab := range s.labels {
if !lab.used() && !lab.reported {
yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
lab.reported = true
}
if lab.used() && !lab.defined() && !lab.reported {
yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
lab.reported = true
}
}
// Check any forward gotos. Non-forward gotos have already been checked.
for _, n := range s.fwdGotos {
lab := s.labels[n.Left.Sym.Name]
// If the label is undefined, we have already printed an error.
if lab.defined() {
s.checkgoto(n, lab.defNode)
}
}
if nerrors > 0 {
s.f.Free()
return nil
}
prelinkNumvars := s.f.NumValues()
sparseDefState := s.locatePotentialPhiFunctions(fn)
// Link up variable uses to variable definitions
s.linkForwardReferences(sparseDefState)
if ssa.BuildStats > 0 {
s.f.LogStat("build", s.f.NumBlocks(), "blocks", prelinkNumvars, "vars_before",
s.f.NumValues(), "vars_after", prelinkNumvars*s.f.NumBlocks(), "ssa_phi_loc_cutoff_score")
}
// Don't carry a reference to this around longer than necessary
s.exitCode = Nodes{}
// Main call to ssa package to compile function
ssa.Compile(s.f)
return s.f
}
type state struct {
// configuration (arch) information
config *ssa.Config
// function we're building
f *ssa.Func
// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
labels map[string]*ssaLabel
labeledNodes map[*Node]*ssaLabel
// gotos that jump forward; required for deferred checkgoto calls
fwdGotos []*Node
// Code that must precede any return
// (e.g., copying value of heap-escaped paramout back to true paramout)
exitCode Nodes
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
continueTo *ssa.Block // current target for plain continue statement
// current location where we're interpreting the AST
curBlock *ssa.Block
// variable assignments in the current block (map from variable symbol to ssa value)
// *Node is the unique identifier (an ONAME Node) for the variable.
vars map[*Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
defvars []map[*Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables.
decladdrs map[*Node]*ssa.Value
// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
varsyms map[*Node]interface{}
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
sp *ssa.Value
sb *ssa.Value
// line number stack. The current line number is top of stack
line []int32
// list of panic calls by function name and line number.
// Used to deduplicate panic calls.
panics map[funcLine]*ssa.Block
// list of FwdRef values.
fwdRefs []*ssa.Value
// list of PPARAMOUT (return) variables.
returns []*Node
// list of PPARAM SSA-able pointer-shaped args. We ensure these are live
// throughout the function to help users avoid premature finalizers.
ptrargs []*Node
cgoUnsafeArgs bool
noWB bool
WBLineno int32 // line number of first write barrier. 0=no write barriers
}
type funcLine struct {
f *Node
line int32
}
type ssaLabel struct {
target *ssa.Block // block identified by this label
breakTarget *ssa.Block // block to break to in control flow node identified by this label
continueTarget *ssa.Block // block to continue to in control flow node identified by this label
defNode *Node // label definition Node (OLABEL)
// Label use Node (OGOTO, OBREAK, OCONTINUE).
// Used only for error detection and reporting.
// There might be multiple uses, but we only need to track one.
useNode *Node
reported bool // reported indicates whether an error has already been reported for this label
}
// defined reports whether the label has a definition (OLABEL node).
func (l *ssaLabel) defined() bool { return l.defNode != nil }
// used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
func (l *ssaLabel) used() bool { return l.useNode != nil }
// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *Sym) *ssaLabel {
lab := s.labels[sym.Name]
if lab == nil {
lab = new(ssaLabel)
s.labels[sym.Name] = lab
}
return lab
}
func (s *state) Logf(msg string, args ...interface{}) { s.config.Logf(msg, args...) }
func (s *state) Log() bool { return s.config.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(s.peekLine(), msg, args...) }
func (s *state) Unimplementedf(msg string, args ...interface{}) {
s.config.Unimplementedf(s.peekLine(), msg, args...)
}
func (s *state) Warnl(line int32, msg string, args ...interface{}) { s.config.Warnl(line, msg, args...) }
func (s *state) Debug_checknil() bool { return s.config.Debug_checknil() }
var (
// dummy node for the memory variable
memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}
// dummy nodes for temporary variables
ptrVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
lenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
capVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
typVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
idataVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}}
okVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
deltaVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "delta"}}
)
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
if s.curBlock != nil {
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
s.vars = map[*Node]*ssa.Value{}
}
// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
b := s.curBlock
if b == nil {
return nil
}
for len(s.defvars) <= int(b.ID) {
s.defvars = append(s.defvars, nil)
}
s.defvars[b.ID] = s.vars
s.curBlock = nil
s.vars = nil
b.Line = s.peekLine()
return b
}
// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line int32) {
if line == 0 {
// the frontend may emit a node with its line number missing;
// use the parent line number in this case.
line = s.peekLine()
if Debug['K'] != 0 {
Warn("buildssa: line 0")
}
}
s.line = append(s.line, line)
}
// popLine pops the top of the line number stack.
func (s *state) popLine() {
s.line = s.line[:len(s.line)-1]
}
// peekLine peeks at the top of the line number stack.
func (s *state) peekLine() int32 {
return s.line[len(s.line)-1]
}
func (s *state) Error(msg string, args ...interface{}) {
yyerrorl(s.peekLine(), msg, args...)
}
// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
return s.curBlock.NewValue0(s.peekLine(), op, t)
}
// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
return s.curBlock.NewValue0A(s.peekLine(), op, t, aux)
}
// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
return s.curBlock.NewValue0I(s.peekLine(), op, t, auxint)
}
// newValue1 adds a new value with one argument to the current block.
func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1(s.peekLine(), op, t, arg)
}
// newValue1A adds a new value with one argument and an aux value to the current block.
func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg)
}
// newValue1I adds a new value with one argument and an auxint value to the current block.
func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg)
}
// newValue2 adds a new value with two arguments to the current block.
func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1)
}
// newValue2I adds a new value with two arguments and an auxint value to the current block.
func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1)
}
// newValue3 adds a new value with three arguments to the current block.
func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2)
}
// newValue3I adds a new value with three arguments and an auxint value to the current block.
func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue3I(s.peekLine(), op, t, aux, arg0, arg1, arg2)
}
// entryNewValue0 adds a new value with no arguments to the entry block.
func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
return s.f.Entry.NewValue0(s.peekLine(), op, t)
}
// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux)
}
// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
return s.f.Entry.NewValue0I(s.peekLine(), op, t, auxint)
}
// entryNewValue1 adds a new value with one argument to the entry block.
func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1(s.peekLine(), op, t, arg)
}
// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
}
// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
}
// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
}
// const* routines add a new const value to the entry block.
func (s *state) constSlice(t ssa.Type) *ssa.Value { return s.f.ConstSlice(s.peekLine(), t) }
func (s *state) constInterface(t ssa.Type) *ssa.Value { return s.f.ConstInterface(s.peekLine(), t) }
func (s *state) constNil(t ssa.Type) *ssa.Value { return s.f.ConstNil(s.peekLine(), t) }
func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }
func (s *state) constBool(c bool) *ssa.Value {
return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
}
func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
return s.f.ConstInt8(s.peekLine(), t, c)
}
func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
return s.f.ConstInt16(s.peekLine(), t, c)
}
func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
return s.f.ConstInt32(s.peekLine(), t, c)
}
func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
return s.f.ConstInt64(s.peekLine(), t, c)
}
func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
return s.f.ConstFloat32(s.peekLine(), t, c)
}
func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
return s.f.ConstFloat64(s.peekLine(), t, c)
}
func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
if s.config.IntSize == 8 {
return s.constInt64(t, c)
}
if int64(int32(c)) != c {
s.Fatalf("integer constant too big %d", c)
}
return s.constInt32(t, int32(c))
}
func (s *state) stmts(a Nodes) {
for _, x := range a.Slice() {
s.stmt(x)
}
}
// ssaStmtList converts the statement n to SSA and adds it to s.
func (s *state) stmtList(l Nodes) {
for _, n := range l.Slice() {
s.stmt(n)
}
}
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
s.pushLine(n.Lineno)
defer s.popLine()
// If s.curBlock is nil, then we're about to generate dead code.
// We can't just short-circuit here, though,
// because we check labels and gotos as part of SSA generation.
// Provide a block for the dead code so that we don't have
// to add special cases everywhere else.
if s.curBlock == nil {
dead := s.f.NewBlock(ssa.BlockPlain)
s.startBlock(dead)
}
s.stmtList(n.Ninit)
switch n.Op {
case OBLOCK:
s.stmtList(n.List)
// No-ops
case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
// Expression statements
case OCALLFUNC, OCALLMETH, OCALLINTER:
s.call(n, callNormal)
if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC &&
(compiling_runtime && n.Left.Sym.Name == "throw" ||
n.Left.Sym.Pkg == Runtimepkg && (n.Left.Sym.Name == "gopanic" || n.Left.Sym.Name == "selectgo" || n.Left.Sym.Name == "block")) {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
b.SetControl(m)
// TODO: never rewrite OPANIC to OCALLFUNC in the
// first place. Need to wait until all backends
// go through SSA.
}
case ODEFER:
s.call(n.Left, callDefer)
case OPROC:
s.call(n.Left, callGo)
case OAS2DOTTYPE:
res, resok := s.dottype(n.Rlist.First(), true)
s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), false, n.Lineno, 0, false)
s.assign(n.List.Second(), resok, false, false, n.Lineno, 0, false)
return
case ODCL:
if n.Left.Class == PAUTOHEAP {
Fatalf("DCL %v", n)
}
case OLABEL:
sym := n.Left.Sym
if isblanksym(sym) {
// Empty identifier is valid but useless.
// See issues 11589, 11593.
return
}
lab := s.label(sym)
// Associate label with its control flow node, if any
if ctl := n.Name.Defn; ctl != nil {
switch ctl.Op {
case OFOR, OSWITCH, OSELECT:
s.labeledNodes[ctl] = lab
}
}
if !lab.defined() {
lab.defNode = n
} else {
s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
lab.reported = true
}
// The label might already have a target block via a goto.
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
// go to that label (we pretend "label:" is preceded by "goto label")
b := s.endBlock()
b.AddEdgeTo(lab.target)
s.startBlock(lab.target)
case OGOTO:
sym := n.Left.Sym
lab := s.label(sym)
if lab.target == nil {
lab.target = s.f.NewBlock(ssa.BlockPlain)
}
if !lab.used() {
lab.useNode = n
}
if lab.defined() {
s.checkgoto(n, lab.defNode)
} else {
s.fwdGotos = append(s.fwdGotos, n)
}
b := s.endBlock()
b.AddEdgeTo(lab.target)
case OAS, OASWB:
// Check whether we can generate static data rather than code.
// If so, ignore n and defer data generation until codegen.
// Failure to do this causes writes to readonly symbols.
if gen_as_init(n, true) {
var data []*Node
if s.f.StaticData != nil {
data = s.f.StaticData.([]*Node)
}
s.f.StaticData = append(data, n)
return
}
if n.Left == n.Right && n.Left.Op == ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
// VARDEF x
// COPY x -> x
// which is bad because x is incorrectly considered
// dead before the vardef. See issue #14904.
return
}
var t *Type
if n.Right != nil {
t = n.Right.Type
} else {
t = n.Left.Type
}
// Evaluate RHS.
rhs := n.Right
if rhs != nil {
switch rhs.Op {
case OSTRUCTLIT, OARRAYLIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !iszero(rhs) {
Fatalf("literal with nonzero value in SSA: %v", rhs)
}
rhs = nil
case OAPPEND:
// If we're writing the result of an append back to the same slice,
// handle it specially to avoid write barriers on the fast (non-growth) path.
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
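// Illustrative example: for source like
//   s = append(s, x)
// where s is not SSA-able, n.Left and rhs.List.First() are the same slice,
// so the append is handled in place here.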
if samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) {
s.append(rhs, true)
return
}
}
}
var r *ssa.Value
var isVolatile bool
needwb := n.Op == OASWB && rhs != nil
deref := !canSSAType(t)
if deref {
if rhs == nil {
r = nil // Signal assign to use OpZero.
} else {
r, isVolatile = s.addr(rhs, false)
}
} else {
if rhs == nil {
r = s.zeroVal(t)
} else {
r = s.expr(rhs)
}
}
if rhs != nil && rhs.Op == OAPPEND {
// The frontend gets rid of the write barrier to enable the special OAPPEND
// handling above, but since this is not a special case, we need it.
// TODO: just add a ptr graying to the end of growslice?
// TODO: check whether we need to provide special handling and a write barrier
// for ODOTTYPE and ORECV also.
// They get similar wb-removal treatment in walk.go:OAS.
needwb = true
}
var skip skipMask
if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
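// Illustrative example: for `s = s[:n]` only the len field changes,
// so the ptr (and cap) stores are skipped below.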
i, j, k := rhs.SliceBounds()
if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
// [0:...] is the same as [:...]
i = nil
}
// TODO: detect defaults for len/cap also.
// Currently doesn't really work because (*p)[:len(*p)] appears here as:
// tmp = len(*p)
// (*p)[:tmp]
//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
// j = nil
//}
//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
// k = nil
//}
if i == nil {
skip |= skipPtr
if j == nil {
skip |= skipLen
}
if k == nil {
skip |= skipCap
}
}
}
s.assign(n.Left, r, needwb, deref, n.Lineno, skip, isVolatile)
case OIF:
bThen := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
var bElse *ssa.Block
if n.Rlist.Len() != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
s.condBranch(n.Left, bThen, bElse, n.Likely)
} else {
s.condBranch(n.Left, bThen, bEnd, n.Likely)
}
s.startBlock(bThen)
s.stmts(n.Nbody)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
if n.Rlist.Len() != 0 {
s.startBlock(bElse)
s.stmtList(n.Rlist)
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
s.startBlock(bEnd)
case ORETURN:
s.stmtList(n.List)
s.exit()
case ORETJMP:
s.stmtList(n.List)
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
b.Aux = n.Left.Sym
case OCONTINUE, OBREAK:
var op string
var to *ssa.Block
switch n.Op {
case OCONTINUE:
op = "continue"
to = s.continueTo
case OBREAK:
op = "break"
to = s.breakTo
}
if n.Left == nil {
// plain break/continue
if to == nil {
s.Error("%s is not in a loop", op)
return
}
// nothing to do; "to" is already the correct target
} else {
// labeled break/continue; look up the target
sym := n.Left.Sym
lab := s.label(sym)
if !lab.used() {
lab.useNode = n.Left
}
if !lab.defined() {
s.Error("%s label not defined: %v", op, sym)
lab.reported = true
return
}
switch n.Op {
case OCONTINUE:
to = lab.continueTarget
case OBREAK:
to = lab.breakTarget
}
if to == nil {
// Valid label but not usable with a break/continue here, e.g.:
// for {
// continue abc
// }
// abc:
// for {}
s.Error("invalid %s label %v", op, sym)
lab.reported = true
return
}
}
b := s.endBlock()
b.AddEdgeTo(to)
case OFOR:
// OFOR: for Ninit; Left; Right { Nbody }
bCond := s.f.NewBlock(ssa.BlockPlain)
bBody := s.f.NewBlock(ssa.BlockPlain)
bIncr := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
// first, jump to condition test
b := s.endBlock()
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
if n.Left != nil {
s.condBranch(n.Left, bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
b.AddEdgeTo(bBody)
}
// set up for continue/break in body
prevContinue := s.continueTo
prevBreak := s.breakTo
s.continueTo = bIncr
s.breakTo = bEnd
lab := s.labeledNodes[n]
if lab != nil {
// labeled for loop
lab.continueTarget = bIncr
lab.breakTarget = bEnd
}
// generate body
s.startBlock(bBody)
s.stmts(n.Nbody)
// tear down continue/break
s.continueTo = prevContinue
s.breakTo = prevBreak
if lab != nil {
lab.continueTarget = nil
lab.breakTarget = nil
}
// done with body, goto incr
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bIncr)
}
// generate incr
s.startBlock(bIncr)
if n.Right != nil {
s.stmt(n.Right)
}
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
}
s.startBlock(bEnd)
case OSWITCH, OSELECT:
// These have been mostly rewritten by the front end into their Nbody fields.
// Our main task is to correctly hook up any break statements.
bEnd := s.f.NewBlock(ssa.BlockPlain)
prevBreak := s.breakTo
s.breakTo = bEnd
lab := s.labeledNodes[n]
if lab != nil {
// labeled
lab.breakTarget = bEnd
}
// generate body code
s.stmts(n.Nbody)
s.breakTo = prevBreak
if lab != nil {
lab.breakTarget = nil
}
// OSWITCH never falls through (s.curBlock == nil here).
// OSELECT does not fall through if we're calling selectgo.
// OSELECT does fall through if we're calling selectnb{send,recv}[2].
// In those latter cases, go to the code after the select.
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
s.startBlock(bEnd)
case OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
if !s.canSSA(n.Left) {
s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
}
case OVARLIVE:
// Insert a varlive op to record that a variable is still live.
if !n.Left.Addrtaken {
s.Fatalf("VARLIVE variable %s must have Addrtaken set", n.Left)
}
s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())
case OCHECKNIL:
p := s.expr(n.Left)
s.nilCheck(p)
default:
s.Unimplementedf("unhandled stmt %s", n.Op)
}
}
// exit processes any code that needs to be generated just before returning.
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
if hasdefer {
s.rtcall(Deferreturn, true, nil)
}
// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack.
s.stmts(s.exitCode)
// Store SSAable PPARAMOUT variables back to stack locations.
for _, n := range s.returns {
addr := s.decladdrs[n]
val := s.variable(n, n.Type)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
// TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen
// currently.
}
// Keep input pointer args live until the return. This is a bandaid
// fix for 1.7 for what will become in 1.8 explicit runtime.KeepAlive calls.
// For <= 1.7 we guarantee that pointer input arguments live to the end of
// the function to prevent premature (from the user's point of view)
// execution of finalizers. See issue 15277.
// TODO: remove for 1.8?
for _, n := range s.ptrargs {
s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
}
// Do actual return.
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockRet
b.SetControl(m)
return b
}
type opAndType struct {
op Op
etype EType
}
var opToSSA = map[opAndType]ssa.Op{
opAndType{OADD, TINT8}: ssa.OpAdd8,
opAndType{OADD, TUINT8}: ssa.OpAdd8,
opAndType{OADD, TINT16}: ssa.OpAdd16,
opAndType{OADD, TUINT16}: ssa.OpAdd16,
opAndType{OADD, TINT32}: ssa.OpAdd32,
opAndType{OADD, TUINT32}: ssa.OpAdd32,
opAndType{OADD, TPTR32}: ssa.OpAdd32,
opAndType{OADD, TINT64}: ssa.OpAdd64,
opAndType{OADD, TUINT64}: ssa.OpAdd64,
opAndType{OADD, TPTR64}: ssa.OpAdd64,
opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
opAndType{OSUB, TINT8}: ssa.OpSub8,
opAndType{OSUB, TUINT8}: ssa.OpSub8,
opAndType{OSUB, TINT16}: ssa.OpSub16,
opAndType{OSUB, TUINT16}: ssa.OpSub16,
opAndType{OSUB, TINT32}: ssa.OpSub32,
opAndType{OSUB, TUINT32}: ssa.OpSub32,
opAndType{OSUB, TINT64}: ssa.OpSub64,
opAndType{OSUB, TUINT64}: ssa.OpSub64,
opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
opAndType{ONOT, TBOOL}: ssa.OpNot,
opAndType{OMINUS, TINT8}: ssa.OpNeg8,
opAndType{OMINUS, TUINT8}: ssa.OpNeg8,
opAndType{OMINUS, TINT16}: ssa.OpNeg16,
opAndType{OMINUS, TUINT16}: ssa.OpNeg16,
opAndType{OMINUS, TINT32}: ssa.OpNeg32,
opAndType{OMINUS, TUINT32}: ssa.OpNeg32,
opAndType{OMINUS, TINT64}: ssa.OpNeg64,
opAndType{OMINUS, TUINT64}: ssa.OpNeg64,
opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,
opAndType{OCOM, TINT8}: ssa.OpCom8,
opAndType{OCOM, TUINT8}: ssa.OpCom8,
opAndType{OCOM, TINT16}: ssa.OpCom16,
opAndType{OCOM, TUINT16}: ssa.OpCom16,
opAndType{OCOM, TINT32}: ssa.OpCom32,
opAndType{OCOM, TUINT32}: ssa.OpCom32,
opAndType{OCOM, TINT64}: ssa.OpCom64,
opAndType{OCOM, TUINT64}: ssa.OpCom64,
opAndType{OIMAG, TCOMPLEX64}: ssa.OpComplexImag,
opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
opAndType{OREAL, TCOMPLEX64}: ssa.OpComplexReal,
opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
opAndType{OMUL, TINT8}: ssa.OpMul8,
opAndType{OMUL, TUINT8}: ssa.OpMul8,
opAndType{OMUL, TINT16}: ssa.OpMul16,
opAndType{OMUL, TUINT16}: ssa.OpMul16,
opAndType{OMUL, TINT32}: ssa.OpMul32,
opAndType{OMUL, TUINT32}: ssa.OpMul32,
opAndType{OMUL, TINT64}: ssa.OpMul64,
opAndType{OMUL, TUINT64}: ssa.OpMul64,
opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
opAndType{OHMUL, TINT8}: ssa.OpHmul8,
opAndType{OHMUL, TUINT8}: ssa.OpHmul8u,
opAndType{OHMUL, TINT16}: ssa.OpHmul16,
opAndType{OHMUL, TUINT16}: ssa.OpHmul16u,
opAndType{OHMUL, TINT32}: ssa.OpHmul32,
opAndType{OHMUL, TUINT32}: ssa.OpHmul32u,
opAndType{ODIV, TINT8}: ssa.OpDiv8,
opAndType{ODIV, TUINT8}: ssa.OpDiv8u,
opAndType{ODIV, TINT16}: ssa.OpDiv16,
opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
opAndType{ODIV, TINT32}: ssa.OpDiv32,
opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
opAndType{ODIV, TINT64}: ssa.OpDiv64,
opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
opAndType{OMOD, TINT8}: ssa.OpMod8,
opAndType{OMOD, TUINT8}: ssa.OpMod8u,
opAndType{OMOD, TINT16}: ssa.OpMod16,
opAndType{OMOD, TUINT16}: ssa.OpMod16u,
opAndType{OMOD, TINT32}: ssa.OpMod32,
opAndType{OMOD, TUINT32}: ssa.OpMod32u,
opAndType{OMOD, TINT64}: ssa.OpMod64,
opAndType{OMOD, TUINT64}: ssa.OpMod64u,
opAndType{OAND, TINT8}: ssa.OpAnd8,
opAndType{OAND, TUINT8}: ssa.OpAnd8,
opAndType{OAND, TINT16}: ssa.OpAnd16,
opAndType{OAND, TUINT16}: ssa.OpAnd16,
opAndType{OAND, TINT32}: ssa.OpAnd32,
opAndType{OAND, TUINT32}: ssa.OpAnd32,
opAndType{OAND, TINT64}: ssa.OpAnd64,
opAndType{OAND, TUINT64}: ssa.OpAnd64,
opAndType{OOR, TINT8}: ssa.OpOr8,
opAndType{OOR, TUINT8}: ssa.OpOr8,
opAndType{OOR, TINT16}: ssa.OpOr16,
opAndType{OOR, TUINT16}: ssa.OpOr16,
opAndType{OOR, TINT32}: ssa.OpOr32,
opAndType{OOR, TUINT32}: ssa.OpOr32,
opAndType{OOR, TINT64}: ssa.OpOr64,
opAndType{OOR, TUINT64}: ssa.OpOr64,
opAndType{OXOR, TINT8}: ssa.OpXor8,
opAndType{OXOR, TUINT8}: ssa.OpXor8,
opAndType{OXOR, TINT16}: ssa.OpXor16,
opAndType{OXOR, TUINT16}: ssa.OpXor16,
opAndType{OXOR, TINT32}: ssa.OpXor32,
opAndType{OXOR, TUINT32}: ssa.OpXor32,
opAndType{OXOR, TINT64}: ssa.OpXor64,
opAndType{OXOR, TUINT64}: ssa.OpXor64,
opAndType{OEQ, TBOOL}: ssa.OpEqB,
opAndType{OEQ, TINT8}: ssa.OpEq8,
opAndType{OEQ, TUINT8}: ssa.OpEq8,
opAndType{OEQ, TINT16}: ssa.OpEq16,
opAndType{OEQ, TUINT16}: ssa.OpEq16,
opAndType{OEQ, TINT32}: ssa.OpEq32,
opAndType{OEQ, TUINT32}: ssa.OpEq32,
opAndType{OEQ, TINT64}: ssa.OpEq64,
opAndType{OEQ, TUINT64}: ssa.OpEq64,
opAndType{OEQ, TINTER}: ssa.OpEqInter,
opAndType{OEQ, TSLICE}: ssa.OpEqSlice,
opAndType{OEQ, TFUNC}: ssa.OpEqPtr,
opAndType{OEQ, TMAP}: ssa.OpEqPtr,
opAndType{OEQ, TCHAN}: ssa.OpEqPtr,
opAndType{OEQ, TPTR32}: ssa.OpEqPtr,
opAndType{OEQ, TPTR64}: ssa.OpEqPtr,
opAndType{OEQ, TUINTPTR}: ssa.OpEqPtr,
opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
opAndType{OEQ, TFLOAT64}: ssa.OpEq64F,
opAndType{OEQ, TFLOAT32}: ssa.OpEq32F,
opAndType{ONE, TBOOL}: ssa.OpNeqB,
opAndType{ONE, TINT8}: ssa.OpNeq8,
opAndType{ONE, TUINT8}: ssa.OpNeq8,
opAndType{ONE, TINT16}: ssa.OpNeq16,
opAndType{ONE, TUINT16}: ssa.OpNeq16,
opAndType{ONE, TINT32}: ssa.OpNeq32,
opAndType{ONE, TUINT32}: ssa.OpNeq32,
opAndType{ONE, TINT64}: ssa.OpNeq64,
opAndType{ONE, TUINT64}: ssa.OpNeq64,
opAndType{ONE, TINTER}: ssa.OpNeqInter,
opAndType{ONE, TSLICE}: ssa.OpNeqSlice,
opAndType{ONE, TFUNC}: ssa.OpNeqPtr,
opAndType{ONE, TMAP}: ssa.OpNeqPtr,
opAndType{ONE, TCHAN}: ssa.OpNeqPtr,
opAndType{ONE, TPTR32}: ssa.OpNeqPtr,
opAndType{ONE, TPTR64}: ssa.OpNeqPtr,
opAndType{ONE, TUINTPTR}: ssa.OpNeqPtr,
opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
opAndType{ONE, TFLOAT64}: ssa.OpNeq64F,
opAndType{ONE, TFLOAT32}: ssa.OpNeq32F,
opAndType{OLT, TINT8}: ssa.OpLess8,
opAndType{OLT, TUINT8}: ssa.OpLess8U,
opAndType{OLT, TINT16}: ssa.OpLess16,
opAndType{OLT, TUINT16}: ssa.OpLess16U,
opAndType{OLT, TINT32}: ssa.OpLess32,
opAndType{OLT, TUINT32}: ssa.OpLess32U,
opAndType{OLT, TINT64}: ssa.OpLess64,
opAndType{OLT, TUINT64}: ssa.OpLess64U,
opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
opAndType{OGT, TINT8}: ssa.OpGreater8,
opAndType{OGT, TUINT8}: ssa.OpGreater8U,
opAndType{OGT, TINT16}: ssa.OpGreater16,
opAndType{OGT, TUINT16}: ssa.OpGreater16U,
opAndType{OGT, TINT32}: ssa.OpGreater32,
opAndType{OGT, TUINT32}: ssa.OpGreater32U,
opAndType{OGT, TINT64}: ssa.OpGreater64,
opAndType{OGT, TUINT64}: ssa.OpGreater64U,
opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,
opAndType{OLE, TINT8}: ssa.OpLeq8,
opAndType{OLE, TUINT8}: ssa.OpLeq8U,
opAndType{OLE, TINT16}: ssa.OpLeq16,
opAndType{OLE, TUINT16}: ssa.OpLeq16U,
opAndType{OLE, TINT32}: ssa.OpLeq32,
opAndType{OLE, TUINT32}: ssa.OpLeq32U,
opAndType{OLE, TINT64}: ssa.OpLeq64,
opAndType{OLE, TUINT64}: ssa.OpLeq64U,
opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
opAndType{OGE, TINT8}: ssa.OpGeq8,
opAndType{OGE, TUINT8}: ssa.OpGeq8U,
opAndType{OGE, TINT16}: ssa.OpGeq16,
opAndType{OGE, TUINT16}: ssa.OpGeq16U,
opAndType{OGE, TINT32}: ssa.OpGeq32,
opAndType{OGE, TUINT32}: ssa.OpGeq32U,
opAndType{OGE, TINT64}: ssa.OpGeq64,
opAndType{OGE, TUINT64}: ssa.OpGeq64U,
opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
opAndType{OLROT, TUINT8}: ssa.OpLrot8,
opAndType{OLROT, TUINT16}: ssa.OpLrot16,
opAndType{OLROT, TUINT32}: ssa.OpLrot32,
opAndType{OLROT, TUINT64}: ssa.OpLrot64,
opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt,
}
func (s *state) concreteEtype(t *Type) EType {
e := t.Etype
switch e {
default:
return e
case TINT:
if s.config.IntSize == 8 {
return TINT64
}
return TINT32
case TUINT:
if s.config.IntSize == 8 {
return TUINT64
}
return TUINT32
case TUINTPTR:
if s.config.PtrSize == 8 {
return TUINT64
}
return TUINT32
}
}
func (s *state) ssaOp(op Op, t *Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
s.Unimplementedf("unhandled binary op %s %s", op, etype)
}
return x
}
func floatForComplex(t *Type) *Type {
if t.Size() == 8 {
return Types[TFLOAT32]
} else {
return Types[TFLOAT64]
}
}
type opAndTwoTypes struct {
op Op
etype1 EType
etype2 EType
}
type twoTypes struct {
etype1 EType
etype2 EType
}
type twoOpsAndType struct {
op1 ssa.Op
op2 ssa.Op
intermediateType EType
}
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{TINT8, TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
twoTypes{TINT8, TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
twoTypes{TFLOAT32, TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
twoTypes{TFLOAT64, TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
// unsigned
twoTypes{TUINT8, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto32F, branchy code expansion instead
twoTypes{TUINT8, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64}, // Cvt64Uto64F, branchy code expansion instead
twoTypes{TFLOAT32, TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt32Fto64U, branchy code expansion instead
twoTypes{TFLOAT64, TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64}, // Cvt64Fto64U, branchy code expansion instead
// float
twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64},
twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32},
twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
}
// this map is used only for 32-bit archs and only records the differences:
// on a 32-bit arch, don't use int64<->float conversion for uint32
var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
}
// uint64<->float conversions, only on machines that have instructions for that
var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
}
var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
opAndTwoTypes{OLSH, TINT8, TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{OLSH, TUINT8, TUINT8}: ssa.OpLsh8x8,
opAndTwoTypes{OLSH, TINT8, TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
opAndTwoTypes{OLSH, TINT8, TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
opAndTwoTypes{OLSH, TINT8, TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
opAndTwoTypes{OLSH, TINT16, TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{OLSH, TUINT16, TUINT8}: ssa.OpLsh16x8,
opAndTwoTypes{OLSH, TINT16, TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
opAndTwoTypes{OLSH, TINT16, TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
opAndTwoTypes{OLSH, TINT16, TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
opAndTwoTypes{OLSH, TINT32, TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{OLSH, TUINT32, TUINT8}: ssa.OpLsh32x8,
opAndTwoTypes{OLSH, TINT32, TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
opAndTwoTypes{OLSH, TINT32, TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
opAndTwoTypes{OLSH, TINT32, TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
opAndTwoTypes{OLSH, TINT64, TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{OLSH, TUINT64, TUINT8}: ssa.OpLsh64x8,
opAndTwoTypes{OLSH, TINT64, TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
opAndTwoTypes{OLSH, TINT64, TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
opAndTwoTypes{OLSH, TINT64, TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
opAndTwoTypes{ORSH, TINT8, TUINT8}: ssa.OpRsh8x8,
opAndTwoTypes{ORSH, TUINT8, TUINT8}: ssa.OpRsh8Ux8,
opAndTwoTypes{ORSH, TINT8, TUINT16}: ssa.OpRsh8x16,
opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
opAndTwoTypes{ORSH, TINT8, TUINT32}: ssa.OpRsh8x32,
opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
opAndTwoTypes{ORSH, TINT8, TUINT64}: ssa.OpRsh8x64,
opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
opAndTwoTypes{ORSH, TINT16, TUINT8}: ssa.OpRsh16x8,
opAndTwoTypes{ORSH, TUINT16, TUINT8}: ssa.OpRsh16Ux8,
opAndTwoTypes{ORSH, TINT16, TUINT16}: ssa.OpRsh16x16,
opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
opAndTwoTypes{ORSH, TINT16, TUINT32}: ssa.OpRsh16x32,
opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
opAndTwoTypes{ORSH, TINT16, TUINT64}: ssa.OpRsh16x64,
opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
opAndTwoTypes{ORSH, TINT32, TUINT8}: ssa.OpRsh32x8,
opAndTwoTypes{ORSH, TUINT32, TUINT8}: ssa.OpRsh32Ux8,
opAndTwoTypes{ORSH, TINT32, TUINT16}: ssa.OpRsh32x16,
opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
opAndTwoTypes{ORSH, TINT32, TUINT32}: ssa.OpRsh32x32,
opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
opAndTwoTypes{ORSH, TINT32, TUINT64}: ssa.OpRsh32x64,
opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
opAndTwoTypes{ORSH, TINT64, TUINT8}: ssa.OpRsh64x8,
opAndTwoTypes{ORSH, TUINT64, TUINT8}: ssa.OpRsh64Ux8,
opAndTwoTypes{ORSH, TINT64, TUINT16}: ssa.OpRsh64x16,
opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
opAndTwoTypes{ORSH, TINT64, TUINT32}: ssa.OpRsh64x32,
opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
opAndTwoTypes{ORSH, TINT64, TUINT64}: ssa.OpRsh64x64,
opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}
func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
if !ok {
s.Unimplementedf("unhandled shift op %s etype=%s/%s", op, etype1, etype2)
}
return x
}
func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op {
etype1 := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype1}]
if !ok {
s.Unimplementedf("unhandled rotate op %s etype=%s", op, etype1)
}
return x
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
s.pushLine(n.Lineno)
defer s.popLine()
}
s.stmtList(n.Ninit)
switch n.Op {
case OCFUNC:
aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Left.Sym})
return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
case ONAME:
if n.Class == PFUNC {
// "value" of a function is the address of the function's closure
sym := funcsym(n.Sym)
aux := &ssa.ExternSymbol{Typ: n.Type, Sym: sym}
return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb)
}
if s.canSSA(n) {
return s.variable(n, n.Type)
}
addr, _ := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
case OCLOSUREVAR:
addr, _ := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
case OLITERAL:
switch u := n.Val().U.(type) {
case *Mpint:
i := u.Int64()
switch n.Type.Size() {
case 1:
return s.constInt8(n.Type, int8(i))
case 2:
return s.constInt16(n.Type, int16(i))
case 4:
return s.constInt32(n.Type, int32(i))
case 8:
return s.constInt64(n.Type, i)
default:
s.Fatalf("bad integer size %d", n.Type.Size())
return nil
}
case string:
if u == "" {
return s.constEmptyString(n.Type)
}
return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
case bool:
return s.constBool(u)
case *NilVal:
t := n.Type
switch {
case t.IsSlice():
return s.constSlice(t)
case t.IsInterface():
return s.constInterface(t)
default:
return s.constNil(t)
}
case *Mpflt:
switch n.Type.Size() {
case 4:
return s.constFloat32(n.Type, u.Float32())
case 8:
return s.constFloat64(n.Type, u.Float64())
default:
s.Fatalf("bad float size %d", n.Type.Size())
return nil
}
case *Mpcplx:
r := &u.Real
i := &u.Imag
switch n.Type.Size() {
case 8:
pt := Types[TFLOAT32]
return s.newValue2(ssa.OpComplexMake, n.Type,
s.constFloat32(pt, r.Float32()),
s.constFloat32(pt, i.Float32()))
case 16:
pt := Types[TFLOAT64]
return s.newValue2(ssa.OpComplexMake, n.Type,
s.constFloat64(pt, r.Float64()),
s.constFloat64(pt, i.Float64()))
default:
s.Fatalf("bad float size %d", n.Type.Size())
return nil
}
default:
s.Unimplementedf("unhandled OLITERAL %v", n.Val().Ctype())
return nil
}
case OCONVNOP:
to := n.Type
from := n.Left.Type
// Assume everything will work out, so set up our return value.
// Anything interesting that happens from here is a fatal.
x := s.expr(n.Left)
// Special case for not confusing GC and liveness.
// We don't want pointers accidentally classified
// as not-pointers or vice-versa because of copy
// elision.
if to.IsPtrShaped() != from.IsPtrShaped() {
return s.newValue2(ssa.OpConvert, to, x, s.mem())
}
v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
// CONVNOP closure
if to.Etype == TFUNC && from.IsPtrShaped() {
return v
}
// named <--> unnamed type or typed <--> untyped const
if from.Etype == to.Etype {
return v
}
// unsafe.Pointer <--> *T
if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
return v
}
dowidth(from)
dowidth(to)
if from.Width != to.Width {
s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
return nil
}
if etypesign(from.Etype) != etypesign(to.Etype) {
s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
return nil
}
if instrumenting {
// These appear to be fine, but they fail the
// integer constraint below, so okay them here.
// Sample non-integer conversion: map[string]string -> *uint8
return v
}
if etypesign(from.Etype) == 0 {
s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
return nil
}
// integer, same width, same sign
return v
case OCONV:
x := s.expr(n.Left)
ft := n.Left.Type // from type
tt := n.Type // to type
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
if tt.Size() == ft.Size() {
op = ssa.OpCopy
} else if tt.Size() < ft.Size() {
// truncation
switch 10*ft.Size() + tt.Size() {
case 21:
op = ssa.OpTrunc16to8
case 41:
op = ssa.OpTrunc32to8
case 42:
op = ssa.OpTrunc32to16
case 81:
op = ssa.OpTrunc64to8
case 82:
op = ssa.OpTrunc64to16
case 84:
op = ssa.OpTrunc64to32
default:
s.Fatalf("weird integer truncation %s -> %s", ft, tt)
}
} else if ft.IsSigned() {
// sign extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpSignExt8to16
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad integer sign extension %s -> %s", ft, tt)
}
} else {
// zero extension
switch 10*ft.Size() + tt.Size() {
case 12:
op = ssa.OpZeroExt8to16
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("weird integer sign extension %s -> %s", ft, tt)
}
}
return s.newValue1(op, n.Type, x)
}
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
if s.config.IntSize == 4 && Thearch.LinkArch.Name != "amd64p32" {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if Thearch.LinkArch.Name == "arm64" {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if !ok {
s.Fatalf("weird float conversion %s -> %s", ft, tt)
}
op1, op2, it := conv.op1, conv.op2, conv.intermediateType
if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
// normal case, not tripping over unsigned 64
if op1 == ssa.OpCopy {
if op2 == ssa.OpCopy {
return x
}
return s.newValue1(op2, n.Type, x)
}
if op2 == ssa.OpCopy {
return s.newValue1(op1, n.Type, x)
}
return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
// therefore tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
return s.uint64Tofloat32(n, x, ft, tt)
}
if tt.Size() == 8 {
return s.uint64Tofloat64(n, x, ft, tt)
}
s.Fatalf("weird unsigned integer to float conversion %s -> %s", ft, tt)
}
// therefore ft is float32 or float64, and tt is unsigned integer
if ft.Size() == 4 {
return s.float32ToUint64(n, x, ft, tt)
}
if ft.Size() == 8 {
return s.float64ToUint64(n, x, ft, tt)
}
s.Fatalf("weird float to unsigned integer conversion %s -> %s", ft, tt)
return nil
}
if ft.IsComplex() && tt.IsComplex() {
var op ssa.Op
if ft.Size() == tt.Size() {
op = ssa.OpCopy
} else if ft.Size() == 8 && tt.Size() == 16 {
op = ssa.OpCvt32Fto64F
} else if ft.Size() == 16 && tt.Size() == 8 {
op = ssa.OpCvt64Fto32F
} else {
s.Fatalf("weird complex conversion %s -> %s", ft, tt)
}
ftp := floatForComplex(ft)
ttp := floatForComplex(tt)
return s.newValue2(ssa.OpComplexMake, tt,
s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
s.Unimplementedf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
return nil
case ODOTTYPE:
res, _ := s.dottype(n, false)
return res
// binary ops
case OLT, OEQ, ONE, OLE, OGE, OGT:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Left.Type.IsComplex() {
pt := floatForComplex(n.Left.Type)
op := s.ssaOp(OEQ, pt)
r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
c := s.newValue2(ssa.OpAnd8, Types[TBOOL], r, i)
switch n.Op {
case OEQ:
return c
case ONE:
return s.newValue1(ssa.OpNot, Types[TBOOL], c)
default:
s.Fatalf("ordered complex compare %s", n.Op)
}
}
return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b)
case OMUL:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Type.IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
pt := floatForComplex(n.Type) // Could be Float32 or Float64
wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
}
xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))
if pt != wt { // Narrow to store back
xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
}
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case ODIV:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Type.IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
// conversions could all be elided in larger expression trees.
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
pt := floatForComplex(n.Type) // Could be Float32 or Float64
wt := Types[TFLOAT64] // Compute in Float64 to minimize cancelation error
areal := s.newValue1(ssa.OpComplexReal, pt, a)
breal := s.newValue1(ssa.OpComplexReal, pt, b)
aimag := s.newValue1(ssa.OpComplexImag, pt, a)
bimag := s.newValue1(ssa.OpComplexImag, pt, b)
if pt != wt { // Widen for calculation
areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
}
denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))
// TODO not sure if this is best done in wide precision or narrow
// Double-rounding might be an issue.
// Note that the pre-SSA implementation does the entire calculation
// in wide format, so wide is compatible.
xreal = s.newValue2(divop, wt, xreal, denom)
ximag = s.newValue2(divop, wt, ximag, denom)
if pt != wt { // Narrow to store back
xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
}
return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
}
if n.Type.IsFloat() {
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
} else {
// do a size-appropriate check for zero
cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
s.check(cmp, panicdivide)
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
}
case OMOD:
a := s.expr(n.Left)
b := s.expr(n.Right)
// do a size-appropriate check for zero
cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
s.check(cmp, panicdivide)
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OADD, OSUB:
a := s.expr(n.Left)
b := s.expr(n.Right)
if n.Type.IsComplex() {
pt := floatForComplex(n.Type)
op := s.ssaOp(n.Op, pt)
return s.newValue2(ssa.OpComplexMake, n.Type,
s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
}
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OAND, OOR, OHMUL, OXOR:
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
case OLSH, ORSH:
a := s.expr(n.Left)
b := s.expr(n.Right)
return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
case OLROT:
a := s.expr(n.Left)
i := n.Right.Int64()
if i <= 0 || i >= n.Type.Size()*8 {
s.Fatalf("Wrong rotate distance for LROT, expected 1 through %d, saw %d", n.Type.Size()*8-1, i)
}
return s.newValue1I(s.ssaRotateOp(n.Op, n.Type), a.Type, i, a)
case OANDAND, OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
// variable is associated with the OANDAND node in the
// s.vars table (normally variables are only
// associated with ONAME nodes). We convert
// A && B
// to
// var = A
// if var {
// var = B
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
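// The || case is handled by the same code with the branch edges
// swapped; as a sketch, A || B becomes
// var = A
// if !var {
// var = B
// }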
el := s.expr(n.Left)
s.vars[n] = el
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(el)
// In theory, we should set b.Likely here based on context.
// However, gc only gives us likeliness hints
// in a single place, for plain OIF statements,
// and passing around context is finicky, so don't bother for now.
bRight := s.f.NewBlock(ssa.BlockPlain)
bResult := s.f.NewBlock(ssa.BlockPlain)
if n.Op == OANDAND {
b.AddEdgeTo(bRight)
b.AddEdgeTo(bResult)
} else if n.Op == OOROR {
b.AddEdgeTo(bResult)
b.AddEdgeTo(bRight)
}
s.startBlock(bRight)
er := s.expr(n.Right)
s.vars[n] = er
b = s.endBlock()
b.AddEdgeTo(bResult)
s.startBlock(bResult)
return s.variable(n, Types[TBOOL])
case OCOMPLEX:
r := s.expr(n.Left)
i := s.expr(n.Right)
return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
// unary ops
case OMINUS:
a := s.expr(n.Left)
if n.Type.IsComplex() {
tp := floatForComplex(n.Type)
negop := s.ssaOp(n.Op, tp)
return s.newValue2(ssa.OpComplexMake, n.Type,
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
}
return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
case ONOT, OCOM, OSQRT:
a := s.expr(n.Left)
return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
case OIMAG, OREAL:
a := s.expr(n.Left)
return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
case OPLUS:
return s.expr(n.Left)
case OADDR:
a, _ := s.addr(n.Left, n.Bounded)
// Note we know the volatile result is false because you can't write &f() in Go.
return a
case OINDREG:
if int(n.Reg) != Thearch.REGSP {
s.Unimplementedf("OINDREG of non-SP register %s in expr: %v", obj.Rconv(int(n.Reg)), n)
return nil
}
addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.sp)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
case OIND:
p := s.exprPtr(n.Left, false, n.Lineno)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case ODOT:
t := n.Left.Type
if canSSAType(t) {
v := s.expr(n.Left)
return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
}
p, _ := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case ODOTPTR:
p := s.exprPtr(n.Left, false, n.Lineno)
p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case OINDEX:
switch {
case n.Left.Type.IsString():
a := s.expr(n.Left)
i := s.expr(n.Right)
i = s.extendIndex(i, Panicindex)
if !n.Bounded {
len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
s.boundsCheck(i, len)
}
ptrtyp := Ptrto(Types[TUINT8])
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
if Isconst(n.Right, CTINT) {
ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem())
case n.Left.Type.IsSlice():
p, _ := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
case n.Left.Type.IsArray():
// TODO: fix when we can SSA arrays of length 1.
p, _ := s.addr(n, false)
return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
default:
s.Fatalf("bad type for index %v", n.Left.Type)
return nil
}
case OLEN, OCAP:
switch {
case n.Left.Type.IsSlice():
op := ssa.OpSliceLen
if n.Op == OCAP {
op = ssa.OpSliceCap
}
return s.newValue1(op, Types[TINT], s.expr(n.Left))
case n.Left.Type.IsString(): // string; not reachable for OCAP
return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left))
case n.Left.Type.IsMap(), n.Left.Type.IsChan():
return s.referenceTypeBuiltin(n, s.expr(n.Left))
default: // array
return s.constInt(Types[TINT], n.Left.Type.NumElem())
}
case OSPTR:
a := s.expr(n.Left)
if n.Left.Type.IsSlice() {
return s.newValue1(ssa.OpSlicePtr, n.Type, a)
} else {
return s.newValue1(ssa.OpStringPtr, n.Type, a)
}
case OITAB:
a := s.expr(n.Left)
return s.newValue1(ssa.OpITab, n.Type, a)
case OIDATA:
a := s.expr(n.Left)
return s.newValue1(ssa.OpIData, n.Type, a)
case OEFACE:
tab := s.expr(n.Left)
data := s.expr(n.Right)
// The frontend allows putting things like struct{*byte} in
// the data portion of an eface. But we don't want struct{*byte}
// as a register type because (among other reasons) the liveness
// analysis is confused by the "fat" variables that result from
// such types being spilled.
// So here we ensure that we are selecting the underlying pointer
// when we build an eface.
// TODO: get rid of this now that structs can be SSA'd?
for !data.Type.IsPtrShaped() {
switch {
case data.Type.IsArray():
data = s.newValue1I(ssa.OpArrayIndex, data.Type.ElemType(), 0, data)
case data.Type.IsStruct():
for i := data.Type.NumFields() - 1; i >= 0; i-- {
f := data.Type.FieldType(i)
if f.Size() == 0 {
// eface type could also be struct{p *byte; q [0]int}
continue
}
data = s.newValue1I(ssa.OpStructSelect, f, int64(i), data)
break
}
default:
s.Fatalf("type being put into an eface isn't a pointer")
}
}
return s.newValue2(ssa.OpIMake, n.Type, tab, data)
case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
v := s.expr(n.Left)
var i, j, k *ssa.Value
low, high, max := n.SliceBounds()
if low != nil {
i = s.extendIndex(s.expr(low), panicslice)
}
if high != nil {
j = s.extendIndex(s.expr(high), panicslice)
}
if max != nil {
k = s.extendIndex(s.expr(max), panicslice)
}
p, l, c := s.slice(n.Left.Type, v, i, j, k)
return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
case OSLICESTR:
v := s.expr(n.Left)
var i, j *ssa.Value
low, high, _ := n.SliceBounds()
if low != nil {
i = s.extendIndex(s.expr(low), panicslice)
}
if high != nil {
j = s.extendIndex(s.expr(high), panicslice)
}
p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
return s.newValue2(ssa.OpStringMake, n.Type, p, l)
case OCALLFUNC:
if isIntrinsicCall1(n) {
return s.intrinsicCall1(n)
}
fallthrough
case OCALLINTER, OCALLMETH:
a := s.call(n, callNormal)
return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())
case OGETG:
return s.newValue1(ssa.OpGetG, n.Type, s.mem())
case OAPPEND:
return s.append(n, false)
default:
s.Unimplementedf("unhandled expr %s", n.Op)
return nil
}
}
// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
func (s *state) append(n *Node, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
// newlen := len + 3
// if newlen > cap {
// ptr, len, cap = growslice(s, newlen)
// newlen = len + 3 // recalculate to avoid a spill
// }
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
// return makeslice(ptr, newlen, cap)
//
//
// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
//
// a := &s
// ptr, len, cap := s
// newlen := len + 3
// if newlen > cap {
// newptr, len, newcap = growslice(ptr, len, cap, newlen)
// vardef(a) // if necessary, advise liveness we are writing a new a
// *a.cap = newcap // write before ptr to avoid a spill
// *a.ptr = newptr // with write barrier
// }
// newlen = len + 3 // recalculate to avoid a spill
// *a.len = newlen
// // with write barriers, if needed:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
et := n.Type.Elem()
pt := Ptrto(et)
// Evaluate slice
sn := n.List.First() // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
addr, _ = s.addr(sn, false)
slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
} else {
slice = s.expr(sn)
}
// Allocate new blocks
grow := s.f.NewBlock(ssa.BlockPlain)
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
nargs := int64(n.List.Len() - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice)
nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c)
s.vars[&ptrVar] = p
if !inplace {
s.vars[&newlenVar] = nl
s.vars[&capVar] = c
} else {
s.vars[&lenVar] = l
}
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
b.SetControl(cmp)
b.AddEdgeTo(grow)
b.AddEdgeTo(assign)
// Call growslice
s.startBlock(grow)
taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(n.Type.Elem())}, s.sb)
r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)
if inplace {
if sn.Op == ONAME {
// Tell liveness we're about to build a new slice
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem())
}
capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_cap), addr)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem())
s.insertWBstore(pt, addr, r[0], n.Lineno, 0)
// load the value we just stored to avoid having to spill it
s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
s.vars[&lenVar] = r[1] // avoid a spill in the fast path
} else {
s.vars[&ptrVar] = r[0]
s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs))
s.vars[&capVar] = r[2]
}
b = s.endBlock()
b.AddEdgeTo(assign)
// assign new elements to slots
s.startBlock(assign)
if inplace {
l = s.variable(&lenVar, Types[TINT]) // generates phi for len
nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_nel), addr)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem())
}
// Evaluate args
type argRec struct {
// if store is true, we're appending the value v. If false, we're appending the
// value at *v. If store==false, isVolatile reports whether the source
// is in the outargs section of the stack frame.
v *ssa.Value
store bool
isVolatile bool
}
args := make([]argRec, 0, nargs)
for _, n := range n.List.Slice()[1:] {
if canSSAType(n.Type) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v, isVolatile := s.addr(n, false)
args = append(args, argRec{v: v, isVolatile: isVolatile})
}
}
p = s.variable(&ptrVar, pt) // generates phi for ptr
if !inplace {
nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl
c = s.variable(&capVar, Types[TINT]) // generates phi for cap
}
p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
// TODO: just one write barrier call for all of these writes?
// TODO: maybe just one writeBarrier.enabled check?
for i, arg := range args {
addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
if arg.store {
if haspointers(et) {
s.insertWBstore(et, addr, arg.v, n.Lineno, 0)
} else {
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
}
} else {
if haspointers(et) {
s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile)
} else {
s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(et), addr, arg.v, s.mem())
}
}
}
delete(s.vars, &ptrVar)
if inplace {
delete(s.vars, &lenVar)
return nil
}
delete(s.vars, &newlenVar)
delete(s.vars, &capVar)
// make result
return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
}
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
if cond.Op == OANDAND {
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, mid, no, max8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Right, yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
// whether the first branch is likely or not. So we pass 0 for
// the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
}
if cond.Op == OOROR {
mid := s.f.NewBlock(ssa.BlockPlain)
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, yes, mid, min8(likely, 0))
s.startBlock(mid)
s.condBranch(cond.Right, yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
}
if cond.Op == ONOT {
s.stmtList(cond.Ninit)
s.condBranch(cond.Left, no, yes, -likely)
return
}
c := s.expr(cond)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(c)
b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
b.AddEdgeTo(yes)
b.AddEdgeTo(no)
}
type skipMask uint8
const (
skipPtr skipMask = 1 << iota
skipLen
skipCap
)
// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// If deref is true, rightIsVolatile reports whether right points to volatile (clobbered by a call) storage.
// Include a write barrier if wb is true.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32, skip skipMask, rightIsVolatile bool) {
if left.Op == ONAME && isblank(left) {
return
}
t := left.Type
dowidth(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %s but not RHS %s", left, right)
}
if left.Op == ODOT {
// We're assigning to a field of an ssa-able value.
// We need to build a new structure with the new value for the
// field we're assigning and the old values for the other fields.
// For instance:
// type T struct {a, b, c int}
// var x T
// x.b = 5
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
// Grab information about the structure type.
t := left.Left.Type
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
old := s.expr(left.Left)
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
// Add fields as args.
for i := 0; i < nf; i++ {
if i == idx {
new.AddArg(right)
} else {
new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
}
}
// Recursively assign the new value we've made to the base of the dot op.
s.assign(left.Left, new, false, false, line, 0, rightIsVolatile)
// TODO: do we need to update named values here?
return
}
// Update variable assignment.
s.vars[left] = right
s.addNamedValue(left, right)
return
}
// Left is not ssa-able. Compute its address.
addr, _ := s.addr(left, false)
if left.Op == ONAME && skip == 0 {
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
}
if deref {
// Treat as a mem->mem move.
if right == nil {
s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, SizeAlignAuxInt(t), addr, s.mem())
return
}
if wb {
s.insertWBmove(t, addr, right, line, rightIsVolatile)
return
}
s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), addr, right, s.mem())
return
}
// Treat as a store.
if wb {
if skip&skipPtr != 0 {
// Special case: if we don't write back the pointers, don't bother
// doing the write barrier check.
s.storeTypeScalars(t, addr, right, skip)
return
}
s.insertWBstore(t, addr, right, line, skip)
return
}
if skip != 0 {
if skip&skipPtr == 0 {
s.storeTypePtrs(t, addr, right)
}
s.storeTypeScalars(t, addr, right, skip)
return
}
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem())
}
// zeroVal returns the zero value for type t.
func (s *state) zeroVal(t *Type) *ssa.Value {
switch {
case t.IsInteger():
switch t.Size() {
case 1:
return s.constInt8(t, 0)
case 2:
return s.constInt16(t, 0)
case 4:
return s.constInt32(t, 0)
case 8:
return s.constInt64(t, 0)
default:
s.Fatalf("bad sized integer type %s", t)
}
case t.IsFloat():
switch t.Size() {
case 4:
return s.constFloat32(t, 0)
case 8:
return s.constFloat64(t, 0)
default:
s.Fatalf("bad sized float type %s", t)
}
case t.IsComplex():
switch t.Size() {
case 8:
z := s.constFloat32(Types[TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
case 16:
z := s.constFloat64(Types[TFLOAT64], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
default:
s.Fatalf("bad sized complex type %s", t)
}
case t.IsString():
return s.constEmptyString(t)
case t.IsPtrShaped():
return s.constNil(t)
case t.IsBoolean():
return s.constBool(false)
case t.IsInterface():
return s.constInterface(t)
case t.IsSlice():
return s.constSlice(t)
case t.IsStruct():
n := t.NumFields()
v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
for i := 0; i < n; i++ {
v.AddArg(s.zeroVal(t.FieldType(i).(*Type)))
}
return v
}
s.Unimplementedf("zero for type %v not implemented", t)
return nil
}
type callKind int8
const (
callNormal callKind = iota
callDefer
callGo
)
// isSSAIntrinsic1 returns true if n is a call to a recognized 1-arg intrinsic
// that can be handled by the SSA backend.
// SSA uses this, but so does the front end to see whether it should not
// inline a function because it is a candidate for intrinsic
// substitution.
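// For example, a call to runtime/internal/sys.Ctz64 is replaced by a single
// OpCtz64 value instead of a call (see intrinsicCall1 below).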
func isSSAIntrinsic1(s *Sym) bool {
// The test below is not quite accurate -- in the event that
// a function is disabled on a per-function basis, for example
// because of hash-keyed binary failure search, SSA might be
// disabled for that function but it would not be noted here,
// and thus an inlining would not occur (in practice, inlining
// so far has only been noticed for Bswap32 and the 16-bit count
// leading/trailing instructions, but heuristics might change
// in the future or on different architectures).
if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.LinkArch.Family != sys.AMD64 {
return false
}
if s != nil && s.Pkg != nil && s.Pkg.Path == "runtime/internal/sys" {
switch s.Name {
case
"Ctz64", "Ctz32", "Ctz16",
"Bswap64", "Bswap32":
return true
}
}
return false
}
func isIntrinsicCall1(n *Node) bool {
if n == nil || n.Left == nil {
return false
}
return isSSAIntrinsic1(n.Left.Sym)
}
// intrinsicFirstArg extracts the first argument from n.List and evaluates it.
func (s *state) intrinsicFirstArg(n *Node) *ssa.Value {
x := n.List.First()
if x.Op == OAS {
x = x.Right
}
return s.expr(x)
}
// intrinsicCall1 converts a call to a recognized 1-arg intrinsic
// into the intrinsic
func (s *state) intrinsicCall1(n *Node) *ssa.Value {
var result *ssa.Value
switch n.Left.Sym.Name {
case "Ctz64":
result = s.newValue1(ssa.OpCtz64, Types[TUINT64], s.intrinsicFirstArg(n))
case "Ctz32":
result = s.newValue1(ssa.OpCtz32, Types[TUINT32], s.intrinsicFirstArg(n))
case "Ctz16":
result = s.newValue1(ssa.OpCtz16, Types[TUINT16], s.intrinsicFirstArg(n))
case "Bswap64":
result = s.newValue1(ssa.OpBswap64, Types[TUINT64], s.intrinsicFirstArg(n))
case "Bswap32":
result = s.newValue1(ssa.OpBswap32, Types[TUINT32], s.intrinsicFirstArg(n))
}
if result == nil {
Fatalf("Unknown special call: %v", n.Left.Sym)
}
if ssa.IntrinsicsDebug > 0 {
Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, result.LongString())
}
return result
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
func (s *state) call(n *Node, k callKind) *ssa.Value {
var sym *Sym // target symbol (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
fn := n.Left
switch n.Op {
case OCALLFUNC:
if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC {
sym = fn.Sym
break
}
closure = s.expr(fn)
case OCALLMETH:
if fn.Op != ODOTMETH {
Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
if k == callNormal {
sym = fn.Sym
break
}
n2 := newname(fn.Sym)
n2.Class = PFUNC
n2.Lineno = fn.Lineno
closure = s.expr(n2)
// Note: receiver is already assigned in n.List, so we don't
// want to set it here.
case OCALLINTER:
if fn.Op != ODOTINTER {
Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
}
i := s.expr(fn.Left)
itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i)
if k != callNormal {
s.nilCheck(itab)
}
itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
itab = s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), itabidx, itab)
if k == callNormal {
codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
} else {
closure = itab
}
rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i)
}
dowidth(fn.Type)
stksize := fn.Type.ArgWidth() // includes receiver
// Run all argument assignments. The arg slots have already
// been offset by the appropriate amount (+2*widthptr for go/defer,
// +widthptr for interface calls).
// For OCALLMETH, the receiver is set in these statements.
s.stmtList(n.List)
// Set receiver (for interface calls)
if rcvr != nil {
argStart := Ctxt.FixedFrameSize()
if k != callNormal {
argStart += int64(2 * Widthptr)
}
addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), argStart, s.sp)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem())
}
// Defer/go args
if k != callNormal {
// Write argsize and closure (args to Newproc/Deferproc).
argStart := Ctxt.FixedFrameSize()
argsize := s.constInt32(Types[TUINT32], int32(stksize))
addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT32]), argStart, s.sp)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, addr, argsize, s.mem())
addr = s.entryNewValue1I(ssa.OpOffPtr, Ptrto(Types[TUINTPTR]), argStart+int64(Widthptr), s.sp)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem())
stksize += 2 * int64(Widthptr)
}
// call target
bNext := s.f.NewBlock(ssa.BlockPlain)
var call *ssa.Value
switch {
case k == callDefer:
call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem())
case k == callGo:
call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem())
case closure != nil:
codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem())
call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem())
case codeptr != nil:
call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem())
case sym != nil:
call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem())
default:
Fatalf("bad call type %s %v", n.Op, n)
}
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
// Finish call block
s.vars[&memVar] = call
b := s.endBlock()
b.Kind = ssa.BlockCall
b.SetControl(call)
b.AddEdgeTo(bNext)
if k == callDefer {
// Add recover edge to exit code.
b.Kind = ssa.BlockDefer
r := s.f.NewBlock(ssa.BlockPlain)
s.startBlock(r)
s.exit()
b.AddEdgeTo(r)
b.Likely = ssa.BranchLikely
}
// Start exit block, find address of result.
s.startBlock(bNext)
// Keep input pointer args live across calls. This is a bandaid until 1.8.
for _, n := range s.ptrargs {
s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
}
res := n.Left.Type.Results()
if res.NumFields() == 0 || k != callNormal {
// call has no return value. Continue with the next statement.
return nil
}
fp := res.Field(0)
return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp)
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e EType) int8 {
switch e {
case TINT8, TINT16, TINT32, TINT64, TINT:
return -1
case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
return +1
}
return 0
}
// lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
// This improves the effectiveness of cse by using the same Aux values for the
// same symbols.
func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
switch sym.(type) {
default:
s.Fatalf("sym %v is of uknown type %T", sym, sym)
case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
// these are the only valid types
}
if lsym, ok := s.varsyms[n]; ok {
return lsym
} else {
s.varsyms[n] = sym
return sym
}
}
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// Also returns a bool reporting whether the returned value is "volatile", that is, it
// points to the outargs section and thus the referent will be clobbered by any call.
// The value that the returned Value represents is guaranteed to be non-nil.
// If bounded is true then this address does not require a nil check for its operand
// even if that would otherwise be implied.
func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) {
t := Ptrto(n.Type)
switch n.Op {
case ONAME:
switch n.Class {
case PEXTERN:
// global variable
aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Sym})
v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
if n.Xoffset != 0 {
v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
}
return v, false
case PPARAM:
// parameter slot
v := s.decladdrs[n]
if v != nil {
return v, false
}
if n.String() == ".fp" {
// Special arg that points to the frame pointer.
// (Used by the race detector, others?)
aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp), false
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil, false
case PAUTO:
aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n})
return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false
case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
// ensure that we reuse symbols for out parameters so
// that cse works on their addresses
aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false
default:
s.Unimplementedf("variable address class %v not implemented", classnames[n.Class])
return nil, false
}
case OINDREG:
// indirect off a register
// used for storing/loading arguments/returns to/from callees
if int(n.Reg) != Thearch.REGSP {
s.Unimplementedf("OINDREG of non-SP register %s in addr: %v", obj.Rconv(int(n.Reg)), n)
return nil, false
}
return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp), true
case OINDEX:
if n.Left.Type.IsSlice() {
a := s.expr(n.Left)
i := s.expr(n.Right)
i = s.extendIndex(i, Panicindex)
len := s.newValue1(ssa.OpSliceLen, Types[TINT], a)
if !n.Bounded {
s.boundsCheck(i, len)
}
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i), false
} else { // array
a, isVolatile := s.addr(n.Left, bounded)
i := s.expr(n.Right)
i = s.extendIndex(i, Panicindex)
len := s.constInt(Types[TINT], n.Left.Type.NumElem())
if !n.Bounded {
s.boundsCheck(i, len)
}
return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Elem()), a, i), isVolatile
}
case OIND:
return s.exprPtr(n.Left, bounded, n.Lineno), false
case ODOT:
p, isVolatile := s.addr(n.Left, bounded)
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), isVolatile
case ODOTPTR:
p := s.exprPtr(n.Left, bounded, n.Lineno)
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), false
case OCLOSUREVAR:
return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8]))), false
case OCONVNOP:
addr, isVolatile := s.addr(n.Left, bounded)
return s.newValue1(ssa.OpCopy, t, addr), isVolatile // ensure that addr has the right type
case OCALLFUNC, OCALLINTER, OCALLMETH:
return s.call(n, callNormal), true
default:
s.Unimplementedf("unhandled addr %v", n.Op)
return nil, false
}
}
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
func (s *state) canSSA(n *Node) bool {
if Debug['N'] != 0 {
return false
}
for n.Op == ODOT {
n = n.Left
}
if n.Op != ONAME {
return false
}
if n.Addrtaken {
return false
}
if n.isParamHeapCopy() {
return false
}
if n.Class == PAUTOHEAP {
Fatalf("canSSA of PAUTOHEAP %v", n)
}
switch n.Class {
case PEXTERN:
return false
case PPARAMOUT:
if hasdefer {
// TODO: handle this case? Named return values must be
// in memory so that the deferred function can see them.
// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
return false
}
if s.cgoUnsafeArgs {
// Cgo effectively takes the address of all result args,
// but the compiler can't see that.
return false
}
}
if n.Class == PPARAM && n.String() == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARAMOUT?
return false
}
return canSSAType(n.Type)
// TODO: try to make more variables SSAable?
}
// canSSAType reports whether variables of type t are SSA-able.
func canSSAType(t *Type) bool {
dowidth(t)
if t.Width > int64(4*Widthptr) {
// 4*Widthptr is an arbitrary constant. We want it
// to be at least 3*Widthptr so slices can be registerized.
// Too big and we'll introduce too much register pressure.
return false
}
switch t.Etype {
case TARRAY:
// We can't do arrays because dynamic indexing is
// not supported on SSA variables.
// TODO: maybe allow if length is <=1? All indexes
// are constant? Might be good for the arrays
// introduced by the compiler for variadic functions.
return false
case TSTRUCT:
if t.NumFields() > ssa.MaxStruct {
return false
}
for _, t1 := range t.Fields().Slice() {
if !canSSAType(t1.Type) {
return false
}
}
return true
default:
return true
}
}
// exprPtr evaluates n to a pointer and nil-checks it.
func (s *state) exprPtr(n *Node, bounded bool, lineno int32) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil {
if s.f.Config.Debug_checknil() && lineno > 1 {
s.f.Config.Warnl(lineno, "removed nil check")
}
return p
}
s.nilCheck(p)
return p
}
// nilCheck generates nil pointer checking code.
// Starts a new block on return, unless nil checks are disabled.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
if Disable_checknil != 0 {
return
}
chk := s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
b := s.endBlock()
b.Kind = ssa.BlockCheck
b.SetControl(chk)
bNext := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bNext)
s.startBlock(bNext)
}
// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
// Starts a new block on return.
// idx is already converted to full int width.
func (s *state) boundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 {
return
}
// bounds check
cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len)
s.check(cmp, Panicindex)
}
// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
// Starts a new block on return.
// idx and len are already converted to full int width.
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 {
return
}
// bounds check
cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
s.check(cmp, panicslice)
}
// If cmp (a bool) is false, panic using the given function.
func (s *state) check(cmp *ssa.Value, fn *Node) {
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bNext := s.f.NewBlock(ssa.BlockPlain)
line := s.peekLine()
bPanic := s.panics[funcLine{fn, line}]
if bPanic == nil {
bPanic = s.f.NewBlock(ssa.BlockPlain)
s.panics[funcLine{fn, line}] = bPanic
s.startBlock(bPanic)
// The panic call takes/returns memory to ensure that the right
// memory state is observed if the panic happens.
s.rtcall(fn, false, nil)
}
b.AddEdgeTo(bNext)
b.AddEdgeTo(bPanic)
s.startBlock(bNext)
}
// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
// If returns is true, the block is marked as a call block. A new block
// is started to load the return values.
func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
// Write args to the stack
off := Ctxt.FixedFrameSize()
for _, arg := range args {
t := arg.Type
off = Rnd(off, t.Alignment())
ptr := s.sp
if off != 0 {
ptr = s.newValue1I(ssa.OpOffPtr, t.PtrTo(), off, s.sp)
}
size := t.Size()
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem())
off += size
}
off = Rnd(off, int64(Widthptr))
if Thearch.LinkArch.Name == "amd64p32" {
// amd64p32 wants 8-byte alignment of the start of the return values.
off = Rnd(off, 8)
}
// Issue call
call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem())
s.vars[&memVar] = call
// Finish block
b := s.endBlock()
if !returns {
b.Kind = ssa.BlockExit
b.SetControl(call)
call.AuxInt = off - Ctxt.FixedFrameSize()
if len(results) > 0 {
Fatalf("panic call can't have results")
}
return nil
}
b.Kind = ssa.BlockCall
b.SetControl(call)
bNext := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bNext)
s.startBlock(bNext)
// Keep input pointer args live across calls. This is a bandaid until 1.8.
for _, n := range s.ptrargs {
s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, s.variable(n, n.Type), s.mem())
}
// Load results
res := make([]*ssa.Value, len(results))
for i, t := range results {
off = Rnd(off, t.Alignment())
ptr := s.sp
if off != 0 {
ptr = s.newValue1I(ssa.OpOffPtr, Ptrto(t), off, s.sp)
}
res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
off += t.Size()
}
off = Rnd(off, int64(Widthptr))
// Remember how much callee stack space we needed.
call.AuxInt = off
return res
}
// insertWBmove inserts the assignment *left = *right including a write barrier.
// t is the type being assigned.
func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightIsVolatile bool) {
// if writeBarrier.enabled {
// typedmemmove(&t, left, right)
// } else {
// *left = *right
// }
if s.noWB {
s.Fatalf("write barrier prohibited")
}
if s.WBLineno == 0 {
s.WBLineno = left.Line
}
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
// Load word, test word, avoiding partial register write from load byte.
flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
flag = s.newValue2(ssa.OpNeq32, Types[TBOOL], flag, s.constInt32(Types[TUINT32], 0))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
b.SetControl(flag)
b.AddEdgeTo(bThen)
b.AddEdgeTo(bElse)
s.startBlock(bThen)
if !rightIsVolatile {
// Issue typedmemmove call.
taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
s.rtcall(typedmemmove, true, nil, taddr, left, right)
} else {
// Copy to temp location if the source is volatile (will be clobbered by
// a function call). Marshaling the args to typedmemmove might clobber the
// value we're trying to move.
tmp := temp(t)
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
tmpaddr, _ := s.addr(tmp, true)
s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), tmpaddr, right, s.mem())
// Issue typedmemmove call.
taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}, s.sb)
s.rtcall(typedmemmove, true, nil, taddr, left, tmpaddr)
// Mark temp as dead.
s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem())
}
s.endBlock().AddEdgeTo(bEnd)
s.startBlock(bElse)
s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, SizeAlignAuxInt(t), left, right, s.mem())
s.endBlock().AddEdgeTo(bEnd)
s.startBlock(bEnd)
if Debug_wb > 0 {
Warnl(line, "write barrier")
}
}
// insertWBstore inserts the assignment *left = right including a write barrier.
// t is the type being assigned.
func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip skipMask) {
// store scalar fields
// if writeBarrier.enabled {
// writebarrierptr for pointer fields
// } else {
// store pointer fields
// }
if s.noWB {
s.Fatalf("write barrier prohibited")
}
if s.WBLineno == 0 {
s.WBLineno = left.Line
}
s.storeTypeScalars(t, left, right, skip)
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
aux := &ssa.ExternSymbol{Typ: Types[TBOOL], Sym: syslook("writeBarrier").Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
// Load word, test word, avoiding partial register write from load byte.
flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
flag = s.newValue2(ssa.OpNeq32, Types[TBOOL], flag, s.constInt32(Types[TUINT32], 0))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
b.SetControl(flag)
b.AddEdgeTo(bThen)
b.AddEdgeTo(bElse)
// Issue write barriers for pointer writes.
s.startBlock(bThen)
s.storeTypePtrsWB(t, left, right)
s.endBlock().AddEdgeTo(bEnd)
// Issue regular stores for pointer writes.
s.startBlock(bElse)
s.storeTypePtrs(t, left, right)
s.endBlock().AddEdgeTo(bEnd)
s.startBlock(bEnd)
if Debug_wb > 0 {
Warnl(line, "write barrier")
}
}
// do *left = right for all scalar (non-pointer) parts of t.
func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) {
switch {
case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem())
case t.IsPtrShaped():
// no scalar fields.
case t.IsString():
if skip&skipLen != 0 {
return
}
len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
case t.IsSlice():
if skip&skipLen == 0 {
len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
lenAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), s.config.IntSize, left)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
}
if skip&skipCap == 0 {
cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
capAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TINT]), 2*s.config.IntSize, left)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem())
}
case t.IsInterface():
// itab field doesn't need a write barrier (even though it is a pointer).
itab := s.newValue1(ssa.OpITab, Ptrto(Types[TUINT8]), right)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem())
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypeScalars(ft.(*Type), addr, val, 0)
}
default:
s.Fatalf("bad write barrier type %s", t)
}
}
// do *left = right for all pointer parts of t.
func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
case t.IsSlice():
ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
case t.IsInterface():
// itab field is treated as a scalar.
idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem())
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
if !haspointers(ft.(*Type)) {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypePtrs(ft.(*Type), addr, val)
}
default:
s.Fatalf("bad write barrier type %s", t)
}
}
// do *left = right with a write barrier for all pointer parts of t.
func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) {
switch {
case t.IsPtrShaped():
s.rtcall(writebarrierptr, true, nil, left, right)
case t.IsString():
ptr := s.newValue1(ssa.OpStringPtr, Ptrto(Types[TUINT8]), right)
s.rtcall(writebarrierptr, true, nil, left, ptr)
case t.IsSlice():
ptr := s.newValue1(ssa.OpSlicePtr, Ptrto(Types[TUINT8]), right)
s.rtcall(writebarrierptr, true, nil, left, ptr)
case t.IsInterface():
idata := s.newValue1(ssa.OpIData, Ptrto(Types[TUINT8]), right)
idataAddr := s.newValue1I(ssa.OpOffPtr, Ptrto(Types[TUINT8]), s.config.PtrSize, left)
s.rtcall(writebarrierptr, true, nil, idataAddr, idata)
case t.IsStruct():
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
if !haspointers(ft.(*Type)) {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
s.storeTypePtrsWB(ft.(*Type), addr, val)
}
default:
s.Fatalf("bad write barrier type %s", t)
}
}
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// t is a slice, ptr to array, or string type.
func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
var elemtype *Type
var ptrtype *Type
var ptr *ssa.Value
var len *ssa.Value
var cap *ssa.Value
zero := s.constInt(Types[TINT], 0)
switch {
case t.IsSlice():
elemtype = t.Elem()
ptrtype = Ptrto(elemtype)
ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
len = s.newValue1(ssa.OpSliceLen, Types[TINT], v)
cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v)
case t.IsString():
elemtype = Types[TUINT8]
ptrtype = Ptrto(elemtype)
ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
len = s.newValue1(ssa.OpStringLen, Types[TINT], v)
cap = len
case t.IsPtr():
if !t.Elem().IsArray() {
s.Fatalf("bad ptr to array in slice %v\n", t)
}
elemtype = t.Elem().Elem()
ptrtype = Ptrto(elemtype)
s.nilCheck(v)
ptr = v
len = s.constInt(Types[TINT], t.Elem().NumElem())
cap = len
default:
s.Fatalf("bad type in slice %v\n", t)
}
// Set default values
if i == nil {
i = zero
}
if j == nil {
j = len
}
if k == nil {
k = cap
}
// Panic if slice indices are not in bounds.
s.sliceBoundsCheck(i, j)
if j != k {
s.sliceBoundsCheck(j, k)
}
if k != cap {
s.sliceBoundsCheck(k, cap)
}
// Generate the following code assuming that indexes are in bounds.
// The conditional is to make sure that we don't generate a slice
// that points to the next object in memory.
// rlen = j-i
// rcap = k-i
// delta = i*elemsize
// if rcap == 0 {
// delta = 0
// }
// rptr = p+delta
// result = (SliceMake rptr rlen rcap)
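// For example (illustrative): for s := make([]int, 4), the expression s[4:]
// has rcap == 0, so delta is forced to 0 and rptr stays equal to ptr instead
// of pointing one element past the end of s's backing array.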
subOp := s.ssaOp(OSUB, Types[TINT])
eqOp := s.ssaOp(OEQ, Types[TINT])
mulOp := s.ssaOp(OMUL, Types[TINT])
rlen := s.newValue2(subOp, Types[TINT], j, i)
var rcap *ssa.Value
switch {
case t.IsString():
// Capacity of the result is unimportant. However, we use
// rcap to test if we've generated a zero-length slice.
// Use length of strings for that.
rcap = rlen
case j == k:
rcap = rlen
default:
rcap = s.newValue2(subOp, Types[TINT], k, i)
}
// delta = # of elements to offset pointer by.
s.vars[&deltaVar] = i
// Generate code to set delta=0 if the resulting capacity is zero.
if !((i.Op == ssa.OpConst64 && i.AuxInt == 0) ||
(i.Op == ssa.OpConst32 && int32(i.AuxInt) == 0)) {
cmp := s.newValue2(eqOp, Types[TBOOL], rcap, zero)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
b.SetControl(cmp)
// Generate block which zeros the delta variable.
nz := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(nz)
s.startBlock(nz)
s.vars[&deltaVar] = zero
s.endBlock()
// All done.
merge := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(merge)
nz.AddEdgeTo(merge)
s.startBlock(merge)
// TODO: use conditional moves somehow?
}
// Compute rptr = ptr + delta * elemsize
rptr := s.newValue2(ssa.OpAddPtr, ptrtype, ptr, s.newValue2(mulOp, Types[TINT], s.variable(&deltaVar, Types[TINT]), s.constInt(Types[TINT], elemtype.Width)))
delete(s.vars, &deltaVar)
return rptr, rlen, rcap
}
type u2fcvtTab struct {
geq, cvt2F, and, rsh, or, add ssa.Op
one func(*state, ssa.Type, int64) *ssa.Value
}
var u64_f64 u2fcvtTab = u2fcvtTab{
geq: ssa.OpGeq64,
cvt2F: ssa.OpCvt64to64F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd64F,
one: (*state).constInt64,
}
var u64_f32 u2fcvtTab = u2fcvtTab{
geq: ssa.OpGeq64,
cvt2F: ssa.OpCvt64to32F,
and: ssa.OpAnd64,
rsh: ssa.OpRsh64Ux64,
or: ssa.OpOr64,
add: ssa.OpAdd32F,
one: (*state).constInt64,
}
// Excess generality on a machine with 64-bit integer registers.
// Not used on AMD64.
var u32_f32 u2fcvtTab = u2fcvtTab{
geq: ssa.OpGeq32,
cvt2F: ssa.OpCvt32to32F,
and: ssa.OpAnd32,
rsh: ssa.OpRsh32Ux32,
or: ssa.OpOr32,
add: ssa.OpAdd32F,
one: func(s *state, t ssa.Type, x int64) *ssa.Value {
return s.constInt32(t, int32(x))
},
}
func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
return s.uintTofloat(&u64_f64, n, x, ft, tt)
}
func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
return s.uintTofloat(&u64_f32, n, x, ft, tt)
}
func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
// y = uintX(x) ; y = x & 1
// z = uintX(x) ; z = z >> 1
// z = z | y
// result = floatY(z)
// result = result + result
// }
//
// Code borrowed from old code generator.
// What's going on: large 64-bit "unsigned" looks like
// negative number to hardware's integer-to-float
// conversion. However, because the mantissa is only
// 63 bits, we don't need the LSB, so instead we do an
// unsigned right shift (divide by two), convert, and
// double. However, before we do that, we need to be
// sure that we do not lose a "1" if that made the
// difference in the resulting rounding. Therefore, we
// preserve it, and OR (not ADD) it back in. The case
// that matters is when the eleven discarded bits are
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
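// As a rough pure-Go illustration of the code generated below (the helper
// name is hypothetical, not part of the compiler):
//
// func uint64ToFloat64(x uint64) float64 {
// if int64(x) >= 0 {
// return float64(int64(x))
// }
// y := x & 1
// z := x >> 1
// z |= y
// r := float64(int64(z))
// return r + r
// }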
cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2F, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
one := cvttab.one(s, ft, 1)
y := s.newValue2(cvttab.and, ft, x, one)
z := s.newValue2(cvttab.rsh, ft, x, one)
z = s.newValue2(cvttab.or, ft, z, y)
a := s.newValue1(cvttab.cvt2F, tt, z)
a1 := s.newValue2(cvttab.add, tt, a, a)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type)
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
// return 0
// } else {
// // len
// return *((*int)n)
// // cap
// return *(((*int)n)+1)
// }
lenType := n.Type
nilValue := s.constNil(Types[TUINTPTR])
cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchUnlikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
// length/capacity of a nil map/chan is zero
b.AddEdgeTo(bThen)
s.startBlock(bThen)
s.vars[n] = s.zeroVal(lenType)
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
if n.Op == OLEN {
// length is stored in the first word for map/chan
s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
} else if n.Op == OCAP {
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
} else {
s.Fatalf("op must be OLEN or OCAP")
}
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, lenType)
}
type f2uCvtTab struct {
ltf, cvt2U, subf ssa.Op
value func(*state, ssa.Type, float64) *ssa.Value
}
var f32_u64 f2uCvtTab = f2uCvtTab{
ltf: ssa.OpLess32F,
cvt2U: ssa.OpCvt32Fto64,
subf: ssa.OpSub32F,
value: (*state).constFloat32,
}
var f64_u64 f2uCvtTab = f2uCvtTab{
ltf: ssa.OpLess64F,
cvt2U: ssa.OpCvt64Fto64,
subf: ssa.OpSub64F,
value: (*state).constFloat64,
}
func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
// if x < 9223372036854775808.0 {
// result = uintY(x)
// } else {
// y = x - 9223372036854775808.0
// z = uintY(y)
// result = z | -9223372036854775808
// }
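// As a rough pure-Go illustration (hypothetical helper, not compiler code):
//
// func float64ToUint64(x float64) uint64 {
// if x < 9223372036854775808.0 {
// return uint64(int64(x))
// }
// y := x - 9223372036854775808.0
// return uint64(int64(y)) | (1 << 63)
// }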
twoToThe63 := cvttab.value(s, ft, 9223372036854775808.0)
cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, twoToThe63)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
bThen := s.f.NewBlock(ssa.BlockPlain)
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
a0 := s.newValue1(cvttab.cvt2U, tt, x)
s.vars[n] = a0
s.endBlock()
bThen.AddEdgeTo(bAfter)
b.AddEdgeTo(bElse)
s.startBlock(bElse)
y := s.newValue2(cvttab.subf, ft, x, twoToThe63)
y = s.newValue1(cvttab.cvt2U, tt, y)
z := s.constInt64(tt, -9223372036854775808)
a1 := s.newValue2(ssa.OpOr64, tt, y, z)
s.vars[n] = a1
s.endBlock()
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
return s.variable(n, n.Type)
}
// ifaceType returns the value for the word containing the type.
// n is the node for the interface expression.
// v is the corresponding value.
func (s *state) ifaceType(n *Node, v *ssa.Value) *ssa.Value {
byteptr := Ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)
if n.Type.IsEmptyInterface() {
// Have *eface. The type is the first word in the struct.
return s.newValue1(ssa.OpITab, byteptr, v)
}
// Have *iface.
// The first word in the struct is the *itab.
// If the *itab is nil, return 0.
// Otherwise, the second word in the *itab is the type.
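// In pseudocode, the blocks built below compute:
// typ = tab
// if tab != nil {
// typ = *(tab + Widthptr)
// }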
tab := s.newValue1(ssa.OpITab, byteptr, v)
s.vars[&typVar] = tab
isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(isnonnil)
b.Likely = ssa.BranchLikely
bLoad := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bLoad)
b.AddEdgeTo(bEnd)
bLoad.AddEdgeTo(bEnd)
s.startBlock(bLoad)
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab)
s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
s.endBlock()
s.startBlock(bEnd)
typ := s.variable(&typVar, byteptr)
delete(s.vars, &typVar)
return typ
}
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.Left)
typ := s.ifaceType(n.Left, iface) // actual concrete type
target := s.expr(typename(n.Type)) // target type
if !isdirectiface(n.Type) {
// walk rewrites ODOTTYPE/OAS2DOTTYPE into runtime calls except for this case.
Fatalf("dottype needs a direct iface type %s", n.Type)
}
if Debug_typeassert > 0 {
Warnl(n.Lineno, "type assertion inlined")
}
// TODO: If we have a nonempty interface and its itab field is nil,
// then this test is redundant and ifaceType should just branch directly to bFail.
cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cond)
b.Likely = ssa.BranchLikely
byteptr := Ptrto(Types[TUINT8])
bOk := s.f.NewBlock(ssa.BlockPlain)
bFail := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bOk)
b.AddEdgeTo(bFail)
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{Typ: byteptr, Sym: typenamesym(n.Left.Type)}, s.sb)
s.rtcall(panicdottype, false, nil, typ, target, taddr)
// on success, return idata field
s.startBlock(bOk)
return s.newValue1(ssa.OpIData, n.Type, iface), nil
}
// commaok is the more complicated case because we have
// a control flow merge point.
bEnd := s.f.NewBlock(ssa.BlockPlain)
// type assertion succeeded
s.startBlock(bOk)
s.vars[&idataVar] = s.newValue1(ssa.OpIData, n.Type, iface)
s.vars[&okVar] = s.constBool(true)
s.endBlock()
bOk.AddEdgeTo(bEnd)
// type assertion failed
s.startBlock(bFail)
s.vars[&idataVar] = s.constNil(byteptr)
s.vars[&okVar] = s.constBool(false)
s.endBlock()
bFail.AddEdgeTo(bEnd)
// merge point
s.startBlock(bEnd)
res = s.variable(&idataVar, byteptr)
resok = s.variable(&okVar, Types[TBOOL])
delete(s.vars, &idataVar)
delete(s.vars, &okVar)
return res, resok
}
// checkgoto checks that a goto from from to to does not
// jump into a block or jump over variable declarations.
// It is a copy of checkgoto in the pre-SSA backend,
// modified only for line number handling.
// TODO: document how this works and why it is designed the way it is.
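// As an illustrative example, code such as
//	goto L
//	x := 1
//	L:
//	_ = x
// is rejected because the jump skips the declaration of x, and a goto whose
// target sits inside a block the goto is not part of is rejected as jumping
// into that block.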
func (s *state) checkgoto(from *Node, to *Node) {
if from.Sym == to.Sym {
return
}
nf := 0
for fs := from.Sym; fs != nil; fs = fs.Link {
nf++
}
nt := 0
for fs := to.Sym; fs != nil; fs = fs.Link {
nt++
}
fs := from.Sym
for ; nf > nt; nf-- {
fs = fs.Link
}
if fs != to.Sym {
// decide what to complain about.
// prefer to complain about 'into block' over declarations,
// so scan backward to find most recent block or else dcl.
var block *Sym
var dcl *Sym
ts := to.Sym
for ; nt > nf; nt-- {
if ts.Pkg == nil {
block = ts
} else {
dcl = ts
}
ts = ts.Link
}
for ts != fs {
if ts.Pkg == nil {
block = ts
} else {
dcl = ts
}
ts = ts.Link
fs = fs.Link
}
lno := from.Left.Lineno
if block != nil {
yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno))
} else {
yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno))
}
}
}
// variable returns the value of a variable at the current location.
func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
v := s.vars[name]
if v == nil {
v = s.newValue0A(ssa.OpFwdRef, t, name)
s.fwdRefs = append(s.fwdRefs, v)
s.vars[name] = v
s.addNamedValue(name, v)
}
return v
}
func (s *state) mem() *ssa.Value {
return s.variable(&memVar, ssa.TypeMem)
}
func (s *state) linkForwardReferences(dm *sparseDefState) {
// Build SSA graph. Each variable on its first use in a basic block
// leaves a FwdRef in that block representing the incoming value
// of that variable. This function links that ref up with possible definitions,
// inserting Phi values as needed. This is essentially the algorithm
// described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
// http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
// Differences:
// - We use FwdRef nodes to postpone phi building until the CFG is
// completely built. That way we can avoid the notion of "sealed"
// blocks.
// - Phi optimization is a separate pass (in ../ssa/phielim.go).
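// Tiny illustrative example: for
//	x := 0
//	if cond { x = 1 }
//	use(x)
// the block containing use(x) starts with a FwdRef for x; resolveFwdRef sees
// two distinct incoming definitions (0 and 1) and rewrites the FwdRef into a
// Phi of those values, whereas a single witness would become an OpCopy.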
for len(s.fwdRefs) > 0 {
v := s.fwdRefs[len(s.fwdRefs)-1]
s.fwdRefs = s.fwdRefs[:len(s.fwdRefs)-1]
s.resolveFwdRef(v, dm)
}
}
// resolveFwdRef modifies v to be the variable's value at the start of its block.
// v must be a FwdRef op.
func (s *state) resolveFwdRef(v *ssa.Value, dm *sparseDefState) {
b := v.Block
name := v.Aux.(*Node)
v.Aux = nil
if b == s.f.Entry {
// Live variable at start of function.
if s.canSSA(name) {
if strings.HasPrefix(name.Sym.Name, "autotmp_") {
// It's likely that this is an uninitialized variable in the entry block.
s.Fatalf("Treating auto as if it were arg, func %s, node %v, value %v", b.Func.Name, name, v)
}
v.Op = ssa.OpArg
v.Aux = name
return
}
// Not SSAable. Load it.
addr := s.decladdrs[name]
if addr == nil {
// TODO: closure args reach here.
s.Unimplementedf("unhandled closure arg %s at entry to function %s", name, b.Func.Name)
}
if _, ok := addr.Aux.(*ssa.ArgSymbol); !ok {
s.Fatalf("variable live at start of function %s is not an argument %s", b.Func.Name, name)
}
v.Op = ssa.OpLoad
v.AddArgs(addr, s.startmem)
return
}
if len(b.Preds) == 0 {
// This block is dead; we have no predecessors and we're not the entry block.
// It doesn't matter what we use here as long as it is well-formed.
v.Op = ssa.OpUnknown
return
}
// Find variable value on each predecessor.
var argstore [4]*ssa.Value
args := argstore[:0]
for _, e := range b.Preds {
p := e.Block()
p = dm.FindBetterDefiningBlock(name, p) // try sparse improvement on p
args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line))
}
// Decide if we need a phi or not. We need a phi if there
// are two different args (which are both not v).
var w *ssa.Value
for _, a := range args {
if a == v {
continue // self-reference
}
if a == w {
continue // already have this witness
}
if w != nil {
// two witnesses, need a phi value
v.Op = ssa.OpPhi
v.AddArgs(args...)
return
}
w = a // save witness
}
if w == nil {
s.Fatalf("no witness for reachable phi %s", v)
}
// One witness. Make v a copy of w.
v.Op = ssa.OpCopy
v.AddArg(w)
}
// lookupVarOutgoing finds the variable's value at the end of block b.
func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node, line int32) *ssa.Value {
for {
if v, ok := s.defvars[b.ID][name]; ok {
return v
}
// The variable is not defined by b and we haven't looked it up yet.
// If b has exactly one predecessor, loop to look it up there.
// Otherwise, give up and insert a new FwdRef and resolve it later.
if len(b.Preds) != 1 {
break
}
b = b.Preds[0].Block()
}
// Generate a FwdRef for the variable and return that.
v := b.NewValue0A(line, ssa.OpFwdRef, t, name)
s.fwdRefs = append(s.fwdRefs, v)
s.defvars[b.ID][name] = v
s.addNamedValue(name, v)
return v
}
func (s *state) addNamedValue(n *Node, v *ssa.Value) {
if n.Class == Pxxx {
// Don't track our dummy nodes (&memVar etc.).
return
}
if strings.HasPrefix(n.Sym.Name, "autotmp_") {
// Don't track autotmp_ variables.
return
}
if n.Class == PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
if n.Class == PAUTO && n.Xoffset != 0 {
s.Fatalf("AUTO var with offset %s %d", n, n.Xoffset)
}
loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
values, ok := s.f.NamedValues[loc]
if !ok {
s.f.Names = append(s.f.Names, loc)
}
s.f.NamedValues[loc] = append(values, v)
}
// Branch is an unresolved branch.
type Branch struct {
P *obj.Prog // branch instruction
B *ssa.Block // target
}
// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
// Branches remembers all the branch instructions we've seen
// and where they would like to go.
Branches []Branch
// bstart remembers where each block starts (indexed by block ID)
bstart []*obj.Prog
// 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
SSEto387 map[int16]int16
// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
ScratchFpMem *Node
}
// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
return Pc
}
// SetLineno sets the current source line number.
func (s *SSAGenState) SetLineno(l int32) {
lineno = l
}
// genssa appends entries to ptxt for each instruction in f.
// gcargs and gclocals are filled in with pointer maps for the frame.
func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
var s SSAGenState
e := f.Config.Frontend().(*ssaExport)
// We're about to emit a bunch of Progs.
// Since the only way to get here is to explicitly request it,
// just fail on unimplemented instead of trying to unwind our mess.
e.mustImplement = true
// Remember where each block starts.
s.bstart = make([]*obj.Prog, f.NumBlocks())
var valueProgs map[*obj.Prog]*ssa.Value
var blockProgs map[*obj.Prog]*ssa.Block
var logProgs = e.log
if logProgs {
valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
f.Logf("genssa %s\n", f.Name)
blockProgs[Pc] = f.Blocks[0]
}
if Thearch.Use387 {
s.SSEto387 = map[int16]int16{}
}
if f.Config.NeedsFpScratch {
s.ScratchFpMem = temp(Types[TUINT64])
}
// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = Pc
// Emit values in block
Thearch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
x := Pc
Thearch.SSAGenValue(&s, v)
if logProgs {
for ; x != Pc; x = x.Link {
valueProgs[x] = v
}
}
}
// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && (Debug['N'] == 0 || b.Kind == ssa.BlockCall) {
// If -N, leave next==nil so every block with successors
// ends in a JMP (except call blocks - plive doesn't like
// select{send,recv} followed by a JMP call). Helps keep
// line numbers for otherwise empty blocks.
next = f.Blocks[i+1]
}
x := Pc
Thearch.SSAGenBlock(&s, b, next)
if logProgs {
for ; x != Pc; x = x.Link {
blockProgs[x] = b
}
}
}
// Resolve branches
for _, br := range s.Branches {
br.P.To.Val = s.bstart[br.B.ID]
}
if logProgs {
for p := ptxt; p != nil; p = p.Link {
var s string
if v, ok := valueProgs[p]; ok {
s = v.String()
} else if b, ok := blockProgs[p]; ok {
s = b.String()
} else {
s = " " // most value and branch strings are 2-3 characters long
}
f.Logf("%s\t%s\n", s, p)
}
if f.Config.HTML != nil {
saved := ptxt.Ctxt.LineHist.PrintFilenameOnly
ptxt.Ctxt.LineHist.PrintFilenameOnly = true
var buf bytes.Buffer
buf.WriteString("<code>")
buf.WriteString("<dl class=\"ssa-gen\">")
for p := ptxt; p != nil; p = p.Link {
buf.WriteString("<dt class=\"ssa-prog-src\">")
if v, ok := valueProgs[p]; ok {
buf.WriteString(v.HTML())
} else if b, ok := blockProgs[p]; ok {
buf.WriteString(b.HTML())
}
buf.WriteString("</dt>")
buf.WriteString("<dd class=\"ssa-prog\">")
buf.WriteString(html.EscapeString(p.String()))
buf.WriteString("</dd>")
}
buf.WriteString("</dl>")
buf.WriteString("</code>")
f.Config.HTML.WriteColumn("genssa", buf.String())
ptxt.Ctxt.LineHist.PrintFilenameOnly = saved
}
}
// Emit static data
if f.StaticData != nil {
for _, n := range f.StaticData.([]*Node) {
if !gen_as_init(n, false) {
Fatalf("non-static data marked as static: %v\n\n", n)
}
}
}
// Allocate stack frame
allocauto(ptxt)
// Generate gc bitmaps.
liveness(Curfn, ptxt, gcargs, gclocals)
// Add frame prologue. Zero ambiguously live variables.
Thearch.Defframe(ptxt)
if Debug['f'] != 0 {
frame(0)
}
// Remove leftover instrumentation from the instruction stream.
removevardef(ptxt)
f.Config.HTML.Close()
}
// movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset
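// Illustrative (hypothetical) use, where mov8 is an 8-byte-wide move opcode and
// r is the base register: zeroing 16 bytes at 0(r),
//	nleft, noff := movZero(mov8, 8, 16, 0, r)
// emits one store of $0 to 0(r) and returns nleft=8, noff=8, so the caller
// loops and issues the next store at the returned offset.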
func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
p := Prog(as)
// TODO: use zero register on archs that support it.
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.To.Type = obj.TYPE_MEM
p.To.Reg = regnum
p.To.Offset = offset
offset += width
nleft = nbytes - width
return nleft, offset
}
type FloatingEQNEJump struct {
Jump obj.As
Index int
}
func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch {
p := Prog(jumps.Jump)
p.To.Type = obj.TYPE_BRANCH
to := jumps.Index
branches = append(branches, Branch{p, b.Succs[to].Block()})
if to == 1 {
likely = -likely
}
// liblink reorders the instruction stream as it sees fit.
// Pass along what we know so liblink can make use of it.
// TODO: Once we've fully switched to SSA,
// make liblink leave our output alone.
switch likely {
case ssa.BranchUnlikely:
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
case ssa.BranchLikely:
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
}
return branches
}
func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
likely := b.Likely
switch next {
case b.Succs[0].Block():
s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches)
s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches)
case b.Succs[1].Block():
s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
default:
s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
q := Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
}
}
func AuxOffset(v *ssa.Value) (offset int64) {
if v.Aux == nil {
return 0
}
switch sym := v.Aux.(type) {
case *ssa.AutoSymbol:
n := sym.Node.(*Node)
return n.Xoffset
}
return 0
}
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
AddAux2(a, v, v.AuxInt)
}
func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
v.Fatalf("bad AddAux addr %v", a)
}
// add integer offset
a.Offset += offset
// If no additional symbol offset, we're done.
if v.Aux == nil {
return
}
// Add symbol's offset from its base register.
switch sym := v.Aux.(type) {
case *ssa.ExternSymbol:
a.Name = obj.NAME_EXTERN
switch s := sym.Sym.(type) {
case *Sym:
a.Sym = Linksym(s)
case *obj.LSym:
a.Sym = s
default:
v.Fatalf("ExternSymbol.Sym is %T", s)
}
case *ssa.ArgSymbol:
n := sym.Node.(*Node)
a.Name = obj.NAME_PARAM
a.Node = n
a.Sym = Linksym(n.Orig.Sym)
a.Offset += n.Xoffset // TODO: why do I have to add this here? I don't for auto variables.
case *ssa.AutoSymbol:
n := sym.Node.(*Node)
a.Name = obj.NAME_AUTO
a.Node = n
a.Sym = Linksym(n.Sym)
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
}
}
// SizeAlignAuxInt returns an AuxInt encoding the size and alignment of type t.
func SizeAlignAuxInt(t *Type) int64 {
return ssa.MakeSizeAndAlign(t.Size(), t.Alignment()).Int64()
}
// extendIndex extends v to a full int width.
// panic using the given function if v does not fit in an int (only on 32-bit archs).
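// For example, on a 64-bit target (IntSize == 8) an int16 index is widened
// with OpSignExt16to64, while on a 32-bit target (IntSize == 4) a 64-bit index
// is first checked via its high word (unless bounds checks are disabled with
// -B) and then truncated with OpTrunc64to32.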
func (s *state) extendIndex(v *ssa.Value, panicfn *Node) *ssa.Value {
size := v.Type.Size()
if size == s.config.IntSize {
return v
}
if size > s.config.IntSize {
// truncate 64-bit indexes on 32-bit pointer archs. Test the
// high word and branch to out-of-bounds failure if it is not 0.
if Debug['B'] == 0 {
hi := s.newValue1(ssa.OpInt64Hi, Types[TUINT32], v)
cmp := s.newValue2(ssa.OpEq32, Types[TBOOL], hi, s.constInt32(Types[TUINT32], 0))
s.check(cmp, panicfn)
}
return s.newValue1(ssa.OpTrunc64to32, Types[TINT], v)
}
// Extend value to the required size
var op ssa.Op
if v.Type.IsSigned() {
switch 10*size + s.config.IntSize {
case 14:
op = ssa.OpSignExt8to32
case 18:
op = ssa.OpSignExt8to64
case 24:
op = ssa.OpSignExt16to32
case 28:
op = ssa.OpSignExt16to64
case 48:
op = ssa.OpSignExt32to64
default:
s.Fatalf("bad signed index extension %s", v.Type)
}
} else {
switch 10*size + s.config.IntSize {
case 14:
op = ssa.OpZeroExt8to32
case 18:
op = ssa.OpZeroExt8to64
case 24:
op = ssa.OpZeroExt16to32
case 28:
op = ssa.OpZeroExt16to64
case 48:
op = ssa.OpZeroExt32to64
default:
s.Fatalf("bad unsigned index extension %s", v.Type)
}
}
return s.newValue1(op, Types[TINT], v)
}
// SSAReg returns the register to which v has been allocated.
func SSAReg(v *ssa.Value) *ssa.Register {
reg := v.Block.Func.RegAlloc[v.ID]
if reg == nil {
v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func)
}
return reg.(*ssa.Register)
}
// SSAReg0 returns the register to which the first output of v has been allocated.
func SSAReg0(v *ssa.Value) *ssa.Register {
reg := v.Block.Func.RegAlloc[v.ID].(ssa.LocPair)[0]
if reg == nil {
v.Fatalf("nil first register for value: %s\n%s\n", v.LongString(), v.Block.Func)
}
return reg.(*ssa.Register)
}
// SSAReg1 returns the register to which the second output of v has been allocated.
func SSAReg1(v *ssa.Value) *ssa.Register {
reg := v.Block.Func.RegAlloc[v.ID].(ssa.LocPair)[1]
if reg == nil {
v.Fatalf("nil second register for value: %s\n%s\n", v.LongString(), v.Block.Func)
}
return reg.(*ssa.Register)
}
// SSARegNum returns the register number (in cmd/internal/obj numbering) to which v has been allocated.
func SSARegNum(v *ssa.Value) int16 {
return Thearch.SSARegToReg[SSAReg(v).Num]
}
// SSARegNum0 returns the register number (in cmd/internal/obj numbering) to which the first output of v has been allocated.
func SSARegNum0(v *ssa.Value) int16 {
return Thearch.SSARegToReg[SSAReg0(v).Num]
}
// SSARegNum1 returns the register number (in cmd/internal/obj numbering) to which the second output of v has been allocated.
func SSARegNum1(v *ssa.Value) int16 {
return Thearch.SSARegToReg[SSAReg1(v).Num]
}
// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
// Called during ssaGenValue.
func CheckLoweredPhi(v *ssa.Value) {
if v.Op != ssa.OpPhi {
v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
}
if v.Type.IsMemory() {
return
}
f := v.Block.Func
loc := f.RegAlloc[v.ID]
for _, a := range v.Args {
if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
}
}
}
// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
// That register contains the closure pointer on closure entry.
func CheckLoweredGetClosurePtr(v *ssa.Value) {
entry := v.Block.Func.Entry
if entry != v.Block || entry.Values[0] != v {
Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
}
}
// AutoVar returns a *Node and int64 representing the auto variable and offset within it
// where v should be spilled.
func AutoVar(v *ssa.Value) (*Node, int64) {
loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
if v.Type.Size() > loc.Type.Size() {
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
}
return loc.N.(*Node), loc.Off
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *Node) int {
t := n.Left.Type
f := n.Sym
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
var i int
for _, t1 := range t.Fields().Slice() {
if t1.Sym != f {
i++
continue
}
if t1.Offset != n.Xoffset {
panic("field offset doesn't match")
}
return i
}
panic(fmt.Sprintf("can't find field in expr %s\n", n))
// TODO: keep the result of this function somewhere in the ODOT Node
// so we don't have to recompute it each time we need it.
}
// ssaExport exports a bunch of compiler services for the ssa backend.
type ssaExport struct {
log bool
unimplemented bool
mustImplement bool
}
func (s *ssaExport) TypeBool() ssa.Type { return Types[TBOOL] }
func (s *ssaExport) TypeInt8() ssa.Type { return Types[TINT8] }
func (s *ssaExport) TypeInt16() ssa.Type { return Types[TINT16] }
func (s *ssaExport) TypeInt32() ssa.Type { return Types[TINT32] }
func (s *ssaExport) TypeInt64() ssa.Type { return Types[TINT64] }
func (s *ssaExport) TypeUInt8() ssa.Type { return Types[TUINT8] }
func (s *ssaExport) TypeUInt16() ssa.Type { return Types[TUINT16] }
func (s *ssaExport) TypeUInt32() ssa.Type { return Types[TUINT32] }
func (s *ssaExport) TypeUInt64() ssa.Type { return Types[TUINT64] }
func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] }
func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] }
func (s *ssaExport) TypeInt() ssa.Type { return Types[TINT] }
func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] }
func (s *ssaExport) TypeString() ssa.Type { return Types[TSTRING] }
func (s *ssaExport) TypeBytePtr() ssa.Type { return Ptrto(Types[TUINT8]) }
// StringData returns a symbol (a *Sym wrapped in an interface) which
// is the data component of a global string constant containing s.
func (*ssaExport) StringData(s string) interface{} {
// TODO: is idealstring correct? It might not matter...
_, data := stringsym(s)
return &ssa.ExternSymbol{Typ: idealstring, Sym: data}
}
func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode {
n := temp(t.(*Type)) // Note: adds new auto to Curfn.Func.Dcl list
e.mustImplement = true // This modifies the input to SSA, so we want to make sure we succeed from here!
return n
}
func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := Ptrto(Types[TUINT8])
lenType := Types[TINT]
if n.Class == PAUTO && !n.Addrtaken {
// Split this string up into two separate variables.
p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
l := e.namedAuto(n.Sym.Name+".len", lenType)
return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
}
func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
t := Ptrto(Types[TUINT8])
if n.Class == PAUTO && !n.Addrtaken {
// Split this interface up into two separate variables.
f := ".itab"
if n.Type.IsEmptyInterface() {
f = ".type"
}
c := e.namedAuto(n.Sym.Name+f, t)
d := e.namedAuto(n.Sym.Name+".data", t)
return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
}
func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := Ptrto(name.Type.ElemType().(*Type))
lenType := Types[TINT]
if n.Class == PAUTO && !n.Addrtaken {
// Split this slice up into three separate variables.
p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
l := e.namedAuto(n.Sym.Name+".len", lenType)
c := e.namedAuto(n.Sym.Name+".cap", lenType)
return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0}
}
// Return the three parts of the larger variable.
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
}
func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
s := name.Type.Size() / 2
var t *Type
if s == 8 {
t = Types[TFLOAT64]
} else {
t = Types[TFLOAT32]
}
if n.Class == PAUTO && !n.Addrtaken {
// Split this complex up into two separate variables.
c := e.namedAuto(n.Sym.Name+".real", t)
d := e.namedAuto(n.Sym.Name+".imag", t)
return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
}
func (e *ssaExport) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
var t *Type
if name.Type.IsSigned() {
t = Types[TINT32]
} else {
t = Types[TUINT32]
}
if n.Class == PAUTO && !n.Addrtaken {
// Split this int64 up into two separate variables.
h := e.namedAuto(n.Sym.Name+".hi", t)
l := e.namedAuto(n.Sym.Name+".lo", Types[TUINT32])
return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0}
}
// Return the two parts of the larger variable.
// Assuming little endian (we don't support big endian 32-bit architecture yet)
return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off}
}
func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
n := name.N.(*Node)
st := name.Type
ft := st.FieldType(i)
if n.Class == PAUTO && !n.Addrtaken {
// Note: the _ field may appear several times. But
// have no fear, identically-named but distinct Autos are
// ok, albeit maybe confusing for a debugger.
x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft)
return ssa.LocalSlot{N: x, Type: ft, Off: 0}
}
return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
}
// namedAuto returns a new AUTO variable with the given name and type.
func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode {
t := typ.(*Type)
s := &Sym{Name: name, Pkg: localpkg}
n := Nod(ONAME, nil, nil)
s.Def = n
s.Def.Used = true
n.Sym = s
n.Type = t
n.Class = PAUTO
n.Addable = true
n.Ullman = 1
n.Esc = EscNever
n.Xoffset = 0
n.Name.Curfn = Curfn
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
dowidth(t)
e.mustImplement = true
return n
}
func (e *ssaExport) CanSSA(t ssa.Type) bool {
return canSSAType(t.(*Type))
}
func (e *ssaExport) Line(line int32) string {
return linestr(line)
}
// Logf logs a message from the compiler.
func (e *ssaExport) Logf(msg string, args ...interface{}) {
// If e was marked as unimplemented, anything could happen. Ignore.
if e.log && !e.unimplemented {
fmt.Printf(msg, args...)
}
}
func (e *ssaExport) Log() bool {
return e.log
}
// Fatalf reports a compiler error and exits.
func (e *ssaExport) Fatalf(line int32, msg string, args ...interface{}) {
// If e was marked as unimplemented, anything could happen. Ignore.
if !e.unimplemented {
lineno = line
Fatalf(msg, args...)
}
}
// Unimplementedf reports that the function cannot be compiled.
// It will be removed once SSA work is complete.
func (e *ssaExport) Unimplementedf(line int32, msg string, args ...interface{}) {
if e.mustImplement {
lineno = line
Fatalf(msg, args...)
}
const alwaysLog = false // enable to calculate top unimplemented features
if !e.unimplemented && (e.log || alwaysLog) {
// first implementation failure, print explanation
fmt.Printf("SSA unimplemented: "+msg+"\n", args...)
}
e.unimplemented = true
}
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssaExport) Warnl(line int32, fmt_ string, args ...interface{}) {
Warnl(line, fmt_, args...)
}
func (e *ssaExport) Debug_checknil() bool {
return Debug_checknil != 0
}
func (n *Node) Typ() ssa.Type {
return n.Type
}
|
[
"\"SSATEST\"",
"\"GOSSAFUNC\"",
"\"GOSSAPKG\"",
"\"GOSSAFUNC\"",
"\"GOSSAFUNC\""
] |
[] |
[
"GOSSAPKG",
"SSATEST",
"GOSSAFUNC"
] |
[]
|
["GOSSAPKG", "SSATEST", "GOSSAFUNC"]
|
go
| 3 | 0 | |
examples/qr/generate/generateAQrCode/main.go
|
package main
import (
"fmt"
"os"
"go.m3o.com/qr"
)
// Generate a QR code with a specific text and size
func main() {
qrService := qr.NewQrService(os.Getenv("M3O_API_TOKEN"))
rsp, err := qrService.Generate(&qr.GenerateRequest{
Size: 300,
Text: "https://m3o.com/qr",
})
fmt.Println(rsp, err)
}
|
[
"\"M3O_API_TOKEN\""
] |
[] |
[
"M3O_API_TOKEN"
] |
[]
|
["M3O_API_TOKEN"]
|
go
| 1 | 0 | |
bin/ingest.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019, 2020 Matt Post <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ingests data into the Anthology. It takes a list of one or more
ACLPUB proceedings/ directories and does the following:
- executes some basic sanity checks
- applies normalization to names and titles (e.g., fixed-case protection)
- generates the nested XML in the Anthology repository
- copies the PDFs and attachments into place for rsyncing to the server
Updated in March 2020, this script replaces:
- the old ingest.py (which converted the old ACLPUB flat XML format)
- anthologize.pl in ACLPUB
- anthology_xml.py in ACLPUB
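Illustrative invocation (paths are hypothetical; see the argparse setup at the
bottom of this file for the authoritative flags and defaults):

    python3 bin/ingest.py --ingest-date 2020-07-01 \
        --pdfs-dir ~/anthology-files/pdf \
        --attachments-dir ~/anthology-files/attachments \
        ~/incoming/acl-2020/proceedings/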
"""
import argparse
import os
import re
import readline
import shutil
import sys
import lxml.etree as etree
from collections import defaultdict, OrderedDict
from datetime import datetime
from normalize_anth import normalize
from anthology.utils import (
make_simple_element,
build_anthology_id,
deconstruct_anthology_id,
indent,
compute_hash_from_file,
)
from anthology.index import AnthologyIndex
from anthology.people import PersonName
from anthology.bibtex import read_bibtex
from anthology.venues import VenueIndex
from itertools import chain
from typing import Dict, Any
def log(text: str, fake: bool = False):
message = "[DRY RUN] " if fake else ""
print(f"{message}{text}", file=sys.stderr)
def read_meta(path: str) -> Dict[str, Any]:
meta = {"chairs": []}
with open(path) as instream:
for line in instream:
key, value = line.rstrip().split(" ", maxsplit=1)
if key.startswith("chair"):
meta["chairs"].append(value)
else:
meta[key] = value
return meta
def maybe_copy(source_path, dest_path):
"""Copies the file if it's different from the target."""
if not os.path.exists(dest_path) or compute_hash_from_file(
source_path
) != compute_hash_from_file(dest_path):
log(f"Copying {source_path} -> {dest_path}", args.dry_run)
shutil.copyfile(source_path, dest_path)
def bib2xml(bibfilename, anthology_id):
"""
Moved here from ACLPUB's anthology_xml.py script.
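
Roughly, for a regular paper entry this returns an element like
(illustrative values):

    <paper id="5">
      <title>...</title>
      <author><first>...</first><last>...</last></author>
      <booktitle>...</booktitle>
      <url>2020.acl-main.5</url>
    </paper>

main() later removes volume-level fields from each paper and rewrites
entry "0" into the volume's <meta> and <frontmatter>.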
"""
fields = [
'title',
'author',
'editor',
'booktitle',
'month',
'year',
'address',
'publisher',
'pages',
'abstract',
'url',
'doi',
'language',
]
collection_id, volume_name, paper_no = deconstruct_anthology_id(anthology_id)
if paper_no == '':
return # skip the master bib file; we only process the individual files
bibdata = read_bibtex(bibfilename)
if len(bibdata.entries) != 1:
log(f"more than one entry in {bibfilename}")
bibkey, bibentry = bibdata.entries.items()[0]
if len(bibentry.fields) == 0:
log(f"parsing bib of paper {paper_no} failed")
sys.exit(1)
paper = make_simple_element("paper", attrib={"id": paper_no})
for field in list(bibentry.fields) + list(bibentry.persons):
if field not in fields:
log(f"unknown field {field}")
for field in fields:
if field in ['author', 'editor']:
if field in bibentry.persons:
for person in bibentry.persons[field]:
first_text = ' '.join(person.bibtex_first_names)
last_text = ' '.join(person.prelast_names + person.last_names)
if person.lineage_names:
last_text += ', ' + ' '.join(person.lineage_names)
# Don't distinguish between authors that have only a first name
# vs. authors that have only a last name; always make it a last name.
if last_text.strip() in [
'',
'-',
]: # Some START users have '-' for null
last_text = first_text
first_text = ''
name_node = make_simple_element(field, parent=paper)
make_simple_element("first", first_text, parent=name_node)
make_simple_element("last", last_text, parent=name_node)
else:
if field == 'url':
value = f"{anthology_id}"
elif field in bibentry.fields:
value = bibentry.fields[field]
elif field == 'bibtype':
value = bibentry.type
elif field == 'bibkey':
value = bibkey
else:
continue
make_simple_element(field, text=value, parent=paper)
return paper
def main(args):
collections = defaultdict(OrderedDict)
volumes = {}
anthology_datadir = os.path.join(os.path.dirname(sys.argv[0]), "..", "data")
venue_keys = [
venue["slug"].lower() for _, venue in VenueIndex(srcdir=anthology_datadir).items()
]
# Build list of volumes, confirm uniqueness
unseen_venues = []
for proceedings in args.proceedings:
meta = read_meta(os.path.join(proceedings, "meta"))
venue_name = meta["abbrev"].lower()
if venue_name not in venue_keys:
unseen_venues.append(meta["abbrev"])
meta["path"] = proceedings
meta["collection_id"] = collection_id = (
meta["year"] + "." + meta["abbrev"].lower()
)
volume_name = meta["volume"].lower()
volume_full_id = f"{collection_id}-{volume_name}"
if volume_full_id in volumes:
print(f"Error: duplicate volume {volume_full_id}")
collections[collection_id][volume_name] = {}
volumes[volume_full_id] = meta
# Make sure all venues exist
if len(unseen_venues) > 0:
print("FATAL: The following venue(s) don't exist in venues.yaml")
for venue in unseen_venues:
print(f"- {venue}")
print("Please create entries for them and re-ingest.")
sys.exit(1)
# Copy over the PDFs and attachments
for volume, meta in volumes.items():
root_path = os.path.join(meta["path"], "cdrom")
collection_id = meta["collection_id"]
venue_name = meta["abbrev"].lower()
volume_name = meta["volume"].lower()
year = meta["year"]
pdfs_dest_dir = os.path.join(args.pdfs_dir, venue_name)
if not os.path.exists(pdfs_dest_dir):
os.makedirs(pdfs_dest_dir)
# copy the book
book_src_filename = meta["abbrev"] + "-" + year
book_src_path = os.path.join(root_path, book_src_filename) + ".pdf"
book_dest_path = None
if os.path.exists(book_src_path):
book_dest_path = (
os.path.join(pdfs_dest_dir, f"{collection_id}-{volume_name}") + ".pdf"
)
if not args.dry_run:
maybe_copy(book_src_path, book_dest_path)
# copy the paper PDFs
pdf_src_dir = os.path.join(root_path, "pdf")
for pdf_file in os.listdir(pdf_src_dir):
# names end in .{number}.pdf; extract the paper number
match = re.match(rf".*\.(\d+)\.pdf", pdf_file)
if match is not None:
paper_num = int(match[1])
paper_id_full = f"{collection_id}-{volume_name}.{paper_num}"
bib_path = os.path.join(
root_path,
"bib",
pdf_file.replace("/pdf", "/bib/").replace(".pdf", ".bib"),
)
pdf_src_path = os.path.join(pdf_src_dir, pdf_file)
pdf_dest_path = os.path.join(
pdfs_dest_dir, f"{collection_id}-{volume_name}.{paper_num}.pdf"
)
if not args.dry_run:
maybe_copy(pdf_src_path, pdf_dest_path)
collections[collection_id][volume_name][paper_num] = {
"anthology_id": paper_id_full,
"bib": bib_path,
"pdf": pdf_dest_path,
"attachments": [],
}
# copy the attachments
if os.path.exists(os.path.join(root_path, "additional")):
attachments_dest_dir = os.path.join(args.attachments_dir, venue_name)
if not os.path.exists(attachments_dest_dir):
os.makedirs(attachments_dest_dir)
for attachment_file in os.listdir(os.path.join(root_path, "additional")):
attachment_file_path = os.path.join(
root_path, "additional", attachment_file
)
match = re.match(
rf"{year}\.{venue_name}-\w+\.(\d+)_?(\w+)\.(\w+)$", attachment_file
)
if match is None:
print(
f"* Warning: no attachment match for {attachment_file}",
file=sys.stderr,
)
sys.exit(2)
paper_num, type_, ext = match.groups()
paper_num = int(paper_num)
file_name = f"{collection_id}-{volume_name}.{paper_num}.{type_}.{ext}"
dest_path = os.path.join(attachments_dest_dir, file_name)
if not args.dry_run and not os.path.exists(dest_path):
log(f"Copying {attachment_file} -> {dest_path}", args.dry_run)
shutil.copyfile(attachment_file_path, dest_path)
collections[collection_id][volume_name][paper_num]["attachments"].append(
(dest_path, type_)
)
people = AnthologyIndex(None, srcdir=anthology_datadir)
def disambiguate_name(node, anth_id):
name = PersonName.from_element(node)
ids = people.get_ids(name)
if len(ids) > 1:
choice = -1
while choice < 0 or choice >= len(ids):
print(
f"({anth_id}): ambiguous author {name}; Please choose from the following:"
)
for i, id_ in enumerate(ids):
print(f"[{i}] {id_} ({people.get_comment(id_)})")
choice = int(input("--> "))
node.attrib["id"] = ids[choice]
for collection_id, collection in collections.items():
# Newly added volumes, so we can normalize and name-disambig later
newly_added_volumes = []
collection_file = os.path.join(
args.anthology_dir, "data", "xml", f"{collection_id}.xml"
)
if os.path.exists(collection_file):
root_node = etree.parse(collection_file).getroot()
else:
root_node = make_simple_element("collection", attrib={"id": collection_id})
for volume_id, volume in collection.items():
volume_node = make_simple_element(
"volume",
attrib={"id": volume_id, "ingest-date": args.ingest_date},
)
# Replace the existing one if present
existing_volume_node = root_node.find(f"./volume[@id='{volume_id}']")
for i, child in enumerate(root_node):
if child.attrib["id"] == volume_id:
root_node[i] = volume_node
break
else:
root_node.append(volume_node)
meta_node = None
for paper_num, paper in sorted(volume.items()):
paper_id_full = paper["anthology_id"]
bibfile = paper["bib"]
paper_node = bib2xml(bibfile, paper_id_full)
# print(etree.tostring(paper_node, pretty_print=True))
if paper_node.attrib["id"] == "0":
# create metadata subtree
meta_node = make_simple_element("meta", parent=volume_node)
title_node = paper_node.find("title")
title_node.tag = "booktitle"
meta_node.append(title_node)
for author_or_editor in chain(
paper_node.findall("./author"), paper_node.findall("./editor")
):
meta_node.append(author_or_editor)
author_or_editor.tag = "editor"
meta_node.append(paper_node.find("publisher"))
meta_node.append(paper_node.find("address"))
meta_node.append(paper_node.find("month"))
meta_node.append(paper_node.find("year"))
if book_dest_path is not None:
make_simple_element(
"url",
text=f"{collection_id}-{volume_name}",
attrib={"hash": compute_hash_from_file(book_dest_path)},
parent=meta_node,
)
# modify frontmatter tag
paper_node.tag = "frontmatter"
del paper_node.attrib["id"]
else:
# remove unneeded fields
for child in paper_node:
if child.tag in [
"editor",
"address",
"booktitle",
"publisher",
"year",
"month",
]:
paper_node.remove(child)
url = paper_node.find("./url")
if url is not None:
url.attrib["hash"] = compute_hash_from_file(paper["pdf"])
for path, type_ in paper["attachments"]:
make_simple_element(
"attachment",
text=os.path.basename(path),
attrib={
"type": type_,
"hash": compute_hash_from_file(path),
},
parent=paper_node,
)
if len(paper_node) > 0:
volume_node.append(paper_node)
# Normalize
for oldnode in paper_node:
normalize(oldnode, informat="latex")
for name_node in chain(
paper_node.findall("./author"), paper_node.findall("./editor")
):
disambiguate_name(name_node, paper_id_full)
# Other data from the meta file
if "isbn" in meta:
make_simple_element("isbn", meta["isbn"], parent=meta_node)
indent(root_node)
tree = etree.ElementTree(root_node)
tree.write(
collection_file, encoding="UTF-8", xml_declaration=True, with_tail=True
)
if __name__ == "__main__":
now = datetime.now()
today = f"{now.year}-{now.month:02d}-{now.day:02d}"
parser = argparse.ArgumentParser()
parser.add_argument(
"proceedings", nargs="+", help="List of paths to ACLPUB proceedings/ directories."
)
parser.add_argument(
"--ingest-date",
"-d",
type=str,
default=today,
help="Ingestion date as YYYY-MM-DD. Default: %(default)s.",
)
anthology_path = os.path.join(os.path.dirname(sys.argv[0]), "..")
parser.add_argument(
"--anthology-dir",
"-r",
default=anthology_path,
help="Root path of ACL Anthology Github repo. Default: %(default)s.",
)
pdfs_path = os.path.join(os.environ["HOME"], "anthology-files", "pdf")
parser.add_argument(
"--pdfs-dir", "-p", default=pdfs_path, help="Root path for placement of PDF files"
)
attachments_path = os.path.join(os.environ["HOME"], "anthology-files", "attachments")
parser.add_argument(
"--attachments-dir",
"-a",
default=attachments_path,
help="Root path for placement of attachment files",
)
parser.add_argument(
"--dry-run", "-n", action="store_true", help="Don't actually copy anything."
)
args = parser.parse_args()
main(args)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
controller/userapi/handler_showuserbyid.go
|
package userapi
import (
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/raismaulana/blogP/application/apperror"
"github.com/raismaulana/blogP/infrastructure/log"
"github.com/raismaulana/blogP/infrastructure/util"
"github.com/raismaulana/blogP/usecase/showuserbyid"
)
// showUserByIDHandler ...
func (r *Controller) showUserByIDHandler(inputPort showuserbyid.Inport) gin.HandlerFunc {
return func(c *gin.Context) {
ctx := log.Context(c.Request.Context())
id, err := strconv.ParseInt(c.Param("id_user"), 10, 64)
if err != nil {
log.Error(ctx, err.Error())
c.JSON(http.StatusBadRequest, NewErrorResponse(apperror.NumberOnlyParam))
return
}
req := showuserbyid.InportRequest{
ID: id,
}
log.Info(ctx, util.MustJSON(req))
res, err := inputPort.Execute(ctx, req)
if err != nil {
log.Error(ctx, err.Error())
c.JSON(http.StatusOK, NewErrorResponse(err))
return
}
log.Info(ctx, util.MustJSON(res))
c.JSON(http.StatusOK, NewSuccessResponse(res))
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
ECG_combine_leads.py
|
# -*- coding: utf-8 -*-
"""
Created on 2019
@author: Xiajun
# CPSC_combine_leads.py: combine the output probabilities of the per-lead neural networks
"""
import os
import warnings
import numpy as np
import tensorflow as tf
from keras import backend as bk
from keras.models import load_model
from keras.utils import to_categorical
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split
from ECG_config import Config
import ECG_utils as utils
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
warnings.filterwarnings("ignore")
config = Config()
config.MODEL_PATH = 'G:/EngineerPractice/ECG20190221/ECG_SIGNAL/Net_models/'
# Names of the files containing the ECG records
records_name = np.array(os.listdir(config.DATA_PATH))
# ECG labels, i.e. the class of each record
records_label = np.load(config.REVISED_LABEL) - 1
# Number of ECG classes
class_num = len(np.unique(records_label))
# Randomly split the dataset into training and test sets
train_val_records, test_records, train_val_labels, test_labels = train_test_split(
records_name, records_label, test_size=0.2, random_state=config.RANDOM_STATE)
# Further split the combined train/val pool into training and validation sets
train_records, val_records, train_labels, val_labels = train_test_split(
train_val_records, train_val_labels, test_size=0.2, random_state=config.RANDOM_STATE)
# Balance the number of samples per class via random oversampling
train_records, train_labels = utils.oversample_balance(train_records, train_labels, config.RANDOM_STATE)
# Balance the number of samples per class via random oversampling
val_records, val_labels = utils.oversample_balance(val_records, val_labels, config.RANDOM_STATE)
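# The loop below loads the trained model for each lead, predicts class
# probabilities for the train/val/test splits, drops the first probability
# column, and concatenates the remaining columns across leads, yielding arrays
# of shape (num_records, LEAD_NUM * (class_num - 1)) that are saved as .npy
# files after the loop.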
for i in range(config.LEAD_NUM):  # load each lead's model, predict probabilities, and concatenate them
TARGET_LEAD = i
train_x = utils.Fetch_Pats_Lbs_sLead(train_records, Path=config.DATA_PATH,
target_lead=TARGET_LEAD, seg_num=config.SEG_NUM,
seg_length=config.SEG_LENGTH)
train_y = to_categorical(train_labels, num_classes=class_num)
val_x = utils.Fetch_Pats_Lbs_sLead(val_records, Path=config.DATA_PATH,
target_lead=TARGET_LEAD, seg_num=config.SEG_NUM,
seg_length=config.SEG_LENGTH)
val_y = to_categorical(val_labels, num_classes=class_num)
for j in range(train_x.shape[0]):
train_x[j, :, :] = scale(train_x[j, :, :], axis=0)
for j in range(val_x.shape[0]):
val_x[j, :, :] = scale(val_x[j, :, :], axis=0)
model_name = 'net_lead_' + str(TARGET_LEAD) + '.hdf5'
model = load_model(config.MODEL_PATH + model_name)
pred_nnet_rt = model.predict(train_x, batch_size=64, verbose=1)
del train_x
pred_nnet_vt = model.predict(val_x, batch_size=64, verbose=1)
del val_x
test_x = utils.Fetch_Pats_Lbs_sLead(test_records, Path=config.DATA_PATH,
target_lead=TARGET_LEAD, seg_num=config.SEG_NUM,
seg_length=config.SEG_LENGTH)
test_y = to_categorical(test_labels, num_classes=class_num)
for j in range(test_x.shape[0]):
test_x[j, :, :] = scale(test_x[j, :, :], axis=0)
pred_nnet_tt = model.predict(test_x, batch_size=64, verbose=1)
del test_x
if i == 0:
pred_nnet_r = pred_nnet_rt[:, 1:]
pred_nnet_v = pred_nnet_vt[:, 1:]
pred_nnet_t = pred_nnet_tt[:, 1:]
else:
pred_nnet_r = np.concatenate((pred_nnet_r, pred_nnet_rt[:, 1:]), axis=1)
pred_nnet_v = np.concatenate((pred_nnet_v, pred_nnet_vt[:, 1:]), axis=1)
pred_nnet_t = np.concatenate((pred_nnet_t, pred_nnet_tt[:, 1:]), axis=1)
del model
bk.clear_session()
tf.reset_default_graph()
np.save(config.MODEL_PATH + 'pred_nnet_r.npy', pred_nnet_r)
np.save(config.MODEL_PATH + 'pred_nnet_v.npy', pred_nnet_v)
np.save(config.MODEL_PATH + 'pred_nnet_t.npy', pred_nnet_t)
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
backend/importe_33537/settings.py
|
"""
Django settings for importe_33537 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
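    # The secret named by SETTINGS_NAME (default "django_settings") is expected
    # to hold a .env-style blob; it is read in addition to the local .env, and
    # errors from missing credentials or permissions are silently ignored.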
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'importe_33537.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'importe_33537.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
|
[] |
[] |
[
"SETTINGS_NAME"
] |
[]
|
["SETTINGS_NAME"]
|
python
| 1 | 0 | |
private/signer/v2/v2_test.go
|
package v2
import (
"bytes"
"net/http"
"net/url"
"os"
"strconv"
"testing"
"time"
"github.com/IBM/ibm-cos-sdk-go/aws"
"github.com/IBM/ibm-cos-sdk-go/aws/credentials"
"github.com/IBM/ibm-cos-sdk-go/aws/request"
"github.com/IBM/ibm-cos-sdk-go/awstesting"
)
type signerBuilder struct {
ServiceName string
Region string
SignTime time.Time
Query url.Values
Method string
SessionToken string
}
func (sb signerBuilder) BuildSigner() signer {
endpoint := "https://" + sb.ServiceName + "." + sb.Region + ".amazonaws.com"
var req *http.Request
if sb.Method == "POST" {
body := []byte(sb.Query.Encode())
reader := bytes.NewReader(body)
req, _ = http.NewRequest(sb.Method, endpoint, reader)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
req.Header.Add("Content-Length", strconv.Itoa(len(body)))
} else {
req, _ = http.NewRequest(sb.Method, endpoint, nil)
req.URL.RawQuery = sb.Query.Encode()
}
sig := signer{
Request: req,
Time: sb.SignTime,
Credentials: credentials.NewStaticCredentials(
"AKID",
"SECRET",
sb.SessionToken),
}
if os.Getenv("DEBUG") != "" {
sig.Debug = aws.LogDebug
sig.Logger = aws.NewDefaultLogger()
}
return sig
}
func TestSignRequestWithAndWithoutSession(t *testing.T) {
// have to create more than once, so use a function
newQuery := func() url.Values {
query := make(url.Values)
query.Add("Action", "CreateDomain")
query.Add("DomainName", "TestDomain-1437033376")
query.Add("Version", "2009-04-15")
return query
}
// create request without a SecurityToken (session) in the credentials
query := newQuery()
timestamp := time.Date(2015, 7, 16, 7, 56, 16, 0, time.UTC)
builder := signerBuilder{
Method: "POST",
ServiceName: "sdb",
Region: "ap-southeast-2",
SignTime: timestamp,
Query: query,
}
signer := builder.BuildSigner()
err := signer.Sign()
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if e, a := "tm4dX8Ks7pzFSVHz7qHdoJVXKRLuC4gWz9eti60d8ks=", signer.signature; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := 8, len(signer.Query); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "AKID", signer.Query.Get("AWSAccessKeyId"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "2015-07-16T07:56:16Z", signer.Query.Get("Timestamp"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "HmacSHA256", signer.Query.Get("SignatureMethod"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "2", signer.Query.Get("SignatureVersion"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "tm4dX8Ks7pzFSVHz7qHdoJVXKRLuC4gWz9eti60d8ks=", signer.Query.Get("Signature"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "CreateDomain", signer.Query.Get("Action"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "TestDomain-1437033376", signer.Query.Get("DomainName"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "2009-04-15", signer.Query.Get("Version"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
// should not have a SecurityToken parameter
_, ok := signer.Query["SecurityToken"]
if ok {
t.Errorf("expect no SecurityToken parameter, but one was found")
}
// now sign again, this time with a security token (session)
query = newQuery()
builder.SessionToken = "SESSION"
signer = builder.BuildSigner()
err = signer.Sign()
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if e, a := "Ch6qv3rzXB1SLqY2vFhsgA1WQ9rnQIE2WJCigOvAJwI=", signer.signature; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := 9, len(signer.Query); e != a { // expect one more parameter
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "Ch6qv3rzXB1SLqY2vFhsgA1WQ9rnQIE2WJCigOvAJwI=", signer.Query.Get("Signature"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "SESSION", signer.Query.Get("SecurityToken"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestMoreComplexSignRequest(t *testing.T) {
query := make(url.Values)
query.Add("Action", "PutAttributes")
query.Add("DomainName", "TestDomain-1437041569")
query.Add("Version", "2009-04-15")
query.Add("Attribute.2.Name", "Attr2")
query.Add("Attribute.2.Value", "Value2")
query.Add("Attribute.2.Replace", "true")
query.Add("Attribute.1.Name", "Attr1-%\\+ %")
query.Add("Attribute.1.Value", " \tValue1 +!@#$%^&*(){}[]\"';:?/.>,<\x12\x00")
query.Add("Attribute.1.Replace", "true")
query.Add("ItemName", "Item 1")
timestamp := time.Date(2015, 7, 16, 10, 12, 51, 0, time.UTC)
builder := signerBuilder{
Method: "POST",
ServiceName: "sdb",
Region: "ap-southeast-2",
SignTime: timestamp,
Query: query,
SessionToken: "SESSION",
}
signer := builder.BuildSigner()
err := signer.Sign()
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if e, a := "WNdE62UJKLKoA6XncVY/9RDbrKmcVMdQPQOTAs8SgwQ=", signer.signature; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestGet(t *testing.T) {
svc := awstesting.NewClient(&aws.Config{
Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
Region: aws.String("ap-southeast-2"),
})
r := svc.NewRequest(
&request.Operation{
Name: "OpName",
HTTPMethod: "GET",
HTTPPath: "/",
},
nil,
nil,
)
r.Build()
if e, a := "GET", r.HTTPRequest.Method; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "", r.HTTPRequest.URL.Query().Get("Signature"); e != a {
t.Errorf("expect %v, got %v", e, a)
}
SignSDKRequest(r)
if r.Error != nil {
t.Fatalf("expect no error, got %v", r.Error)
}
t.Logf("Signature: %s", r.HTTPRequest.URL.Query().Get("Signature"))
if len(r.HTTPRequest.URL.Query().Get("Signature")) == 0 {
t.Errorf("expect signature to be set, was not")
}
}
func TestAnonymousCredentials(t *testing.T) {
svc := awstesting.NewClient(&aws.Config{
Credentials: credentials.AnonymousCredentials,
Region: aws.String("ap-southeast-2"),
})
r := svc.NewRequest(
&request.Operation{
Name: "PutAttributes",
HTTPMethod: "POST",
HTTPPath: "/",
},
nil,
nil,
)
r.Build()
SignSDKRequest(r)
req := r.HTTPRequest
req.ParseForm()
if a := req.PostForm.Get("Signature"); len(a) != 0 {
t.Errorf("expect no signature, got %v", a)
}
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
configuration = os.getenv('ENVIRONMENT', 'development').title()
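    # e.g. ENVIRONMENT=production selects the "Production" configuration class
    # via django-configurations; when unset, it falls back to "Development".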
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ff4.settings')
os.environ.setdefault('DJANGO_CONFIGURATION', configuration)
from configurations.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[
"ENVIRONMENT"
] |
[]
|
["ENVIRONMENT"]
|
python
| 1 | 0 | |
app/plugin_test.go
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package app
import (
"bytes"
"crypto/sha256"
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"time"
"github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/mattermost/mattermost-server/v5/mlog"
"github.com/mattermost/mattermost-server/v5/model"
"github.com/mattermost/mattermost-server/v5/plugin"
"github.com/mattermost/mattermost-server/v5/testlib"
"github.com/mattermost/mattermost-server/v5/utils"
"github.com/mattermost/mattermost-server/v5/utils/fileutils"
)
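// getHashedKey reproduces the legacy plugin KV scheme in which keys were stored as base64-encoded SHA-256 hashes of the key name.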
func getHashedKey(key string) string {
hash := sha256.New()
hash.Write([]byte(key))
return base64.StdEncoding.EncodeToString(hash.Sum(nil))
}
func TestPluginKeyValueStore(t *testing.T) {
th := Setup(t)
defer th.TearDown()
pluginId := "testpluginid"
defer func() {
assert.Nil(t, th.App.DeletePluginKey(pluginId, "key"))
assert.Nil(t, th.App.DeletePluginKey(pluginId, "key2"))
assert.Nil(t, th.App.DeletePluginKey(pluginId, "key3"))
assert.Nil(t, th.App.DeletePluginKey(pluginId, "key4"))
}()
assert.Nil(t, th.App.SetPluginKey(pluginId, "key", []byte("test")))
ret, err := th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Equal(t, []byte("test"), ret)
// Test inserting over existing entries
assert.Nil(t, th.App.SetPluginKey(pluginId, "key", []byte("test2")))
ret, err = th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Equal(t, []byte("test2"), ret)
// Test getting non-existent key
ret, err = th.App.GetPluginKey(pluginId, "notakey")
assert.Nil(t, err)
assert.Nil(t, ret)
// Test deleting non-existent keys.
assert.Nil(t, th.App.DeletePluginKey(pluginId, "notrealkey"))
// Verify behaviour for the old approach that involved storing the hashed keys.
hashedKey2 := getHashedKey("key2")
kv := &model.PluginKeyValue{
PluginId: pluginId,
Key: hashedKey2,
Value: []byte("test"),
ExpireAt: 0,
}
_, err = th.App.Srv().Store.Plugin().SaveOrUpdate(kv)
assert.Nil(t, err)
// Test fetch by keyname (this key does not exist but hashed key will be used for lookup)
ret, err = th.App.GetPluginKey(pluginId, "key2")
assert.Nil(t, err)
assert.Equal(t, kv.Value, ret)
// Test fetch by hashed keyname
ret, err = th.App.GetPluginKey(pluginId, hashedKey2)
assert.Nil(t, err)
assert.Equal(t, kv.Value, ret)
// Test ListKeys
assert.Nil(t, th.App.SetPluginKey(pluginId, "key3", []byte("test3")))
assert.Nil(t, th.App.SetPluginKey(pluginId, "key4", []byte("test4")))
list, err := th.App.ListPluginKeys(pluginId, 0, 1)
assert.Nil(t, err)
assert.Equal(t, []string{"key"}, list)
list, err = th.App.ListPluginKeys(pluginId, 1, 1)
assert.Nil(t, err)
assert.Equal(t, []string{"key3"}, list)
list, err = th.App.ListPluginKeys(pluginId, 0, 4)
assert.Nil(t, err)
assert.Equal(t, []string{"key", "key3", "key4", hashedKey2}, list)
list, err = th.App.ListPluginKeys(pluginId, 0, 2)
assert.Nil(t, err)
assert.Equal(t, []string{"key", "key3"}, list)
list, err = th.App.ListPluginKeys(pluginId, 1, 2)
assert.Nil(t, err)
assert.Equal(t, []string{"key4", hashedKey2}, list)
list, err = th.App.ListPluginKeys(pluginId, 2, 2)
assert.Nil(t, err)
assert.Equal(t, []string{}, list)
// List Keys bad input
list, err = th.App.ListPluginKeys(pluginId, 0, 0)
assert.Nil(t, err)
assert.Equal(t, []string{"key", "key3", "key4", hashedKey2}, list)
list, err = th.App.ListPluginKeys(pluginId, 0, -1)
assert.Nil(t, err)
assert.Equal(t, []string{"key", "key3", "key4", hashedKey2}, list)
list, err = th.App.ListPluginKeys(pluginId, -1, 1)
assert.Nil(t, err)
assert.Equal(t, []string{"key"}, list)
list, err = th.App.ListPluginKeys(pluginId, -1, 0)
assert.Nil(t, err)
assert.Equal(t, []string{"key", "key3", "key4", hashedKey2}, list)
}
func TestPluginKeyValueStoreCompareAndSet(t *testing.T) {
th := Setup(t)
defer th.TearDown()
pluginId := "testpluginid"
defer func() {
assert.Nil(t, th.App.DeletePluginKey(pluginId, "key"))
}()
// Set using Set api for key2
assert.Nil(t, th.App.SetPluginKey(pluginId, "key2", []byte("test")))
ret, err := th.App.GetPluginKey(pluginId, "key2")
assert.Nil(t, err)
assert.Equal(t, []byte("test"), ret)
// Attempt to insert value for key2
updated, err := th.App.CompareAndSetPluginKey(pluginId, "key2", nil, []byte("test2"))
assert.Nil(t, err)
assert.False(t, updated)
ret, err = th.App.GetPluginKey(pluginId, "key2")
assert.Nil(t, err)
assert.Equal(t, []byte("test"), ret)
// Insert new value for key
updated, err = th.App.CompareAndSetPluginKey(pluginId, "key", nil, []byte("test"))
assert.Nil(t, err)
assert.True(t, updated)
ret, err = th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Equal(t, []byte("test"), ret)
// Should fail to insert again
updated, err = th.App.CompareAndSetPluginKey(pluginId, "key", nil, []byte("test3"))
assert.Nil(t, err)
assert.False(t, updated)
ret, err = th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Equal(t, []byte("test"), ret)
// Test updating using incorrect old value
updated, err = th.App.CompareAndSetPluginKey(pluginId, "key", []byte("oldvalue"), []byte("test3"))
assert.Nil(t, err)
assert.False(t, updated)
ret, err = th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Equal(t, []byte("test"), ret)
// Test updating using correct old value
updated, err = th.App.CompareAndSetPluginKey(pluginId, "key", []byte("test"), []byte("test2"))
assert.Nil(t, err)
assert.True(t, updated)
ret, err = th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Equal(t, []byte("test2"), ret)
}
func TestPluginKeyValueStoreSetWithOptionsJSON(t *testing.T) {
pluginId := "testpluginid"
t.Run("storing a value without providing options works", func(t *testing.T) {
th := Setup(t)
defer th.TearDown()
result, err := th.App.SetPluginKeyWithOptions(pluginId, "key", []byte("value-1"), model.PluginKVSetOptions{})
assert.True(t, result)
assert.Nil(t, err)
// and I can get it back!
ret, err := th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Equal(t, []byte(`value-1`), ret)
})
t.Run("test that setting it atomic when it doesn't match doesn't change anything", func(t *testing.T) {
th := Setup(t)
defer th.TearDown()
err := th.App.SetPluginKey(pluginId, "key", []byte("value-1"))
require.Nil(t, err)
result, err := th.App.SetPluginKeyWithOptions(pluginId, "key", []byte("value-3"), model.PluginKVSetOptions{
Atomic: true,
OldValue: []byte("value-2"),
})
assert.False(t, result)
assert.Nil(t, err)
// test that the value didn't change
ret, err := th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Equal(t, []byte(`value-1`), ret)
})
t.Run("test the atomic change with the proper old value", func(t *testing.T) {
th := Setup(t)
defer th.TearDown()
err := th.App.SetPluginKey(pluginId, "key", []byte("value-2"))
require.Nil(t, err)
result, err := th.App.SetPluginKeyWithOptions(pluginId, "key", []byte("value-3"), model.PluginKVSetOptions{
Atomic: true,
OldValue: []byte("value-2"),
})
assert.True(t, result)
assert.Nil(t, err)
// test that the value did change
ret, err := th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Equal(t, []byte(`value-3`), ret)
})
t.Run("when new value is nil and old value matches with the current, it should delete the currently set value", func(t *testing.T) {
th := Setup(t)
defer th.TearDown()
// first set a value.
result, err := th.App.SetPluginKeyWithOptions(pluginId, "nil-test-key-2", []byte("value-1"), model.PluginKVSetOptions{})
require.Nil(t, err)
require.True(t, result)
// now it should delete the set value.
result, err = th.App.SetPluginKeyWithOptions(pluginId, "nil-test-key-2", nil, model.PluginKVSetOptions{
Atomic: true,
OldValue: []byte("value-1"),
})
assert.Nil(t, err)
assert.True(t, result)
ret, err := th.App.GetPluginKey(pluginId, "nil-test-key-2")
assert.Nil(t, err)
assert.Nil(t, ret)
})
t.Run("when new value is nil and there is a value set for the key already, it should delete the currently set value", func(t *testing.T) {
th := Setup(t)
defer th.TearDown()
// first set a value.
result, err := th.App.SetPluginKeyWithOptions(pluginId, "nil-test-key-3", []byte("value-1"), model.PluginKVSetOptions{})
require.Nil(t, err)
require.True(t, result)
// now it should delete the set value.
result, err = th.App.SetPluginKeyWithOptions(pluginId, "nil-test-key-3", nil, model.PluginKVSetOptions{})
assert.Nil(t, err)
assert.True(t, result)
// verify a nil value is returned
ret, err := th.App.GetPluginKey(pluginId, "nil-test-key-3")
assert.Nil(t, err)
assert.Nil(t, ret)
// verify the row is actually gone
list, err := th.App.ListPluginKeys(pluginId, 0, 1)
assert.Nil(t, err)
assert.Empty(t, list)
})
t.Run("when old value is nil and there is no value set for the key before, it should set the new value", func(t *testing.T) {
th := Setup(t)
defer th.TearDown()
result, err := th.App.SetPluginKeyWithOptions(pluginId, "nil-test-key-4", []byte("value-1"), model.PluginKVSetOptions{
Atomic: true,
OldValue: nil,
})
assert.Nil(t, err)
assert.True(t, result)
ret, err := th.App.GetPluginKey(pluginId, "nil-test-key-4")
assert.Nil(t, err)
assert.Equal(t, []byte("value-1"), ret)
})
t.Run("test that value is set and unset with ExpireInSeconds", func(t *testing.T) {
th := Setup(t)
defer th.TearDown()
result, err := th.App.SetPluginKeyWithOptions(pluginId, "key", []byte("value-1"), model.PluginKVSetOptions{
ExpireInSeconds: 1,
})
assert.True(t, result)
assert.Nil(t, err)
// test that the value is set
ret, err := th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Equal(t, []byte(`value-1`), ret)
// test that the value is no longer present once it expires
time.Sleep(1500 * time.Millisecond)
ret, err = th.App.GetPluginKey(pluginId, "key")
assert.Nil(t, err)
assert.Nil(t, ret)
})
}
func TestServePluginRequest(t *testing.T) {
th := Setup(t)
defer th.TearDown()
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.PluginSettings.Enable = false })
w := httptest.NewRecorder()
r := httptest.NewRequest("GET", "/plugins/foo/bar", nil)
th.App.ServePluginRequest(w, r)
assert.Equal(t, http.StatusNotImplemented, w.Result().StatusCode)
}
func TestPrivateServePluginRequest(t *testing.T) {
th := Setup(t)
defer th.TearDown()
testCases := []struct {
Description string
ConfigFunc func(cfg *model.Config)
URL string
ExpectedURL string
}{
{
"no subpath",
func(cfg *model.Config) {},
"/plugins/id/endpoint",
"/endpoint",
},
{
"subpath",
func(cfg *model.Config) { *cfg.ServiceSettings.SiteURL += "/subpath" },
"/subpath/plugins/id/endpoint",
"/endpoint",
},
}
for _, testCase := range testCases {
t.Run(testCase.Description, func(t *testing.T) {
th.App.UpdateConfig(testCase.ConfigFunc)
expectedBody := []byte("body")
request := httptest.NewRequest(http.MethodGet, testCase.URL, bytes.NewReader(expectedBody))
recorder := httptest.NewRecorder()
handler := func(context *plugin.Context, w http.ResponseWriter, r *http.Request) {
assert.Equal(t, testCase.ExpectedURL, r.URL.Path)
body, _ := ioutil.ReadAll(r.Body)
assert.Equal(t, expectedBody, body)
}
request = mux.SetURLVars(request, map[string]string{"plugin_id": "id"})
th.App.servePluginRequest(recorder, request, handler)
})
}
}
func TestHandlePluginRequest(t *testing.T) {
th := Setup(t).InitBasic()
defer th.TearDown()
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.Enable = false
*cfg.ServiceSettings.EnableUserAccessTokens = true
})
token, err := th.App.CreateUserAccessToken(&model.UserAccessToken{
UserId: th.BasicUser.Id,
})
require.Nil(t, err)
var assertions func(*http.Request)
router := mux.NewRouter()
router.HandleFunc("/plugins/{plugin_id:[A-Za-z0-9\\_\\-\\.]+}/{anything:.*}", func(_ http.ResponseWriter, r *http.Request) {
th.App.servePluginRequest(nil, r, func(_ *plugin.Context, _ http.ResponseWriter, r *http.Request) {
assertions(r)
})
})
r := httptest.NewRequest("GET", "/plugins/foo/bar", nil)
r.Header.Add("Authorization", "Bearer "+token.Token)
assertions = func(r *http.Request) {
assert.Equal(t, "/bar", r.URL.Path)
assert.Equal(t, th.BasicUser.Id, r.Header.Get("Mattermost-User-Id"))
}
router.ServeHTTP(nil, r)
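// A valid token passed via the access_token query parameter should authenticate the user and be stripped from the query string forwarded to the plugin.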
r = httptest.NewRequest("GET", "/plugins/foo/bar?a=b&access_token="+token.Token+"&c=d", nil)
assertions = func(r *http.Request) {
assert.Equal(t, "/bar", r.URL.Path)
assert.Equal(t, "a=b&c=d", r.URL.RawQuery)
assert.Equal(t, th.BasicUser.Id, r.Header.Get("Mattermost-User-Id"))
}
router.ServeHTTP(nil, r)
r = httptest.NewRequest("GET", "/plugins/foo/bar?a=b&access_token=asdf&c=d", nil)
assertions = func(r *http.Request) {
assert.Equal(t, "/bar", r.URL.Path)
assert.Equal(t, "a=b&c=d", r.URL.RawQuery)
assert.Empty(t, r.Header.Get("Mattermost-User-Id"))
}
router.ServeHTTP(nil, r)
}
func TestGetPluginStatusesDisabled(t *testing.T) {
th := Setup(t)
defer th.TearDown()
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.Enable = false
})
_, err := th.App.GetPluginStatuses()
require.NotNil(t, err)
require.EqualError(t, err, "GetPluginStatuses: Plugins have been disabled. Please check your logs for details., ")
}
func TestGetPluginStatuses(t *testing.T) {
th := Setup(t)
defer th.TearDown()
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.Enable = true
})
pluginStatuses, err := th.App.GetPluginStatuses()
require.Nil(t, err)
require.NotNil(t, pluginStatuses)
}
func TestPluginSync(t *testing.T) {
th := Setup(t)
defer th.TearDown()
testCases := []struct {
Description string
ConfigFunc func(cfg *model.Config)
}{
{
"local",
func(cfg *model.Config) {
cfg.FileSettings.DriverName = model.NewString(model.IMAGE_DRIVER_LOCAL)
},
},
{
"s3",
func(cfg *model.Config) {
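// Use the CI-provided MinIO endpoint for the S3 file store, falling back to localhost:9000 for local runs.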
s3Host := os.Getenv("CI_MINIO_HOST")
if s3Host == "" {
s3Host = "localhost"
}
s3Port := os.Getenv("CI_MINIO_PORT")
if s3Port == "" {
s3Port = "9000"
}
s3Endpoint := fmt.Sprintf("%s:%s", s3Host, s3Port)
cfg.FileSettings.DriverName = model.NewString(model.IMAGE_DRIVER_S3)
cfg.FileSettings.AmazonS3AccessKeyId = model.NewString(model.MINIO_ACCESS_KEY)
cfg.FileSettings.AmazonS3SecretAccessKey = model.NewString(model.MINIO_SECRET_KEY)
cfg.FileSettings.AmazonS3Bucket = model.NewString(model.MINIO_BUCKET)
cfg.FileSettings.AmazonS3PathPrefix = model.NewString("")
cfg.FileSettings.AmazonS3Endpoint = model.NewString(s3Endpoint)
cfg.FileSettings.AmazonS3Region = model.NewString("")
cfg.FileSettings.AmazonS3SSL = model.NewBool(false)
},
},
}
for _, testCase := range testCases {
t.Run(testCase.Description, func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.Enable = true
testCase.ConfigFunc(cfg)
})
env := th.App.GetPluginsEnvironment()
require.NotNil(t, env)
path, _ := fileutils.FindDir("tests")
t.Run("new bundle in the file store", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.RequirePluginSignature = false
})
fileReader, err := os.Open(filepath.Join(path, "testplugin.tar.gz"))
require.NoError(t, err)
defer fileReader.Close()
_, appErr := th.App.WriteFile(fileReader, th.App.getBundleStorePath("testplugin"))
checkNoError(t, appErr)
appErr = th.App.SyncPlugins()
checkNoError(t, appErr)
// Check if installed
pluginStatus, err := env.Statuses()
require.Nil(t, err)
require.Len(t, pluginStatus, 1)
require.Equal(t, pluginStatus[0].PluginId, "testplugin")
})
t.Run("bundle removed from the file store", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.RequirePluginSignature = false
})
appErr := th.App.RemoveFile(th.App.getBundleStorePath("testplugin"))
checkNoError(t, appErr)
appErr = th.App.SyncPlugins()
checkNoError(t, appErr)
// Check if removed
pluginStatus, err := env.Statuses()
require.Nil(t, err)
require.Empty(t, pluginStatus)
})
t.Run("plugin signatures required, no signature", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.RequirePluginSignature = true
})
pluginFileReader, err := os.Open(filepath.Join(path, "testplugin.tar.gz"))
require.NoError(t, err)
defer pluginFileReader.Close()
_, appErr := th.App.WriteFile(pluginFileReader, th.App.getBundleStorePath("testplugin"))
checkNoError(t, appErr)
appErr = th.App.SyncPlugins()
checkNoError(t, appErr)
pluginStatus, err := env.Statuses()
require.Nil(t, err)
require.Len(t, pluginStatus, 0)
})
t.Run("plugin signatures required, wrong signature", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.RequirePluginSignature = true
})
signatureFileReader, err := os.Open(filepath.Join(path, "testplugin2.tar.gz.sig"))
require.NoError(t, err)
defer signatureFileReader.Close()
_, appErr := th.App.WriteFile(signatureFileReader, th.App.getSignatureStorePath("testplugin"))
checkNoError(t, appErr)
appErr = th.App.SyncPlugins()
checkNoError(t, appErr)
pluginStatus, err := env.Statuses()
require.Nil(t, err)
require.Len(t, pluginStatus, 0)
})
t.Run("plugin signatures required, correct signature", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.RequirePluginSignature = true
})
key, err := os.Open(filepath.Join(path, "development-private-key.asc"))
require.NoError(t, err)
appErr := th.App.AddPublicKey("pub_key", key)
checkNoError(t, appErr)
signatureFileReader, err := os.Open(filepath.Join(path, "testplugin.tar.gz.sig"))
require.NoError(t, err)
defer signatureFileReader.Close()
_, appErr = th.App.WriteFile(signatureFileReader, th.App.getSignatureStorePath("testplugin"))
checkNoError(t, appErr)
appErr = th.App.SyncPlugins()
checkNoError(t, appErr)
pluginStatus, err := env.Statuses()
require.Nil(t, err)
require.Len(t, pluginStatus, 1)
require.Equal(t, pluginStatus[0].PluginId, "testplugin")
appErr = th.App.DeletePublicKey("pub_key")
checkNoError(t, appErr)
appErr = th.App.RemovePlugin("testplugin")
checkNoError(t, appErr)
})
})
}
}
func TestPluginPanicLogs(t *testing.T) {
t.Run("should panic", func(t *testing.T) {
th := Setup(t).InitBasic()
defer th.TearDown()
tearDown, _, _ := SetAppEnvironmentWithPlugins(t, []string{
`
package main
import (
"github.com/mattermost/mattermost-server/v5/plugin"
"github.com/mattermost/mattermost-server/v5/model"
)
type MyPlugin struct {
plugin.MattermostPlugin
}
func (p *MyPlugin) MessageWillBePosted(c *plugin.Context, post *model.Post) (*model.Post, string) {
panic("some text from panic")
return nil, ""
}
func main() {
plugin.ClientMain(&MyPlugin{})
}
`,
}, th.App, th.App.NewPluginAPI)
defer tearDown()
post := &model.Post{
UserId: th.BasicUser.Id,
ChannelId: th.BasicChannel.Id,
Message: "message_",
CreateAt: model.GetMillis() - 10000,
}
_, err := th.App.CreatePost(post, th.BasicChannel, false, true)
assert.Nil(t, err)
testlib.AssertLog(t, th.LogBuffer, mlog.LevelDebug, "panic: some text from panic")
})
}
func TestProcessPrepackagedPlugins(t *testing.T) {
th := Setup(t)
defer th.TearDown()
testsPath, _ := fileutils.FindDir("tests")
prepackagedPluginsPath := filepath.Join(testsPath, prepackagedPluginsDir)
fileErr := os.Mkdir(prepackagedPluginsPath, os.ModePerm)
require.NoError(t, fileErr)
defer os.RemoveAll(prepackagedPluginsPath)
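// Resolve the absolute path of the directory created above; the plugin bundles and signatures are copied into it below.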
prepackagedPluginsDir, found := fileutils.FindDir(prepackagedPluginsPath)
require.True(t, found, "failed to find prepackaged plugins directory")
testPluginPath := filepath.Join(testsPath, "testplugin.tar.gz")
fileErr = utils.CopyFile(testPluginPath, filepath.Join(prepackagedPluginsDir, "testplugin.tar.gz"))
require.NoError(t, fileErr)
t.Run("automatic, enabled plugin, no signature", func(t *testing.T) {
// Install the plugin and enable
pluginBytes, err := ioutil.ReadFile(testPluginPath)
require.NoError(t, err)
require.NotNil(t, pluginBytes)
manifest, appErr := th.App.installPluginLocally(bytes.NewReader(pluginBytes), nil, installPluginLocallyAlways)
require.Nil(t, appErr)
require.Equal(t, "testplugin", manifest.Id)
env := th.App.GetPluginsEnvironment()
activatedManifest, activated, err := env.Activate(manifest.Id)
require.NoError(t, err)
require.True(t, activated)
require.Equal(t, manifest, activatedManifest)
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.Enable = true
*cfg.PluginSettings.AutomaticPrepackagedPlugins = true
})
plugins := th.App.processPrepackagedPlugins(prepackagedPluginsDir)
require.Len(t, plugins, 1)
require.Equal(t, plugins[0].Manifest.Id, "testplugin")
require.Empty(t, plugins[0].Signature, 0)
pluginStatus, err := env.Statuses()
require.NoError(t, err)
require.Len(t, pluginStatus, 1)
require.Equal(t, pluginStatus[0].PluginId, "testplugin")
appErr = th.App.RemovePlugin("testplugin")
checkNoError(t, appErr)
pluginStatus, err = env.Statuses()
require.NoError(t, err)
require.Len(t, pluginStatus, 0)
})
t.Run("automatic, not enabled plugin", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.Enable = true
*cfg.PluginSettings.AutomaticPrepackagedPlugins = true
})
env := th.App.GetPluginsEnvironment()
plugins := th.App.processPrepackagedPlugins(prepackagedPluginsDir)
require.Len(t, plugins, 1)
require.Equal(t, plugins[0].Manifest.Id, "testplugin")
require.Empty(t, plugins[0].Signature, 0)
pluginStatus, err := env.Statuses()
require.NoError(t, err)
require.Empty(t, pluginStatus, 0)
})
t.Run("automatic, multiple plugins with signatures, not enabled", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.Enable = true
*cfg.PluginSettings.AutomaticPrepackagedPlugins = true
})
env := th.App.GetPluginsEnvironment()
// Add signature
testPluginSignaturePath := filepath.Join(testsPath, "testplugin.tar.gz.sig")
err := utils.CopyFile(testPluginSignaturePath, filepath.Join(prepackagedPluginsDir, "testplugin.tar.gz.sig"))
require.NoError(t, err)
// Add second plugin
testPlugin2Path := filepath.Join(testsPath, "testplugin2.tar.gz")
err = utils.CopyFile(testPlugin2Path, filepath.Join(prepackagedPluginsDir, "testplugin2.tar.gz"))
require.NoError(t, err)
testPlugin2SignaturePath := filepath.Join(testsPath, "testplugin2.tar.gz.sig")
err = utils.CopyFile(testPlugin2SignaturePath, filepath.Join(prepackagedPluginsDir, "testplugin2.tar.gz.sig"))
require.NoError(t, err)
plugins := th.App.processPrepackagedPlugins(prepackagedPluginsDir)
require.Len(t, plugins, 2)
require.Contains(t, []string{"testplugin", "testplugin2"}, plugins[0].Manifest.Id)
require.NotEmpty(t, plugins[0].Signature)
require.Contains(t, []string{"testplugin", "testplugin2"}, plugins[1].Manifest.Id)
require.NotEmpty(t, plugins[1].Signature)
pluginStatus, err := env.Statuses()
require.NoError(t, err)
require.Len(t, pluginStatus, 0)
})
t.Run("automatic, multiple plugins with signatures, one enabled", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.Enable = true
*cfg.PluginSettings.AutomaticPrepackagedPlugins = true
})
env := th.App.GetPluginsEnvironment()
// Add signature
testPluginSignaturePath := filepath.Join(testsPath, "testplugin.tar.gz.sig")
err := utils.CopyFile(testPluginSignaturePath, filepath.Join(prepackagedPluginsDir, "testplugin.tar.gz.sig"))
require.NoError(t, err)
// Install first plugin and enable
pluginBytes, err := ioutil.ReadFile(testPluginPath)
require.NoError(t, err)
require.NotNil(t, pluginBytes)
manifest, appErr := th.App.installPluginLocally(bytes.NewReader(pluginBytes), nil, installPluginLocallyAlways)
require.Nil(t, appErr)
require.Equal(t, "testplugin", manifest.Id)
activatedManifest, activated, err := env.Activate(manifest.Id)
require.NoError(t, err)
require.True(t, activated)
require.Equal(t, manifest, activatedManifest)
// Add second plugin
testPlugin2Path := filepath.Join(testsPath, "testplugin2.tar.gz")
err = utils.CopyFile(testPlugin2Path, filepath.Join(prepackagedPluginsDir, "testplugin2.tar.gz"))
require.NoError(t, err)
testPlugin2SignaturePath := filepath.Join(testsPath, "testplugin2.tar.gz.sig")
err = utils.CopyFile(testPlugin2SignaturePath, filepath.Join(prepackagedPluginsDir, "testplugin2.tar.gz.sig"))
require.NoError(t, err)
plugins := th.App.processPrepackagedPlugins(prepackagedPluginsDir)
require.Len(t, plugins, 2)
require.Contains(t, []string{"testplugin", "testplugin2"}, plugins[0].Manifest.Id)
require.NotEmpty(t, plugins[0].Signature)
require.Contains(t, []string{"testplugin", "testplugin2"}, plugins[1].Manifest.Id)
require.NotEmpty(t, plugins[1].Signature)
pluginStatus, err := env.Statuses()
require.NoError(t, err)
require.Len(t, pluginStatus, 1)
require.Equal(t, pluginStatus[0].PluginId, "testplugin")
appErr = th.App.RemovePlugin("testplugin")
checkNoError(t, appErr)
pluginStatus, err = env.Statuses()
require.NoError(t, err)
require.Len(t, pluginStatus, 0)
})
t.Run("non-automatic, multiple plugins", func(t *testing.T) {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.PluginSettings.Enable = true
*cfg.PluginSettings.AutomaticPrepackagedPlugins = false
})
env := th.App.GetPluginsEnvironment()
testPlugin2Path := filepath.Join(testsPath, "testplugin2.tar.gz")
err := utils.CopyFile(testPlugin2Path, filepath.Join(prepackagedPluginsDir, "testplugin2.tar.gz"))
require.NoError(t, err)
testPlugin2SignaturePath := filepath.Join(testsPath, "testplugin2.tar.gz.sig")
err = utils.CopyFile(testPlugin2SignaturePath, filepath.Join(prepackagedPluginsDir, "testplugin2.tar.gz.sig"))
require.NoError(t, err)
plugins := th.App.processPrepackagedPlugins(prepackagedPluginsDir)
require.Len(t, plugins, 2)
require.Contains(t, []string{"testplugin", "testplugin2"}, plugins[0].Manifest.Id)
require.NotEmpty(t, plugins[0].Signature)
require.Contains(t, []string{"testplugin", "testplugin2"}, plugins[1].Manifest.Id)
require.NotEmpty(t, plugins[1].Signature)
pluginStatus, err := env.Statuses()
require.NoError(t, err)
require.Len(t, pluginStatus, 0)
})
}
|
[
"\"CI_MINIO_HOST\"",
"\"CI_MINIO_PORT\""
] |
[] |
[
"CI_MINIO_PORT",
"CI_MINIO_HOST"
] |
[]
|
["CI_MINIO_PORT", "CI_MINIO_HOST"]
|
go
| 2 | 0 | |
st/clitests/auth_spec.py
|
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email [email protected] or [email protected].
#
import os
import sys
import yaml
import time
from ldap_setup import LdapInfo
from framework import Config
from framework import S3PyCliTest
from auth import AuthTest
from s3client_config import S3ClientConfig
from s3cmd import S3cmdTest
from s3fi import S3fiTest
from awss3api import AwsTest
from shutil import copyfile
import shutil
from s3confstore.cortx_s3_confstore import S3CortxConfStore
home_dir = os.path.expanduser("~")
original_config_file = os.path.join(home_dir, '.sgs3iamcli/config.yaml')
backup_config_file = os.path.join(home_dir, '.sgs3iamcli/backup_config.yaml')
# Helps debugging
# Config.log_enabled = True
# Config.dummy_run = True
# Set time_readable_format to False if you want to display the time in milliseconds.
# Config.time_readable_format = False
# global params required for the test suite
def update_config_yaml(new_config_entries):
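# Back up ~/.sgs3iamcli/config.yaml and merge the given entries into it so tests can override CLI settings; restore_config_yaml() reverts the change.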
shutil.copy2(original_config_file, backup_config_file)
with open(original_config_file, 'r') as f:
cur_yaml = yaml.safe_load(f)
cur_yaml.update(new_config_entries)
with open(original_config_file, 'w') as f:
yaml.dump(cur_yaml, f, default_flow_style = False)
def restore_config_yaml():
# Restore original ~/.sgs3iamcli/config.yaml file
shutil.copy2(backup_config_file, original_config_file)
class GlobalTestState():
root_access_key = ""
root_secret_key = ""
# Extract the response elements from response which has the following format
# <Key 1> = <Value 1>, <Key 2> = <Value 2> ... <Key n> = <Value n>
def get_response_elements(response):
response_elements = {}
key_pairs = response.split(',')
for key_pair in key_pairs:
tokens = key_pair.split('=')
response_elements[tokens[0].strip()] = tokens[1].strip()
return response_elements
# Load test config file
def load_test_config():
conf_file = os.path.join(os.path.dirname(__file__),'s3iamcli_test_config.yaml')
with open(conf_file, 'r') as f:
config = yaml.safe_load(f)
S3ClientConfig.ldapuser = config['ldapuser']
S3ClientConfig.ldappasswd = config['ldappasswd']
# Run before all to setup the test environment.
def before_all():
load_test_config()
print("Configuring LDAP")
S3PyCliTest('Before_all').before_all()
# Set S3ClientConfig with root credentials
def _use_root_credentials():
S3ClientConfig.access_key_id = GlobalTestState.root_access_key
S3ClientConfig.secret_key = GlobalTestState.root_secret_key
# Test create account API
def account_tests():
# Test Create Account with keys
# 1. Positive
test_msg = "Create account s3testwithkeys1 with Access key and Secret key"
account_args = {
'AccountName': 's3testwithkeys1',
'Email': '[email protected]',
'ldapuser': S3ClientConfig.ldapuser,
'ldappasswd': S3ClientConfig.ldappasswd,
'access_key': 'AAAAAAAAAAAAAAAAA1',
'secret_key': 'SSSSSSSSS1'
}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
AuthTest("Test passed. Deleting account.").delete_account(**account_args).execute_test()
# 2. Negative
test_msg = "Create account s3testwithkeys2 with only Access key"
account_args = {
'AccountName': 's3testwithkeys2',
'Email': '[email protected]',
'ldapuser': S3ClientConfig.ldapuser,
'ldappasswd': S3ClientConfig.ldappasswd,
'access_key': 'AAAAAAAAAAAAAAAAA2'
}
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidParameterValue")
# 3. Negative
test_msg = "Create account s3testwithkeys3 with only Secret key"
account_args = {
'AccountName': 's3testwithkeys3',
'Email': '[email protected]',
'ldapuser': S3ClientConfig.ldapuser,
'ldappasswd': S3ClientConfig.ldappasswd,
'secret_key': 'SSSSSSSS2'
}
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidParameterValue")
# 4. Negative
test_msg = "Create account s3testwithkeys4 with invalid Access key"
account_args = {
'AccountName': 's3testwithkeys4',
'Email': '[email protected]',
'ldapuser': S3ClientConfig.ldapuser,
'ldappasswd': S3ClientConfig.ldappasswd,
'access_key': 'AAAAA',
'secret_key': 'SSSSSSSSS12'
}
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidParameterValue")
# 5. Negative
test_msg = "Create account s3testwithkeys5 with invalid Secret key"
account_args = {
'AccountName': 's3testwithkeys5',
'Email': '[email protected]',
'ldapuser': S3ClientConfig.ldapuser,
'ldappasswd': S3ClientConfig.ldappasswd,
'access_key': 'AAAAAAAAAAAAAAAA3',
'secret_key': 'SSSS'
}
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidParameterValue")
test_msg = "Create account s3test"
account_args = {'AccountName': 's3test', 'Email': '[email protected]', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
GlobalTestState.root_access_key = account_response_elements['AccessKeyId']
GlobalTestState.root_secret_key = account_response_elements['SecretKey']
# Create Account again with same email ID
test_msg = "Create account s3test1 should fail with EmailAlreadyExists"
account_args = {'AccountName': 's3test1', 'Email': '[email protected]', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "Account wasn't created."
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_should_match_pattern(account_response_pattern)
result.command_response_should_have("EmailAlreadyExists")
test_msg = "List accounts"
account_args = {'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
accounts_response_pattern = "AccountName = [\w-]*, AccountId = [\w-]*, CanonicalId = [\w-]*, Email = [\w.@]*"
result = AuthTest(test_msg).list_account(**account_args).execute_test()
result.command_should_match_pattern(accounts_response_pattern)
test_msg = "List accounts - Take ldapuser and ldappasswd from config"
# Put SG_LDAP_PASSWD and SG_LDAP_USER in ~/.sgs3iamcli/config.yaml file
new_config_entries = {'SG_LDAP_PASSWD' : S3ClientConfig.ldappasswd, 'SG_LDAP_USER': S3ClientConfig.ldapuser}
update_config_yaml(new_config_entries)
accounts_response_pattern = "AccountName = [\w-]*, AccountId = [\w-]*, CanonicalId = [\w-]*, Email = [\w.@]*"
result = AuthTest(test_msg).list_account().execute_test()
result.command_should_match_pattern(accounts_response_pattern)
restore_config_yaml()
test_msg = "List accounts - Take ldapuser and ldappasswd from env"
# Declare SG_LDAP_USER and SG_LDAP_PASSWD environment variables
os.environ['SG_LDAP_USER'] = S3ClientConfig.ldapuser
os.environ['SG_LDAP_PASSWD'] = S3ClientConfig.ldappasswd
accounts_response_pattern = "AccountName = [\w-]*, AccountId = [\w-]*, CanonicalId = [\w-]*, Email = [\w.@]*"
result = AuthTest(test_msg).list_account().execute_test()
result.command_should_match_pattern(accounts_response_pattern)
# Remove environment variables declared above
os.environ.pop("SG_LDAP_USER")
os.environ.pop("SG_LDAP_PASSWD")
test_msg = "List accounts - Take invalid ldapuser and ldappasswd from config"
new_config_entries = {'SG_LDAP_PASSWD': 'sgiamadmin#', 'SG_LDAP_USER': 'ldapadmin#'}
update_config_yaml(new_config_entries)
result = AuthTest(test_msg).list_account().execute_test(negative_case=True)
result.command_should_match_pattern("Failed to list accounts")
restore_config_yaml()
#TODO - Need to fix this test. Currently skipping this test as it waits for password to be entered manually through prompt.
'''
test_msg = "List accounts - Take ldapuser and ldappasswd from prompt"
_use_root_credentials()
accounts_response_pattern = "Enter Ldap User Id: Enter Ldap password: AccountName = [\w-]*, AccountId = [\w-]*, CanonicalId = [\w-]*, Email = [\w.@]*"
stdin_values = S3ClientConfig.ldapuser + '\n' + S3ClientConfig.ldappasswd
S3ClientConfig.ldapuser = None
S3ClientConfig.ldappasswd = None
result = AuthTest(test_msg).list_account().execute_test(False, False, stdin_values)
result.command_should_match_pattern(accounts_response_pattern)
'''
load_test_config()
# Test create user API
# Case 1 - Path not given (take default value).
# Case 2 - Path given
def user_tests():
_use_root_credentials()
date_pattern = "[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1]) (2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9][\+-][0-9]*:[0-9]*"
#The account below is used for aws iam cli system testing. First delete the account if it already exists, then create it anew.
test_msg = "Delete account if already exist"
account_args = {}
test_msg = "Delete account aws_iam_test_account"
account_args = {'AccountName': 'aws_iam_test_account'}
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)
test_msg = "Create account aws_iam_test_account"
account_args = {'AccountName': 'aws_iam_test_account', 'Email': '[email protected]', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
#Save the details in file
f = open("aws_iam_credential_file" , "w")
f.write("[default]\n")
f.write("aws_access_key_id = ")
f.write(account_response_elements['AccessKeyId'])
f.write("\naws_secret_access_key = ")
f.write(account_response_elements['SecretKey'])
f.close()
#GetTempAuth Start
#Create account
test_msg = "Create account tempAuthTestAccount"
account_args = {'AccountName': 'tempAuthTestAccount', 'Email': '[email protected]', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result1.status.stdout)
access_key_args = {}
access_key_args['AccountName'] = "tempAuthTestAccount"
access_key_args['AccessKeyId'] = account_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = account_response_elements['SecretKey']
s3test_access_key = S3ClientConfig.access_key_id
s3test_secret_key = S3ClientConfig.secret_key
S3ClientConfig.access_key_id = access_key_args['AccessKeyId']
S3ClientConfig.secret_key = access_key_args['SecretAccessKey']
#Create Account LoginProfile for tempAuthTestAccount"
test_msg = 'create account login profile should succeed.'
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = false, AccountName = [\s\S]*"
user_args = {}
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="tempAuthTestAccount"
user_args['Password'] = "accountpassword"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
date_pattern_for_tempAuthCred = "[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T(2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9].[0-9]*[\+-][0-9]*"
#Get Temp Auth Credentials for the account
access_key_args['Password'] = "accountpassword"
test_msg = 'GetTempAuthCredentials success'
account_name_flag = "-a"
password_flag = "--password"
response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, ExpiryTime = "+date_pattern_for_tempAuthCred+", SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test()
result.command_should_match_pattern(response_pattern)
#Setting aws temporary credentials under environment variables
response_elements = get_response_elements(result.status.stdout)
os.environ["AWS_ACCESS_KEY_ID"] = response_elements['AccessKeyId']
os.environ["AWS_SECRET_ACCESS_KEY"] = response_elements['SecretAccessKey']
os.environ["AWS_SESSION_TOKEN"] = response_elements['SessionToken']
AwsTest('Aws can create bucket').create_bucket("tempcredbucket").execute_test().command_is_successful()
AwsTest('Aws can delete bucket').delete_bucket("tempcredbucket").execute_test().command_is_successful()
del os.environ["AWS_ACCESS_KEY_ID"]
del os.environ["AWS_SECRET_ACCESS_KEY"]
del os.environ["AWS_SESSION_TOKEN"]
#Create User
access_key_args['UserName'] = "u1"
test_msg = "Create User u1"
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**access_key_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
#Create user loginprofile
access_key_args['Password'] = "userpassword"
test_msg = 'create user login profile for u1'
user_name_flag = "-n"
password_flag = "--password"
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**access_key_args).execute_test()
#Get Temp Auth Credentials for IAM user u1 under the account
test_msg = 'GetTempAuthCredentials success'
account_name_flag = "-a"
password_flag = "--password"
response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, ExpiryTime = "+date_pattern_for_tempAuthCred+", SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test()
result.command_should_match_pattern(response_pattern)
#Get Temp Auth Credentials for account with duration more than max allowed
test_msg = 'GetTempAuthCredentials failure'
account_name_flag = "-a"
password_flag = "--password"
access_key_args['Duration'] = "500000"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("MaxDurationIntervalExceeded")
#Get Temp Auth Credentials for account with duration less than minimum required
test_msg = 'GetTempAuthCredentials failure'
account_name_flag = "-a"
password_flag = "--password"
access_key_args['Duration'] = "50"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("MinDurationIntervalNotMaintained")
#Update password Reset Flag and check
#update userlogin profile
test_msg = 'update user login profile for u1'
access_key_args['PasswordResetRequired']=True
result = AuthTest(test_msg).update_login_profile(user_name_flag ,**access_key_args).execute_test()
result.command_response_should_have("UpdateUserLoginProfile is successful")
#Get Temp Auth Credentials should fail when PasswordResetRequired is True
test_msg = 'GetTempAuthCredentials failure'
account_name_flag = "-a"
password_flag = "--password"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("PasswordResetRequired")
# UpdateAccountLoginProfile and DeleteAccount with Temp Credentials -- Start
os.environ["AWS_ACCESS_KEY_ID"] = response_elements['AccessKeyId']
os.environ["AWS_SECRET_ACCESS_KEY"] = response_elements['SecretAccessKey']
os.environ["AWS_SESSION_TOKEN"] = response_elements['SessionToken']
test_msg = 'UpdateAccountLoginProfile Successful'
account_args = {}
account_name_flag = "-n"
password_flag = "--password"
account_args['AccountName'] ="tempAuthTestAccount"
account_args['Password'] ="newpwd1234"
account_profile_response_pattern = "Account login profile updated."
result = AuthTest(test_msg).update_account_login_profile(account_name_flag, password_flag, **account_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
#Delete account
test_msg = "Delete account tempAuthTestAccount"
account_args = {'AccountName': 'tempAuthTestAccount', 'Email': '[email protected]', 'force': True}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
S3ClientConfig.access_key_id = s3test_access_key
S3ClientConfig.secret_key = s3test_secret_key
del os.environ["AWS_ACCESS_KEY_ID"]
del os.environ["AWS_SECRET_ACCESS_KEY"]
del os.environ["AWS_SESSION_TOKEN"]
# UpdateAccountLoginProfile and DeleteAccount with Temp Credentials -- End
#GetTempAuth End
test_msg = "Create User s3user1 (default path)"
user_args = {'UserName': 's3user1'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Update User s3user1 (new name = s3user1New, new path - /test/success)'
user_args = {}
user_args['UserName'] = "s3user1"
user_args['NewUserName'] = "s3user1New"
user_args['NewPath'] = "/test/success/"
result = AuthTest(test_msg).update_user(**user_args).execute_test()
result.command_response_should_have("User Updated.")
test_msg = 'create user login profile should fail for exceeding max allowed password length.'
user_args = {}
maxPasswordLength = "abcdefghijklmnopqrstuvwxyzabcdefghijkabcdefghijklmnopqrstuvwxyzabcdefghijkabcdefghijklmnopqrstuvwxyzabcdefghijk\
abcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrddd";
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] = "s3user1New"
user_args['Password'] = maxPasswordLength
result = AuthTest(test_msg).create_login_profile(user_name_flag, password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create userloginprofile.")
test_msg = 'create user login profile should fail for invalid username.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] = "s3userinvalidname"
user_args['Password'] = "abcdef"
result = AuthTest(test_msg).create_login_profile(user_name_flag, password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create userloginprofile.")
test_msg = 'create user login profile should fail for empty username.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] ="\"\""
user_args['Password'] = "abcdre"
result = AuthTest(test_msg).create_login_profile(user_name_flag, password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create userloginprofile.")
test_msg = 'create user login profile should fail for username missing.'
user_args = {}
user_name_flag = ""
password_flag = "--password"
user_args['UserName'] =""
user_args['Password'] = "abcdref"
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("User name is required for user login-profile creation")
test_msg = 'create user login profile should fail for password missing.'
user_args = {}
user_name_flag = "-n"
password_flag = ""
user_args['UserName'] ="abcd"
user_args['Password'] = ""
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("User password is required for user login-profile creation")
test_msg = 'create user login profile should fail with username as root.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] ="root"
user_args['Password'] = "pqrsef"
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Cannot create account login profile with CreateUserLoginProfile")
test_msg = 'create user login profile should succeed.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] ="s3user1New"
user_args['Password'] = "abcdefg"
login_profile_response_pattern = "Login Profile "+date_pattern+" False "+user_args['UserName']
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(login_profile_response_pattern)
test_msg = 'create user login profile failed for user with existing login profile'
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("EntityAlreadyExists")
#********* Test create user login profile with --password-reset-required *********************
test_msg = 'Create User user01'
user_args = {'UserName': 'user01'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'create user login profile should succeed with --password-reset-required'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] = "user01"
user_args['Password'] = "abcdef"
user_args['PasswordResetRequired'] = "True"
login_profile_response_pattern = "Login Profile "+date_pattern+" "+user_args['PasswordResetRequired']+" "+user_args['UserName']
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(login_profile_response_pattern)
test_msg = 'Delete User user01'
user_args = {}
user_args['UserName'] = "user01"
user_args['Password'] = "abcdef"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
#********* Test create user login profile with --no-password-reset-required *********************
test_msg = 'Create User user02'
user_args = {'UserName': 'user02'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'create user login profile should succeed with --no-password-reset-required'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] = "user02"
user_args['Password'] = "abcddt"
user_args['PasswordResetRequired'] = "False"
login_profile_response_pattern = "Login Profile "+date_pattern+" "+user_args['PasswordResetRequired']+" "+user_args['UserName']
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(login_profile_response_pattern)
test_msg = 'Delete User user02'
user_args = {}
user_args['UserName'] = "user02"
user_args['Password'] = "abcddt"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'GetUserLoginProfile Successful'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="s3user1New"
user_profile_response_pattern = "Login Profile "+date_pattern+" False "+user_args['UserName']
result = AuthTest(test_msg).get_login_profile(user_name_flag , **user_args).execute_test()
result.command_should_match_pattern(user_profile_response_pattern)
test_msg = 'GetUserLoginProfile failed for invalid user'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="abcd"
result = AuthTest(test_msg).get_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to get Login Profile")
test_msg = 'GetUserLoginProfile should fail with username as root'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="root"
result = AuthTest(test_msg).get_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("Cannot get account login profile with GetUserLoginProfile")
test_msg = "Create User loginProfileTestUser (default path)"
user_args = {'UserName': 'loginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'GetUserLoginProfile failed for user without LoginProfile created'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="loginProfileTestUser"
result = AuthTest(test_msg).get_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("NoSuchEntity")
test_msg = 'Delete User loginProfileTestUser'
user_args = {}
user_args['UserName'] = "loginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = "Create User updateLoginProfileTestUser (default path)"
user_args = {'UserName': 'updateLoginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is updateLoginProfileTestUser)'
access_key_args = {}
access_key_args['UserName'] = 'updateLoginProfileTestUser'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
test_msg = 'UpdateLoginProfile should fail when tried with IAM User accessKey-secretKey'
user_name_flag = "-n"
access_key_args['UserName'] ="updateLoginProfileTestUser"
access_key_args['Password'] = "newPassword"
result = AuthTest(test_msg).update_login_profile_with_user_key(user_name_flag , **access_key_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidUser")
test_msg = 'Delete access key'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User UpdateLoginProfileTestUser'
user_args = {}
user_args['UserName'] = "updateLoginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'UpdateLoginProfile is successful'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="s3user1New"
user_args['Password'] = "newPassword"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test()
result.command_response_should_have("UpdateUserLoginProfile is successful")
test_msg = 'UpdateLoginProfile fails without new password, password-reset or no-password-reset flag entered'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="s3user1New"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("Please provide password or password-reset flag")
test_msg = 'UpdateLoginProfile should fail with username as root'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="root"
user_args['Password'] = "newPassword"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("Cannot update account login profile with UpdateUserLoginProfile")
test_msg = 'UpdateLoginProfile is successful with only password-reset flag entered'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="s3user1New"
user_args['PasswordResetRequired']=True
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test()
result.command_response_should_have("UpdateUserLoginProfile is successful")
test_msg = 'GetLoginProfile to validate password reset flag set to True'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="s3user1New"
result = AuthTest(test_msg).get_login_profile(user_name_flag , **user_args).execute_test()
result.command_response_should_have("True")
test_msg = "Create User updateLoginProfileTestUser (default path)"
user_args = {'UserName': 'updateLoginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'UpdateUserLoginProfile failed for user without LoginProfile created'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="updateLoginProfileTestUser"
user_args['Password'] = "newPassword"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("NoSuchEntity")
test_msg = 'Delete User updateLoginProfileTestUser'
user_args = {}
user_args['UserName'] = "updateLoginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'UpdateUserLoginProfile failed for username missing.'
user_args = {}
user_name_flag = ""
user_args['UserName'] =""
user_args['Password'] = "abcdefd"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("UserName is required for UpdateUserLoginProfile")
test_msg = 'UpdateLoginProfile failed as user does not exist in ldap'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="dummyUser"
user_args['Password'] = "password"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("UpdateUserLoginProfile failed")
test_msg = 'UpdateLoginProfile failed for invalid username'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="dummyUser$"
user_args['Password'] = "password"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidParameterValue")
#*************************Test s3iamcli ChangePassword for IAM user******************
test_msg = "Create User changePasswordUserLoginProfileTestUser "
user_args = {'UserName': 'changePasswordUserLoginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is changePasswordUserLoginProfileTestUser)'
access_key_args = {}
user_access_key_args = {}
access_key_args['UserName'] = 'changePasswordUserLoginProfileTestUser'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
user_access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
user_access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
test_msg = 'create user login profile for changePasswordUserLoginProfileTestUser.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] ="changePasswordUserLoginProfileTestUser"
user_args['Password'] = "abcdfs"
login_profile_response_pattern = "Login Profile "+date_pattern+" False "+user_args['UserName']
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(login_profile_response_pattern)
test_msg = 'ChangePassword should fail with root accessKey-secretKey, user OldPassword and NewPassword.'
account_user_access_key_args = {}
account_user_access_key_args['AccessKeyId'] = S3ClientConfig.access_key_id
account_user_access_key_args['SecretAccessKey'] = S3ClientConfig.secret_key
account_user_access_key_args['OldPassword'] ="abcdfs"
account_user_access_key_args['NewPassword'] = "pqrswq"
result = AuthTest(test_msg).change_user_password(**account_user_access_key_args).execute_test(negative_case=True)
result.command_response_should_have("ChangePassword failed")
result.command_response_should_have("InvalidUserType")
test_msg = 'ChangePassword should fail with IAM user accessKey-secretKey,NewPassword and invalid oldPassword.'
test_access_key_args = {}
test_access_key_args['AccessKeyId'] = user_access_key_args['AccessKeyId']
test_access_key_args['SecretAccessKey'] = user_access_key_args['SecretAccessKey']
test_access_key_args['NewPassword'] = "pqrswq"
test_access_key_args['OldPassword'] = "pqrsqq"
result = AuthTest(test_msg).change_user_password(**test_access_key_args).execute_test(negative_case=True)
result.command_response_should_have("ChangePassword failed")
result.command_response_should_have("InvalidPassword")
test_msg = 'ChangePassword with IAM User accessKey-secretKey, OldPassword and NewPassword should succeed.'
user_access_key_args['OldPassword'] ="abcdfs"
user_access_key_args['NewPassword'] = "pqrsoe"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = 'Two subsequent ChangePassword with valid password value should succeed - first changepassword'
user_access_key_args['OldPassword'] ="pqrsoe"
user_access_key_args['NewPassword'] = "vcxvsd"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = 'Two subsequent ChangePassword with valid password value should succeed - second changepassword'
user_access_key_args['OldPassword'] ="vcxvsd"
user_access_key_args['NewPassword'] = "xyzdet"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = 'ChangePassword with same value for oldPassword and newPassword should fail.'
user_access_key_args['OldPassword'] ="xyzdet"
user_access_key_args['NewPassword'] = "xyzdet"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test(negative_case=True)
result.command_response_should_have("ChangePassword failed")
result.command_response_should_have("InvalidPassword")
test_msg = 'ChangePassword with empty value i.e. \"\" for newPassword should fail.'
user_access_key_args['OldPassword'] ="xyzdet"
user_access_key_args['NewPassword'] = "\"\""
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test(negative_case=True)
result.command_response_should_have("ChangePassword failed")
result.command_response_should_have("Invalid length for parameter NewPassword")
test_msg = 'ChangePassword with special character i.e. pqrsdd\\t as newPassword should succeed.'
user_access_key_args['OldPassword'] ="xyzdet"
user_access_key_args['NewPassword'] = "pqrsdd\\t"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = 'ChangePassword with space i.e." avcghj " as newPassword should succeed.'
user_access_key_args['OldPassword'] ="pqrsdd\\t"
user_access_key_args['NewPassword'] = " avcghj "
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = 'ChangePassword with special character e.g xvc#?*% as newPassword should succeed.'
user_access_key_args['OldPassword'] =" avcghj "
user_access_key_args['NewPassword'] = "xvc#?*%"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = "Create User TestUser "
user_args = {'UserName': 'TestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is TestUser)'
access_key_args = {}
test_user_access_key_args = {}
access_key_args['UserName'] = 'TestUser'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
test_user_access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
test_user_access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
test_msg = 'ChangePassword should fail with another IAM user (i.e. TestUser) accessKey-secretKey, OldPassword and NewPassword.'
test_user_access_key_args['OldPassword'] ="pqrsdd"
test_user_access_key_args['NewPassword'] = "xyzadd"
result = AuthTest(test_msg).change_user_password(**account_user_access_key_args).execute_test(negative_case=True)
result.command_response_should_have("ChangePassword failed")
result.command_response_should_have("InvalidUserType")
test_msg = 'Delete access key for changePasswordUserLoginProfileTestUser'
result = AuthTest(test_msg).delete_access_key(**user_access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete access key for TestUser'
result = AuthTest(test_msg).delete_access_key(**test_user_access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User changePasswordUserLoginProfileTestUser'
user_args = {}
user_args['UserName'] = "changePasswordUserLoginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'Delete User TestUser'
user_args = {}
user_args['UserName'] = "TestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'create account login profile should succeed.'
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = false, AccountName = [\s\S]*"
user_args = {}
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="s3test"
user_args['Password'] = "abcdiu"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = 'create account login profile should fail for already created profile.'
user_args = {}
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="s3test"
user_args['Password'] = "abcdiu"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("The request was rejected because it attempted to create or update a resource that already exists")
test_msg = 'create account login profile should fail for exceeding max allowed password length.'
user_args = {}
maxPasswordLength = "abcdefghijklmnopqrstuvwxyzabcdefghijkabcdefghijklmnopqrstuvwxyzabcdefghijkabcdefghijklmnopqrstuvwxyzabcdefghijk\
abcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrddd";
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="s3test"
user_args['Password'] = maxPasswordLength;
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create Account login profile")
test_msg = 'create account login profile should fail for empty account name.'
user_args = {}
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="\"\""
user_args['Password'] = "abcdriu"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Account name is required")
test_msg = 'create account login profile should fail for missing account name.'
user_args = {}
account_name_flag = ""
password_flag = "--password"
user_args['AccountName'] =""
user_args['Password'] = "abcdriu"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Account name is required")
test_msg = 'create account login profile should fail for password missing.'
user_args = {}
account_name_flag = "-n"
password_flag = ""
user_args['AccountName'] ="abcd"
user_args['Password'] = ""
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Account login password is required")
test_msg = "Create account s3test_loginprofile0"
account_args = {'AccountName': 's3test_loginprofile', 'Email': '[email protected]', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements1 = get_response_elements(result1.status.stdout)
test_msg = "Create User accountLoginProfileTestUser"
user_args = {'UserName': 'accountLoginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is accountLoginProfileTestUser)'
access_key_args = {}
access_key_args['UserName'] = 'accountLoginProfileTestUser'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
test_msg = 'CreateAccountLoginProfile should fail when tried with IAM User accessKey-secretKey'
user_name_flag = "-n"
password_flag = "--password"
access_key_args['AccountName'] ="s3test_loginprofile0"
access_key_args['Password'] = "newPassword"
result = AuthTest(test_msg).create_account_login_profile(user_name_flag , password_flag,\
**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action.")
test_msg = 'Delete access key'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User accountLoginProfileTestUser'
user_args = {}
user_args['UserName'] = "accountLoginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = "Create account s3test_loginprofile1"
account_args = {'AccountName': 's3test_loginprofile1', 'Email': '[email protected]', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements1 = get_response_elements(result1.status.stdout)
access_key_args = {}
access_key_args['AccountName'] = "s3test_loginprofile1"
access_key_args['AccessKeyId'] = account_response_elements1['AccessKeyId']
access_key_args['SecretAccessKey'] = account_response_elements1['SecretKey']
test_msg = "Create account s3test_loginprofile2"
account_args = {'AccountName': 's3test_loginprofile2', 'Email': '[email protected]', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result2 = AuthTest(test_msg).create_account(**account_args).execute_test()
result2.command_should_match_pattern(account_response_pattern)
account_response_elements2 = get_response_elements(result2.status.stdout)
test_msg = "Attempt: create account-login-profile for account name - s3test_loginprofile1 and access key of account s3test_loginprofile2 - Should fail."
access_key_args2 = {}
access_key_args2['AccountName'] = "s3test_loginprofile1"
access_key_args2['AccessKeyId'] = account_response_elements2['AccessKeyId']
access_key_args2['SecretAccessKey'] = account_response_elements2['SecretKey']
access_key_args2['Password'] = "newPassword"
result = AuthTest(test_msg).create_account_login_profile(user_name_flag , password_flag,\
**access_key_args2).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action")
account_args = {}
test_msg = "Delete account s3test_loginprofile1"
account_args = {'AccountName': 's3test_loginprofile1', 'Email': '[email protected]', 'force': True}
s3test_access_key = S3ClientConfig.access_key_id
s3test_secret_key = S3ClientConfig.secret_key
S3ClientConfig.access_key_id = access_key_args['AccessKeyId']
S3ClientConfig.secret_key = access_key_args['SecretAccessKey']
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
account_args = {}
test_msg = "Delete account s3test_loginprofile2"
account_args = {'AccountName': 's3test_loginprofile2', 'Email': '[email protected]', 'force': True}
S3ClientConfig.access_key_id = access_key_args2['AccessKeyId']
S3ClientConfig.secret_key = access_key_args2['SecretAccessKey']
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
S3ClientConfig.access_key_id = s3test_access_key
S3ClientConfig.secret_key = s3test_secret_key
test_msg = 'GetAccountLoginProfile successful'
account_args = {}
account_name_flag = "-n"
account_args['AccountName'] ="s3test"
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = (true|false), AccountName = [\s\S]*"
result = AuthTest(test_msg).get_account_login_profile(account_name_flag , **account_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = 'UpdateAccountLoginProfile successful'
account_args = {}
account_name_flag = "-n"
password_flag = "--password"
account_args['AccountName'] ="s3test"
account_args['Password'] ="s3test456"
account_profile_response_pattern = "Account login profile updated."
result = AuthTest(test_msg).update_account_login_profile(account_name_flag, password_flag, **account_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = 'UpdateAccountLoginProfile successful with ldap credentials'
account_args = {}
account_name_flag = "-n"
password_flag = "--password"
account_args['AccountName'] ="s3test"
account_args['Password'] ="s3test4567"
account_args['AccessKeyId'] = S3ClientConfig.ldapuser
account_args['SecretAccessKey'] = S3ClientConfig.ldappasswd
account_profile_response_pattern = "Account login profile updated."
result = AuthTest(test_msg).update_account_login_profile(account_name_flag, password_flag, **account_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = "Create account s3test_loginprofile_update"
account_args = {'AccountName': 's3test_loginprofile_update', 'Email': '[email protected]', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements1 = get_response_elements(result1.status.stdout)
access_key_args = {}
access_key_args['AccountName'] = "s3test_loginprofile_update"
access_key_args['AccessKeyId'] = account_response_elements1['AccessKeyId']
access_key_args['SecretAccessKey'] = account_response_elements1['SecretKey']
access_key_args['Password'] = "abcdoy"
test_msg = "create account-login-profile for account name - s3test_loginprofile_update with PasswordResetRequired - false."
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = false, AccountName = [\s\S]*"
account_name_flag = "-n"
password_flag = "--password"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**access_key_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = 'UpdateAccountLoginProfile should succeed with PasswordResetRequired set to true'
account_name_flag = "-n"
password_flag = "--password"
access_key_args['PasswordResetRequired'] ="True"
account_profile_response_pattern = "Account login profile updated."
result = AuthTest(test_msg).update_account_login_profile(account_name_flag, password_flag, **access_key_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = 'GetAccountLoginProfile successful'
account_name_flag = "-n"
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = true, AccountName = [\s\S]*"
result = AuthTest(test_msg).get_account_login_profile(account_name_flag , **access_key_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = "Create User updateAccountLoginProfileTestUser"
user_args = {'UserName': 'updateAccountLoginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is updateAccountLoginProfileTestUser)'
access_key_args = {}
access_key_args['UserName'] = 'updateAccountLoginProfileTestUser'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
access_key_args['AccountName'] = 's3test_loginprofile_update'
test_msg = 'UpdateAccountLoginProfile should fail for unauthorized user'
access_key_args['Password'] = "abcd"
account_name_flag = "-n"
password_flag = "--password"
result = AuthTest(test_msg).update_account_login_profile(account_name_flag, password_flag, **access_key_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action.")
test_msg = 'Delete access key'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User updateAccountLoginProfileTestUser'
user_args = {}
user_args['UserName'] = "updateAccountLoginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = "Create User getaccountloginprofiletest"
user_args = {'UserName': 'getaccountloginprofiletest'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key'
account_args = {}
account_args['UserName'] = 'getaccountloginprofiletest'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**account_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
account_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
account_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
test_msg = 'GetAccountLoginProfile should fail when tried with IAM User accessKey-secretKey'
account_name_flag = "-n"
account_args['AccountName'] ="s3test"
result = AuthTest(test_msg).get_account_login_profile(account_name_flag , **account_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action.")
test_msg = 'Delete access key'
result = AuthTest(test_msg).delete_access_key(**account_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User getaccountloginprofiletest'
user_args = {}
user_args['UserName'] = "getaccountloginprofiletest"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'List Users (path prefix = /test/)'
user_args = {'PathPrefix': '/test/'}
list_user_pattern = "UserId = [\w-]*, UserName = s3user1New, ARN = [\S]*, Path = /test/success/$"
result = AuthTest(test_msg).list_users(**user_args).execute_test()
result.command_should_match_pattern(list_user_pattern)
test_msg = "List Users - Take access key and secret key from config"
new_config_entries = {'SG_ACCESS_KEY' : S3ClientConfig.access_key_id, 'SG_SECRET_KEY': S3ClientConfig.secret_key}
update_config_yaml(new_config_entries)
list_user_pattern = "UserId = [\w-]*, UserName = s3user1New, ARN = [\S]*, Path = /test/success/$"
result = AuthTest(test_msg).list_users(**user_args).execute_test()
result.command_should_match_pattern(list_user_pattern)
restore_config_yaml()
test_msg = "List users - Take access key and secret key from env"
_use_root_credentials()
# Declare SG_ACCESS_KEY and SG_SECRET_KEY environment variables
os.environ['SG_ACCESS_KEY'] = S3ClientConfig.access_key_id
os.environ['SG_SECRET_KEY'] = S3ClientConfig.secret_key
user_args = {'PathPrefix': '/test/'}
list_user_pattern = "UserId = [\w-]*, UserName = s3user1New, ARN = [\S]*, Path = /test/success/$"
result = AuthTest(test_msg).list_users(**user_args).execute_test()
result.command_should_match_pattern(list_user_pattern)
# Remove environment variables declared above
os.environ.pop('SG_ACCESS_KEY')
os.environ.pop('SG_SECRET_KEY')
#TODO - Need to fix this test. Currently skipping this test as it waits for password to be entered manually through prompt.
'''
test_msg = "List users - Take access key and secret key from prompt"
user_args = {'PathPrefix': '/test/'}
list_user_pattern = "Enter Access Key: Enter Secret Key: UserId = [\w-]*, UserName = s3user1New, ARN = [\S]*, Path = /test/success/$"
stdin_values = S3ClientConfig.access_key_id + '\n' + S3ClientConfig.secret_key
S3ClientConfig.access_key_id = None
S3ClientConfig.secret_key = None
result = AuthTest(test_msg).list_users(**user_args).execute_test(False, False, stdin_values)
result.command_should_match_pattern(list_user_pattern)
'''
_use_root_credentials()
test_msg = 'Reset s3user1 user attributes (path and name)'
user_args = {}
user_args['UserName'] = "s3user1New"
user_args['NewUserName'] = "s3user1"
user_args['NewPath'] = "/"
result = AuthTest(test_msg).update_user(**user_args).execute_test()
result.command_response_should_have("User Updated.")
test_msg = 'Delete User s3user1'
user_args = {}
user_args['UserName'] = "s3user1"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = "Create User s3user2 (path = /test/)"
user_args['UserName'] = "s3user2"
user_args['Path'] = "/test/"
user2_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user2_response_pattern)
test_msg = 'Delete User s3user2'
user_args['UserName'] = "s3user2"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'Update User root (new name = s3root) should fail'
user_args = {}
user_args['UserName'] = "root"
user_args['NewUserName'] = "s3root"
result = AuthTest(test_msg).update_user(**user_args).execute_test(negative_case=True)
result.command_response_should_have("Cannot change user name of root user")
test_msg = 'Update User root (new path - /test/success/)'
user_args = {}
user_args['UserName'] = "root"
user_args['NewPath'] = "/test/success/"
result = AuthTest(test_msg).update_user(**user_args).execute_test()
result.command_response_should_have("User Updated.")
test_msg = 'List Users (default path)'
user_args = {}
result = AuthTest(test_msg).list_users(**user_args).execute_test()
result.command_should_match_pattern("")
test_msg = 'Reset root user attributes (path and name)'
user_args = {}
user_args['UserName'] = "root"
user_args['NewPath'] = "/"
result = AuthTest(test_msg).update_user(**user_args).execute_test()
result.command_response_should_have("User Updated.")
# Test access key APIs.
# Each user can have only 2 access keys. Hence test all the access key APIs in the same function.
def accesskey_tests():
access_key_args = {}
test_msg = 'Create access key (user name not provided)'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
test_msg = 'Delete access key'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Create access key (user does not exist.)'
access_key_args = {}
access_key_args['UserName'] = 'userDoesNotExist'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create access key.")
test_msg = 'Create access key (user name is root)'
access_key_args['UserName'] = 'root'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
test_msg = 'Create access key (Allow only 2 credentials per user.)'
access_key_args['UserName'] = 'root'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create access key.")
test_msg = 'Delete access key (user name and access key id combination is incorrect)'
access_key_args['UserName'] = 'root3'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to delete access key.")
test_msg = 'Update access key for root user should fail(Change status from Active to Inactive)'
access_key_args['Status'] = "Inactive"
access_key_args['UserName'] = 'root'
result = AuthTest(test_msg).update_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("Access key status for root user can not be changed")
test_msg = 'Delete access key'
access_key_args['UserName'] = 'root'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
# List the access keys to verify that the remaining root key is Active
test_msg = 'List access keys'
access_key_args['UserName'] = 'root'
accesskey_response_pattern = "UserName = root, AccessKeyId = [\w-]*, Status = Active$"
result = AuthTest(test_msg).list_access_keys(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
user_args = {}
user_args['UserName'] = "s3user1"
test_msg = "Create User s3user1 (default path)"
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is s3user1)'
access_key_args = {}
access_key_args['UserName'] = 's3user1'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
test_msg = 'Update access key (Change status from Active to Inactive)'
access_key_args['Status'] = "Inactive"
access_key_args['UserName'] = 's3user1'
result = AuthTest(test_msg).update_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key Updated.")
test_msg = 'List access keys (Check if status is inactive.)'
access_key_args['UserName'] = 's3user1'
result = AuthTest(test_msg).list_access_keys(**access_key_args).execute_test()
result.command_response_should_have("Inactive")
test_msg = 'Delete access key'
access_key_args['UserName'] = 's3user1'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User s3user1'
user_args = {}
user_args['UserName'] = "s3user1"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
# Check if non root users are not allowed to use own access key
# and secret key on other users for creating and deleting access keys
'''
Setup for tests:
'''
_use_root_credentials()
user_args = {}
test_msg = "Create User s3user_1 using root access key and secret key " \
+ "(path = /test/)"
user_args['UserName'] = "s3user_1"
user_args['Path'] = "/test/"
user2_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user2_response_pattern)
test_msg = "Create access key using root access key and secret key " \
+ "(user name is s3user_1)"
access_key_args['UserName'] = 's3user_1'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
response_elements = get_response_elements(result.status.stdout)
# Saving access key and secret key for s3user_1 for later use.
access_key_id_of_s3user1 = response_elements['AccessKeyId']
secret_key_of_s3user1 = response_elements['SecretAccessKey']
# Overwriting values of access key and secret key given by
# _use_root_credentials() with s3user_1's access key and secret key.
S3ClientConfig.access_key_id = access_key_id_of_s3user1
S3ClientConfig.secret_key = secret_key_of_s3user1
'''
runTest:
'''
test_msg = "Create User s3user_2 using s3user_1's access key and secret key " \
+ "(path = /test/)"
user_args['UserName'] = "s3user_2"
user_args['Path'] = "/test/"
user2_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).create_user(**user_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action")
'''
Setup for tests:
'''
_use_root_credentials()
test_msg = "Create User s3user_2 using root's access key and secret key " \
+ "(path = /test/)"
user_args['UserName'] = "s3user_2"
user_args['Path'] = "/test/"
user2_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user2_response_pattern)
'''
runTest:
'''
test_msg = "Create access key using s3user_1's access key and secret key " \
+ "(user name is s3user_2)"
S3ClientConfig.access_key_id = access_key_id_of_s3user1
S3ClientConfig.secret_key = secret_key_of_s3user1
access_key_args['UserName'] = 's3user_2'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action")
'''
Setup for tests:
'''
_use_root_credentials()
test_msg = "Create access key using root access key and secret key " \
+ "(user name is s3user_2)"
access_key_args['UserName'] = 's3user_2'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
response_elements = get_response_elements(result.status.stdout)
# Saving access key and secret key for s3user_1 for later use.
access_key_id_of_s3user2 = response_elements['AccessKeyId']
secret_key_of_s3user2 = response_elements['SecretAccessKey']
# Overwriting values of access key and secret key given by
# _use_root_credentials() with s3user_2's access key and secret key.
S3ClientConfig.access_key_id = access_key_id_of_s3user2
S3ClientConfig.secret_key = secret_key_of_s3user2
'''
runTest:
'''
test_msg = 'Delete access key of s3user_1 using s3user_2\'s access key' \
+ ' and secret key'
access_key_args['UserName'] = 's3user_1'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action")
'''
Setup for tests:
'''
_use_root_credentials()
test_msg = 'Delete access key of s3user_1 using root credentials'
access_key_args['UserName'] = 's3user_1'
access_key_args['AccessKeyId'] = access_key_id_of_s3user1
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
S3ClientConfig.access_key_id = access_key_id_of_s3user2
S3ClientConfig.secret_key = secret_key_of_s3user2
'''
runTest:
'''
test_msg = "Delete User s3user_1 using s3user_2's access key and secret key"
user_args['UserName'] = "s3user_1"
result = AuthTest(test_msg).delete_user(**user_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action")
'''
Teardown:
'''
_use_root_credentials()
test_msg = 'Delete access key of s3user_2 using root access key and secret key'
access_key_args['UserName'] = 's3user_2'
access_key_args['AccessKeyId'] = access_key_id_of_s3user2
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User s3user_1 using root access key and secret key'
user_args = {}
user_args['UserName'] = "s3user_1"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'Delete User s3user_2 using root access key and secret key'
user_args = {}
user_args['UserName'] = "s3user_2"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
'''
Setup for tests for the scenario where one account's access key
and secret key are used to create an access key for a user in another account:
'''
test_msg = "Create account s3test_1"
account_args = {'AccountName': 's3test_1', 'Email': '[email protected]', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
# Overwriting values of access key and secret key given by
# _use_root_credentials() with new account 's3test_1's access key and secret key.
S3ClientConfig.access_key_id = account_response_elements['AccessKeyId']
S3ClientConfig.secret_key = account_response_elements['SecretKey']
'''
runTest:
'''
test_msg = "Create access key using another account's access key and secret key " \
+ "(user name is s3user_2)"
access_key_args = {}
access_key_args['UserName'] = 's3user_2'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("The request was rejected because it " \
+ "referenced a user that does not exist.")
'''
Teardown:
'''
account_args = {}
test_msg = "Delete account s3test_1"
account_args = {'AccountName': 's3test_1'}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# restoring previous values for further tests
_use_root_credentials()
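# Test s3iamcli Role APIs: create, list and delete roles using the policy document
# from the resources directory.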
def role_tests():
policy_doc = os.path.join(os.path.dirname(__file__), 'resources', 'policy')
policy_doc_full_path = os.path.abspath(policy_doc)
test_msg = 'Create role (Path not specified)'
role_args = {}
role_args['RoleName'] = 'S3Test'
role_args['AssumeRolePolicyDocument'] = policy_doc_full_path
role_response_pattern = "RoleId = [\w-]*, RoleName = S3Test, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_role(**role_args).execute_test()
result.command_should_match_pattern(role_response_pattern)
test_msg = 'Delete role'
result = AuthTest(test_msg).delete_role(**role_args).execute_test()
result.command_response_should_have("Role deleted.")
test_msg = 'Create role (Path is /test/)'
role_args['Path'] = '/test/'
role_response_pattern = "RoleId = [\w-]*, RoleName = S3Test, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).create_role(**role_args).execute_test()
result.command_should_match_pattern(role_response_pattern)
test_msg = 'List role (Path is not given)'
role_args = {}
role_response_pattern = "RoleId = [\w-]*, RoleName = S3Test, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).list_roles(**role_args).execute_test()
result.command_should_match_pattern(role_response_pattern)
test_msg = 'List role (Path is /test)'
role_response_pattern = "RoleId = S3Test, RoleName = S3Test, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).list_roles(**role_args).execute_test()
result.command_should_match_pattern(role_response_pattern)
test_msg = 'Delete role'
role_args['RoleName'] = 'S3Test'
result = AuthTest(test_msg).delete_role(**role_args).execute_test()
result.command_response_should_have("Role deleted.")
def saml_provider_tests():
metadata_doc = os.path.join(os.path.dirname(__file__), 'resources', 'saml_metadata')
metadata_doc_full_path = os.path.abspath(metadata_doc)
test_msg = 'Create SAML provider'
saml_provider_args = {}
saml_provider_args['Name'] = 'S3IDP'
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
saml_provider_response_pattern = "SAMLProviderArn = [\S]*$"
result = AuthTest(test_msg).create_saml_provider(**saml_provider_args).execute_test()
result.command_should_match_pattern(saml_provider_response_pattern)
response_elements = get_response_elements(result.status.stdout)
saml_provider_args['SAMLProviderArn'] = response_elements['SAMLProviderArn']
test_msg = 'Update SAML provider'
saml_provider_args = {}
saml_provider_args['SAMLProviderArn'] = "arn:seagate:iam:::S3IDP"
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
result = AuthTest(test_msg).update_saml_provider(**saml_provider_args).execute_test()
result.command_response_should_have("SAML provider Updated.")
test_msg = 'List SAML providers'
saml_provider_response_pattern = "ARN = arn:seagate:iam:::S3IDP, ValidUntil = [\S\s]*$"
result = AuthTest(test_msg).list_saml_providers(**saml_provider_args).execute_test()
result.command_should_match_pattern(saml_provider_response_pattern)
test_msg = 'Delete SAML provider'
result = AuthTest(test_msg).delete_saml_provider(**saml_provider_args).execute_test()
result.command_response_should_have("SAML provider deleted.")
test_msg = 'List SAML providers'
result = AuthTest(test_msg).list_saml_providers(**saml_provider_args).execute_test()
result.command_should_match_pattern("")
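# Test s3iamcli GetFederationToken: obtain temporary credentials for a federated user,
# then clean up the generated access key and user.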
def get_federation_token_test():
federation_token_args = {}
federation_token_args['Name'] = 's3root'
test_msg = 'Get Federation Token'
response_pattern = "FederatedUserId = [\S]*, AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_federation_token(**federation_token_args).execute_test()
result.command_should_match_pattern(response_pattern)
response_elements = get_response_elements(result.status.stdout)
S3ClientConfig.access_key_id = response_elements['AccessKeyId']
S3ClientConfig.secret_key = response_elements['SecretAccessKey']
S3ClientConfig.token = response_elements['SessionToken']
_use_root_credentials()
test_msg = 'Delete access key'
access_key_args = {}
access_key_args['AccessKeyId'] = response_elements['AccessKeyId']
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User s3root'
user_args = {}
user_args['UserName'] = "s3root"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
def delete_account_tests():
_use_root_credentials()
test_msg = "Create User s3user1 (default path)"
user_args = {'UserName': 's3user1'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
AuthTest(test_msg).create_user(**user_args).execute_test()\
.command_should_match_pattern(user1_response_pattern)
account_args = {'AccountName': 's3test'}
test_msg = "Delete account s3test should fail"
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("attempted to delete a resource that has attached subordinate entities")
# Test: create a account s3test1 and try to delete account s3test1 using access
# key and secret key of account s3test. Account delete operation should fail.
test_msg = "Create account s3test1"
account_args = {'AccountName': 's3test1', 'Email': '[email protected]', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
s3test1_root_access_key = account_response_elements['AccessKeyId']
s3test1_root_secret_key = account_response_elements['SecretKey']
test_msg = "Delete account s3test1 using credentials of account s3test should fail."
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("You are not authorized to perform this operation.")
# Test: delete account s3test with force option [recursively/forcefully]
test_msg = "Delete account s3test"
account_args = {'AccountName': 's3test', 'force': True}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# Use an invalid access key and secret key
GlobalTestState.root_access_key = "xRZ807dxQEqakueNTBpyNQ#"
GlobalTestState.root_secret_key = "caEE2plJfA1BrhthYsh9H9siEQZtCMF4etvj1o9B"
_use_root_credentials()
# Test: delete account with invalid access key and secret key format
test_msg = "Delete account s3test1 with invalid access key format"
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True) \
.command_response_should_have("The AWS access key Id you provided does not exist in our records.")
# Use access key and secret key of account s3test1
GlobalTestState.root_access_key = s3test1_root_access_key
GlobalTestState.root_secret_key = s3test1_root_secret_key
_use_root_credentials()
# Test: delete account without force option
test_msg = "Delete account s3test1"
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# Test: Account cannot be deleted if it contains some buckets
test_msg = "Create account s3test1"
account_args = {'AccountName': 's3test1', 'Email': '[email protected]', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
GlobalTestState.root_access_key = account_response_elements['AccessKeyId']
GlobalTestState.root_secret_key = account_response_elements['SecretKey']
_use_root_credentials()
S3ClientConfig.pathstyle = False
S3cmdTest('s3cmd can create bucket').with_credentials(GlobalTestState.root_access_key, GlobalTestState.root_secret_key)\
.create_bucket("seagatebucket").execute_test().command_is_successful()
test_msg = "Delete account s3test1 containing buckets"
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("Account cannot be deleted as it owns some resources.")
S3cmdTest('s3cmd can delete bucket').with_credentials(GlobalTestState.root_access_key, GlobalTestState.root_secret_key)\
.delete_bucket("seagatebucket").execute_test().command_is_successful()
# Test: Account cannot be deleted on motr_idx_op fail
test_msg = "Cannot delete account s3test1 on motr_idx_op fail"
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi("enable", "always", "motr_idx_op_fail").\
execute_test().command_is_successful()
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("Account cannot be deleted")
S3fiTest('s3cmd disable Fault injection').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
test_msg = "Delete account s3test1 contains no buckets"
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
def reset_account_accesskey_tests():
test_msg = "Create account s3test1"
account_args = {'AccountName': 's3test1', 'Email': '[email protected]', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
s3test1_root_access_key = account_response_elements['AccessKeyId']
s3test1_root_secret_key = account_response_elements['SecretKey']
# Use access key and secret key of account s3test1
GlobalTestState.root_access_key = s3test1_root_access_key
GlobalTestState.root_secret_key = s3test1_root_secret_key
_use_root_credentials()
S3ClientConfig.pathstyle = False
# Create a bucket with just now created Account credentials
S3cmdTest('s3cmd can create bucket').with_credentials(GlobalTestState.root_access_key, GlobalTestState.root_secret_key)\
.create_bucket("seagatebucket").execute_test().command_is_successful()
test_msg = "Reset account access key"
account_args = {'AccountName': 's3test1', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).reset_account_accesskey(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
s3test1_root_access_key = account_response_elements['AccessKeyId']
s3test1_root_secret_key = account_response_elements['SecretKey']
test_msg = "Reset account access key with invalid credentials"
account_args = {'AccountName': 's3test1', 'ldapuser': 'sgiamadmin*',
'ldappasswd': 'ldapadmin@'}
result = AuthTest(test_msg).reset_account_accesskey(**account_args).execute_test(negative_case=True)
result.command_should_match_pattern("Account access key wasn't reset.")
#Using old access key should fail now
S3cmdTest('s3cmd can delete bucket').with_credentials(GlobalTestState.root_access_key, GlobalTestState.root_secret_key)\
.delete_bucket("seagatebucket").execute_test(negative_case=True).command_should_fail().command_error_should_have("")
# Use new access key and secret key of account s3test1
GlobalTestState.root_access_key = s3test1_root_access_key
GlobalTestState.root_secret_key = s3test1_root_secret_key
_use_root_credentials()
# Using new access key should pass now
S3cmdTest('s3cmd can delete bucket').with_credentials(GlobalTestState.root_access_key,
GlobalTestState.root_secret_key) \
.delete_bucket("seagatebucket").execute_test().command_is_successful()
test_msg = "Delete account s3test1"
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# The limit on the maximum number of credentials per account (which should not exceed two) should not count the account's temporary credentials.
#Create account
test_msg = "Create account tempAuthTestAccount"
account_args = {'AccountName': 'tempAuthTestAccount', 'Email': '[email protected]', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result1.status.stdout)
access_key_args = {}
access_key_args['AccountName'] = "tempAuthTestAccount"
access_key_args['AccessKeyId'] = account_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = account_response_elements['SecretKey']
s3test_access_key = S3ClientConfig.access_key_id
s3test_secret_key = S3ClientConfig.secret_key
S3ClientConfig.access_key_id = access_key_args['AccessKeyId']
S3ClientConfig.secret_key = access_key_args['SecretAccessKey']
#Create Account LoginProfile for tempAuthTestAccount"
test_msg = 'create account login profile should succeed.'
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = false, AccountName = [\s\S]*"
user_args = {}
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="tempAuthTestAccount"
user_args['Password'] = "accountpassword"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
date_pattern_for_tempAuthCred = "[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T(2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9].[0-9]*[\+-][0-9]*"
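# Illustrative example (not asserted anywhere): a timestamp such as
# 2021-03-05T14:23:45.123+0000 matches date_pattern_for_tempAuthCred above.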
#Get temp auth credentials for the account
access_key_args['Password'] = "accountpassword"
test_msg = 'GetTempAuthCredentials success'
account_name_flag = "-a"
password_flag = "--password"
response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, ExpiryTime = "+date_pattern_for_tempAuthCred+", SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test()
result.command_should_match_pattern(response_pattern)
#Get temp auth credentials for the account again
access_key_args['Password'] = "accountpassword"
test_msg = 'GetTempAuthCredentials success'
account_name_flag = "-a"
password_flag = "--password"
response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, ExpiryTime = "+date_pattern_for_tempAuthCred+", SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test()
result.command_should_match_pattern(response_pattern)
#Create access key (after this the maximum access key limit for the account is reached)
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
#Creating one more access key should fail as the maximum access key limit has been reached
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("AccessKeyQuotaExceeded")
#Delete account
test_msg = "Delete account tempAuthTestAccount"
account_args = {'AccountName': 'tempAuthTestAccount', 'Email': '[email protected]', 'force': True}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
S3ClientConfig.access_key_id = s3test_access_key
S3ClientConfig.secret_key = s3test_secret_key
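# Auth server health check: probe the /auth/health endpoint and expect an HTTP 200 response.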
def auth_health_check_tests():
# e.g curl -s -I -X HEAD https://iam.seagate.com:9443/auth/health
health_check_uri = "/auth/health"
result = AuthTest('Auth server health check').get_auth_health(health_check_uri).\
execute_test().command_is_successful().command_response_should_have("200 OK")
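# Minimal illustrative sketch (an assumption, not used by this suite): the same health
# probe issued directly from Python with http.client against the endpoint shown above.
#
# import http.client, ssl
#
# conn = http.client.HTTPSConnection("iam.seagate.com", 9443,
#                                    context=ssl._create_unverified_context())  # self-signed cert assumed
# conn.request("HEAD", "/auth/health")
# assert conn.getresponse().status == 200  # equivalent to the "200 OK" check above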
# Validate maxAccount and maxUser limit values from authserver.properties file
def test_max_account_and_user_limit_value_of_auth_config():
print("Updating autherver.properties (/etc/cortx/auth/resources/authserver.properties) file with test values..")
s3confstore = S3CortxConfStore('properties:///etc/cortx/auth/resources/authserver.properties', 'index')
old_maxAccountValue=s3confstore.get_config('maxAccountLimit')
old_maxIAMUserValue=s3confstore.get_config('maxIAMUserLimit')
s3confstore.set_config('maxAccountLimit', '1', True)
s3confstore.set_config('maxIAMUserLimit', '1', True)
os.system('systemctl restart s3authserver')
time.sleep(30) # sometime authserver takes more time to restart
print("auth config values are changed successfully..")
# Try to create two accounts; the second should fail with MaxAccountLimitExceeded error.
test_msg = "Create account authconfigValidatorAccount1 should successfull."
account_args = {'AccountName': 'authconfigValidatorAccount1', 'Email': '[email protected]', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
access_key_args = {}
access_key_args['AccountName'] = "authconfigValidatorAccount1"
access_key_args['AccessKeyId'] = account_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = account_response_elements['SecretKey']
test_msg = "Create account authconfigValidatorAccount2 should fail with MaxAccountLimitExceeded with limit as 1."
account_args = {'AccountName': 'authconfigValidatorAccount2', 'Email': '[email protected]', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("MaxAccountLimitExceeded")
test_access_key = S3ClientConfig.access_key_id
test_secret_key = S3ClientConfig.secret_key
# Test IAM User limit
S3ClientConfig.access_key_id = access_key_args['AccessKeyId']
S3ClientConfig.secret_key = access_key_args['SecretAccessKey']
test_msg = "Create User s3user1 in authconfigValidatorAccount1 should successful."
user_args = {'UserName': 's3user1'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
AuthTest(test_msg).create_user(**user_args).execute_test()\
.command_should_match_pattern(user1_response_pattern)
test_msg = "Create User s3user2 in authconfigValidatorAccount1 should fail with MaxUserLimitExceeded with limit as 1."
user_args = {'UserName': 's3user2'}
AuthTest(test_msg).create_user(**user_args).execute_test(negative_case=True)\
.command_response_should_have("MaxUserLimitExceeded")
# Cleanup: delete the IAM user
test_msg = "Delete User s3user1 should successfull."
user_args = {}
user_args['UserName'] = "s3user1"
AuthTest(test_msg).delete_user(**user_args).execute_test()\
.command_response_should_have("User deleted.")
# Delete account
test_msg = 'Delete Account should succeed.'
account_args = {}
account_args['AccountName'] = access_key_args['AccountName']
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# Restore config parameters
S3ClientConfig.access_key_id = test_access_key
S3ClientConfig.secret_key = test_secret_key
s3confstore.set_config('maxAccountLimit', old_maxAccountValue, True)
s3confstore.set_config('maxIAMUserLimit', old_maxIAMUserValue, True)
os.system('systemctl restart s3authserver')
time.sleep(30) # sometime authserver takes more time to restart
print("Reverted authserver.properties (/opt/seagate/cortx/auth/resources/authserver.properties) with origional values successfully...")
# Validate delete account functionality with ldap credentials
def delete_acc_ldap_cred_tests():
# DeleteAccount with ldap credentials tests -- starts
test_access_key = S3ClientConfig.access_key_id
test_secret_key = S3ClientConfig.secret_key
test_msg = "Create account s3deletetest for testing Account Deletion with ldap credentials"
account_args = {'AccountName': 's3deletetest', 'Email': '[email protected]', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
AuthTest(test_msg).create_account(**account_args).execute_test()\
.command_should_match_pattern(account_response_pattern)
test_msg = 'DeleteAccount should fail with InvalidAccessKeyId error with wrong ldapadmin username i.e. dummyUser'
account_args = {}
account_args['AccountName'] ="s3deletetest"
S3ClientConfig.access_key_id = "dummyUser"
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("InvalidAccessKeyId")
test_msg = 'DeleteAccount should fail with InvalidAccessKeyId error with empty ldapadmin username'
account_args = {}
account_args['AccountName'] ="s3deletetest"
S3ClientConfig.access_key_id = ""
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("InvalidAccessKeyId")
test_msg = 'DeleteAccount should fail with SignatureDoesNotMatch error with invalid ldappassword i.e. dummykey'
account_args = {}
account_args['AccountName'] ="s3deletetest"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = "dummykey"
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("SignatureDoesNotMatch")
test_msg = 'DeleteAccount should fail with SignatureDoesNotMatch error with empty ldappassword'
account_args = {}
account_args['AccountName'] ="s3deletetest"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = ""
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("SignatureDoesNotMatch")
test_msg = 'DeleteAccount successful with ldap credentials'
account_args = {}
account_args['AccountName'] ="s3deletetest"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
S3ClientConfig.access_key_id = test_access_key
S3ClientConfig.secret_key = test_secret_key
ldap_user_name = S3ClientConfig.ldapuser
ldap_user_passwd = S3ClientConfig.ldappasswd
test_msg = "Create account s3deletetest1 for testing Account Deletion scnearios with ldap credentials"
account_args = {'AccountName': 's3deletetest1', 'Email': '[email protected]', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()\
.command_should_match_pattern(account_response_pattern)
response_elements = get_response_elements(result.status.stdout)
accesskey = response_elements['AccessKeyId']
secretkey = response_elements['SecretKey']
os.environ["AWS_ACCESS_KEY_ID"] = accesskey
os.environ["AWS_SECRET_ACCESS_KEY"] = secretkey
AwsTest('Aws can create bucket').create_bucket("tbucket").execute_test().command_is_successful()
test_msg = 'DeleteAccount should fail with AccountNotEmpty error with ldap credentials'
account_args = {}
account_args['AccountName'] ="s3deletetest1"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("AccountNotEmpty")
S3ClientConfig.access_key_id = accesskey
S3ClientConfig.secret_key = secretkey
# Delete bucket with account access key
AwsTest('Aws can delete bucket').delete_bucket("tbucket").execute_test().command_is_successful()
# create IAM User and try to delete account
test_msg = "Create User s3user1 (default path)"
user_args = {'UserName': 's3user1'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
# Try to delete account
test_msg = 'DeleteAccount should fail with DeleteConflict error with ldap credentials with IAM user as sub-resource'
account_args = {}
account_args['AccountName'] ="s3deletetest1"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("DeleteConflict")
S3ClientConfig.access_key_id = accesskey
S3ClientConfig.secret_key = secretkey
test_msg = 'Delete User s3user1'
user_args = {}
user_args['UserName'] = "s3user1"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'DeleteAccount successful with ldap credentials'
account_args = {}
account_args['AccountName'] ="s3deletetest1"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# DeleteAccount fail if account has bucket/iam-users -- end
# Restore config parameters
S3ClientConfig.access_key_id = test_access_key
S3ClientConfig.secret_key = test_secret_key
S3ClientConfig.ldapuser = ldap_user_name
S3ClientConfig.ldappasswd = ldap_user_passwd
del os.environ["AWS_ACCESS_KEY_ID"]
del os.environ["AWS_SECRET_ACCESS_KEY"]
# DeleteAccount fails with IAM credentials/temp auth credentials of IAM User --- start
date_pattern_for_tempAuthCred = "[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T(2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9].[0-9]*[\+-][0-9]*"
test_msg = "Create account tempAuthDeleteAccount"
account_args = {'AccountName': 'tempAuthDeleteAccount', 'Email': '[email protected]', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result1.status.stdout)
acc_access_key_args = {}
acc_access_key_args['AccountName'] = "tempAuthDeleteAccount"
acc_access_key_args['AccessKeyId'] = account_response_elements['AccessKeyId']
acc_access_key_args['SecretAccessKey'] = account_response_elements['SecretKey']
S3ClientConfig.access_key_id = acc_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = acc_access_key_args['SecretAccessKey']
test_msg = "Create User s3user1 (default path)"
user_args = {'UserName': 's3user1'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is s3user1) using account credentials.'
accesskey_response_elements = {}
iam_access_key_args = {}
iam_access_key_args['UserName'] = 's3user1'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**iam_access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
iam_access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
iam_access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
# Test DeleteAccount should fail with IAM user credentials
test_msg = 'DeleteAccount tempAuthDeleteAccount should fail with InvalidUser error with IAM user access key'
account_args = {}
account_args['AccountName'] ="tempAuthDeleteAccount"
S3ClientConfig.access_key_id = iam_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = iam_access_key_args['SecretAccessKey']
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("InvalidUser")
date_pattern = "[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1]) (2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9][\+-][0-9]*:[0-9]*"
test_msg = 'Create UserLoginProfile for user s3user1 should succeed.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] ="s3user1"
user_args['Password'] ="abcdefg"
S3ClientConfig.access_key_id = acc_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = acc_access_key_args['SecretAccessKey']
login_profile_response_pattern = "Login Profile "+date_pattern+" False "+user_args['UserName']
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(login_profile_response_pattern)
#Get Temp Auth Credentials for IAM user s3user1
user_access_key_args = {}
user_args['AccountName'] = acc_access_key_args['AccountName']
test_msg = 'Generate GetTempAuthCredentials for IAM User s3user1'
account_name_flag = "-a"
password_flag = "--password"
response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, ExpiryTime = "+date_pattern_for_tempAuthCred+", SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**user_args).execute_test()
result.command_should_match_pattern(response_pattern)
response_elements = get_response_elements(result.status.stdout)
user_access_key_args['AccessKeyId'] = response_elements['AccessKeyId']
user_access_key_args['SecretAccessKey'] = response_elements['SecretAccessKey']
user_access_key_args['SessionToken'] = response_elements['SessionToken']
# Test DeleteAccount with IAM user temp credentials should fail.
test_msg = 'DeleteAccount should fail with InvalidUser error when using IAM user temporary credentials'
account_args = {}
account_args['AccountName'] =acc_access_key_args['AccountName']
S3ClientConfig.access_key_id = user_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = user_access_key_args['SecretAccessKey']
S3ClientConfig.token = user_access_key_args['SessionToken']
result =AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidUser")
S3ClientConfig.access_key_id = acc_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = acc_access_key_args['SecretAccessKey']
test_msg = "Delete IAM user's temporary access key for s3user1 should succeed."
user_access_key_args['userName'] = iam_access_key_args['UserName']
result = AuthTest(test_msg).delete_access_key(**user_access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = "Delete IAM user's access key for s3user1 should succeed."
user_access_key_args['AccessKeyId'] = iam_access_key_args['AccessKeyId']
S3ClientConfig.token = ""
result = AuthTest(test_msg).delete_access_key(**user_access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User s3user1 using account credentials should succeed.'
user_args = {}
user_args['UserName'] = iam_access_key_args['UserName']
S3ClientConfig.access_key_id = acc_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = acc_access_key_args['SecretAccessKey']
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
# Test if DeleteAccount successful or not.
test_msg = 'DeleteAccount successful with ldap credentials'
account_args = {}
account_args['AccountName'] =acc_access_key_args['AccountName']
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# DeleteAccount fails with IAM credentials/temp auth credentials of IAM User --- end
# Restore config parameters
S3ClientConfig.access_key_id = test_access_key
S3ClientConfig.secret_key = test_secret_key
S3ClientConfig.ldapuser = ldap_user_name
S3ClientConfig.ldappasswd = ldap_user_passwd
def execute_all_system_tests():
if Config.no_ssl :
print('Executing auth system tests over HTTP connection')
else:
print('Executing auth system tests over HTTPS connection')
# Do not change the order.
before_all()
test_max_account_and_user_limit_value_of_auth_config()
# time.sleep(5)
account_tests()
user_tests()
accesskey_tests()
role_tests()
saml_provider_tests()
get_federation_token_test()
delete_account_tests()
reset_account_accesskey_tests()
auth_health_check_tests()
delete_acc_ldap_cred_tests()
if __name__ == '__main__':
execute_all_system_tests()
|
[] |
[] |
[
"AWS_SESSION_TOKEN",
"AWS_SECRET_ACCESS_KEY",
"SG_ACCESS_KEY",
"SG_SECRET_KEY",
"SG_LDAP_USER",
"AWS_ACCESS_KEY_ID",
"SG_LDAP_PASSWD"
] |
[]
|
["AWS_SESSION_TOKEN", "AWS_SECRET_ACCESS_KEY", "SG_ACCESS_KEY", "SG_SECRET_KEY", "SG_LDAP_USER", "AWS_ACCESS_KEY_ID", "SG_LDAP_PASSWD"]
|
python
| 7 | 0 | |
buildlet/remote.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package buildlet
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/build"
"golang.org/x/build/buildenv"
"golang.org/x/build/types"
)
type UserPass struct {
Username string // "user-$USER"
Password string // buildlet key
}
// A CoordinatorClient makes calls to the build coordinator.
type CoordinatorClient struct {
// Auth specifies how to authenticate to the coordinator.
Auth UserPass
// Instance optionally specifies the build coordinator to connect
// to. If zero, the production coordinator is used.
Instance build.CoordinatorInstance
mu sync.Mutex
hc *http.Client
}
func (cc *CoordinatorClient) instance() build.CoordinatorInstance {
if cc.Instance == "" {
return build.ProdCoordinator
}
return cc.Instance
}
func (cc *CoordinatorClient) client() (*http.Client, error) {
cc.mu.Lock()
defer cc.mu.Unlock()
if cc.hc != nil {
return cc.hc, nil
}
cc.hc = &http.Client{
Transport: &http.Transport{
Dial: defaultDialer(),
DialTLS: cc.instance().TLSDialer(),
},
}
return cc.hc, nil
}
// CreateBuildlet creates a new buildlet of the given builder type on
// cc.
//
// This takes a builderType (instead of a hostType), but the
// returned buildlet can be used as any builder that has the same
// underlying buildlet type. For instance, a linux-amd64 buildlet can
// act as either linux-amd64 or linux-386-387.
//
// It may expire at any time.
// To release it, call Client.Close.
func (cc *CoordinatorClient) CreateBuildlet(builderType string) (Client, error) {
return cc.CreateBuildletWithStatus(builderType, nil)
}
const (
// GomoteCreateStreamVersion is the gomote protocol version at which JSON streamed responses started.
GomoteCreateStreamVersion = "20191119"
// GomoteCreateMinVersion is the oldest "gomote create" protocol version that's still supported.
GomoteCreateMinVersion = "20160922"
)
// CreateBuildletWithStatus is like CreateBuildlet but accepts an optional status callback.
func (cc *CoordinatorClient) CreateBuildletWithStatus(builderType string, status func(types.BuildletWaitStatus)) (Client, error) {
hc, err := cc.client()
if err != nil {
return nil, err
}
ipPort, _ := cc.instance().TLSHostPort() // must succeed if client did
form := url.Values{
"version": {GomoteCreateStreamVersion}, // checked by cmd/coordinator/remote.go
"builderType": {builderType},
}
req, _ := http.NewRequest("POST",
"https://"+ipPort+"/buildlet/create",
strings.NewReader(form.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.SetBasicAuth(cc.Auth.Username, cc.Auth.Password)
// TODO: accept a context for deadline/cancelation
res, err := hc.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != 200 {
slurp, _ := ioutil.ReadAll(res.Body)
return nil, fmt.Errorf("%s: %s", res.Status, slurp)
}
// TODO: delete this once the server's been deployed with it.
// This code only exists for compatibility for a day or two at most.
if res.Header.Get("X-Supported-Version") < GomoteCreateStreamVersion {
var rb RemoteBuildlet
if err := json.NewDecoder(res.Body).Decode(&rb); err != nil {
return nil, err
}
return cc.NamedBuildlet(rb.Name)
}
type msg struct {
Error string `json:"error"`
Buildlet *RemoteBuildlet `json:"buildlet"`
Status *types.BuildletWaitStatus `json:"status"`
}
bs := bufio.NewScanner(res.Body)
for bs.Scan() {
line := bs.Bytes()
var m msg
if err := json.Unmarshal(line, &m); err != nil {
return nil, err
}
if m.Error != "" {
return nil, errors.New(m.Error)
}
if m.Buildlet != nil {
if m.Buildlet.Name == "" {
return nil, fmt.Errorf("buildlet: coordinator's /buildlet/create returned an unnamed buildlet")
}
return cc.NamedBuildlet(m.Buildlet.Name)
}
if m.Status != nil {
if status != nil {
status(*m.Status)
}
continue
}
log.Printf("buildlet: unknown message type from coordinator's /buildlet/create endpoint: %q", line)
continue
}
err = bs.Err()
if err == nil {
err = errors.New("buildlet: coordinator's /buildlet/create ended its response stream without a terminal message")
}
return nil, err
}
type RemoteBuildlet struct {
HostType string // "host-linux-jessie"
BuilderType string // "linux-386-387"
Name string // "buildlet-adg-openbsd-386-2"
Created time.Time
Expires time.Time
}
func (cc *CoordinatorClient) RemoteBuildlets() ([]RemoteBuildlet, error) {
hc, err := cc.client()
if err != nil {
return nil, err
}
ipPort, _ := cc.instance().TLSHostPort() // must succeed if client did
req, _ := http.NewRequest("GET", "https://"+ipPort+"/buildlet/list", nil)
req.SetBasicAuth(cc.Auth.Username, cc.Auth.Password)
res, err := hc.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != 200 {
slurp, _ := ioutil.ReadAll(res.Body)
return nil, fmt.Errorf("%s: %s", res.Status, slurp)
}
var ret []RemoteBuildlet
if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
return nil, err
}
return ret, nil
}
// NamedBuildlet returns a buildlet client for the named remote buildlet.
// Names are not validated. Use Client.Status to check whether the client works.
func (cc *CoordinatorClient) NamedBuildlet(name string) (Client, error) {
hc, err := cc.client()
if err != nil {
return nil, err
}
ipPort, _ := cc.instance().TLSHostPort() // must succeed if client did
c := &client{
baseURL: "https://" + ipPort,
remoteBuildlet: name,
httpClient: hc,
authUser: cc.Auth.Username,
password: cc.Auth.Password,
}
c.setCommon()
return c, nil
}
var (
flagsRegistered bool
gomoteUserFlag string
)
// RegisterFlags registers "user" and "staging" flags that control the
// behavior of NewCoordinatorClientFromFlags. These are used by remote
// client commands like gomote.
func RegisterFlags() {
if !flagsRegistered {
buildenv.RegisterFlags()
flag.StringVar(&gomoteUserFlag, "user", username(), "gomote server username")
flagsRegistered = true
}
}
// username finds the user's username in the environment.
func username() string {
if runtime.GOOS == "windows" {
return os.Getenv("USERNAME")
}
return os.Getenv("USER")
}
// configDir finds the OS-dependent config dir.
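// For example (illustrative), on a typical Linux setup this resolves to
// "$HOME/.config/gomote" (or "$XDG_CONFIG_HOME/gomote" when that is set);
// userToken below then reads "user-<user>.token" from this directory.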
func configDir() string {
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "Gomote")
}
if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
return filepath.Join(xdg, "gomote")
}
return filepath.Join(os.Getenv("HOME"), ".config", "gomote")
}
// userToken reads the gomote token from the user's home directory.
func userToken() (string, error) {
if gomoteUserFlag == "" {
panic("userToken called with user flag empty")
}
keyDir := configDir()
userPath := filepath.Join(keyDir, "user-"+gomoteUserFlag+".user")
b, err := ioutil.ReadFile(userPath)
if err == nil {
gomoteUserFlag = string(bytes.TrimSpace(b))
}
baseFile := "user-" + gomoteUserFlag + ".token"
if buildenv.FromFlags() == buildenv.Staging {
baseFile = "staging-" + baseFile
}
tokenFile := filepath.Join(keyDir, baseFile)
slurp, err := ioutil.ReadFile(tokenFile)
if os.IsNotExist(err) {
return "", fmt.Errorf("Missing file %s for user %q. Change --user or obtain a token and place it there.",
tokenFile, gomoteUserFlag)
}
return strings.TrimSpace(string(slurp)), err
}
// NewCoordinatorClientFromFlags constructs a CoordinatorClient for the current user.
func NewCoordinatorClientFromFlags() (*CoordinatorClient, error) {
if !flagsRegistered {
return nil, errors.New("RegisterFlags not called")
}
inst := build.ProdCoordinator
env := buildenv.FromFlags()
if env == buildenv.Staging {
inst = build.StagingCoordinator
} else if env == buildenv.Development {
inst = "localhost:8119"
}
if gomoteUserFlag == "" {
return nil, errors.New("user flag must be specified")
}
tok, err := userToken()
if err != nil {
return nil, err
}
return &CoordinatorClient{
Auth: UserPass{
Username: "user-" + gomoteUserFlag,
Password: tok,
},
Instance: inst,
}, nil
}
|
[
"\"USERNAME\"",
"\"USER\"",
"\"APPDATA\"",
"\"XDG_CONFIG_HOME\"",
"\"HOME\""
] |
[] |
[
"USERNAME",
"APPDATA",
"USER",
"HOME",
"XDG_CONFIG_HOME"
] |
[]
|
["USERNAME", "APPDATA", "USER", "HOME", "XDG_CONFIG_HOME"]
|
go
| 5 | 0 | |
mct_json_converter/main_window.py
|
from __future__ import print_function
import sys
import os
import time
import mct_json_converter
from PyQt4 import QtCore
from PyQt4 import QtGui
from converter_ui import Ui_MainWindow
USER_HOME = os.getenv('USERPROFILE')
if USER_HOME is None:
USER_HOME = os.getenv('HOME')
DEFAULT_DIRECTORY = os.path.join(USER_HOME,'mct_log')
else:
DEFAULT_DIRECTORY = USER_HOME
class ConverterMainWindow(QtGui.QMainWindow,Ui_MainWindow):
def __init__(self,parent=None):
super(ConverterMainWindow,self).__init__(parent)
self.setupUi(self)
self.connectActions()
self.initialize()
def initialize(self):
self.matRadioButton.setChecked(True)
self.fileListWidget.setAlternatingRowColors(True)
if os.path.isdir(DEFAULT_DIRECTORY):
self.directory = DEFAULT_DIRECTORY
else:
self.directory = USER_HOME
self.disableWidgetsOnEmpty()
if not mct_json_converter.haveh5py:
self.hdf5RadioButton.setEnabled(False)
def connectActions(self):
self.selectPushButton.clicked.connect(self.selectClicked)
self.clearPushButton.clicked.connect(self.clearClicked)
self.convertPushButton.clicked.connect(self.convertClicked)
def selectClicked(self):
if not os.path.isdir(self.directory):
self.directory = USER_HOME
fileNameList = QtGui.QFileDialog.getOpenFileNames(
self,
'Select JSON files for conversion',
self.directory,
"JSON (*.json)"
)
self.fileListWidget.clear()
if fileNameList:
for name in fileNameList:
listItem = QtGui.QListWidgetItem(name)
self.fileListWidget.addItem(listItem)
self.enableWidgetsOnNonEmpty()
else:
self.disableWidgetsOnEmpty()
def enableWidgetsOnNonEmpty(self):
self.convertPushButton.setEnabled(True)
self.clearPushButton.setEnabled(True)
def disableWidgetsOnEmpty(self):
self.convertPushButton.setEnabled(False)
self.clearPushButton.setEnabled(False)
def enableWidgetsAfterConverting(self):
self.selectPushButton.setEnabled(True)
self.clearPushButton.setEnabled(True)
self.matRadioButton.setEnabled(True)
if mct_json_converter.haveh5py:
self.hdf5RadioButton.setEnabled(True)
self.fileListWidget.setEnabled(True)
self.convertPushButton.setEnabled(True)
def disableWidgetsWhileConverting(self):
self.selectPushButton.setEnabled(False)
self.clearPushButton.setEnabled(False)
self.matRadioButton.setEnabled(False)
self.hdf5RadioButton.setEnabled(False)
self.fileListWidget.setEnabled(False)
self.convertPushButton.setEnabled(False)
def clearClicked(self):
self.fileListWidget.clear()
def convertClicked(self):
self.disableWidgetsWhileConverting()
fileFormat = self.getFileFormat()
numFiles = self.fileListWidget.count()
for i in range(numFiles):
item = self.fileListWidget.item(i)
fileName = str(item.text())
filePath = os.path.join(self.directory,fileName)
statusMessage = ' Converting: {0}/{1}'.format(i+1,numFiles)
self.statusbar.showMessage(statusMessage)
self.repaint()
try:
converter = mct_json_converter.JSON_Converter(filePath)
except Exception, e:
message = 'Unable to convert file: {0}\n\n{1}'.format(fileName,str(e))
QtGui.QMessageBox.critical(self,'Error',message)
self.enableWidgetsAfterConverting()
return
if fileFormat == 'mat':
writeFunc = converter.writeMatFile
elif fileFormat == 'hdf5':
writeFunc = converter.writeH5File
else:
raise RuntimeError, 'unknown file format'
try:
writeFunc()
except Exception, e:
message = 'Unable to convert file: {0}\n\n{1}'.format(fileName,str(e))
QtGui.QMessageBox.critical(self,'Error',message)
self.enableWidgetsAfterConverting()
return
self.statusbar.showMessage(' Done')
self.enableWidgetsAfterConverting()
def getFileFormat(self):
if self.hdf5RadioButton.isChecked():
fileFormat = 'hdf5'
else:
fileFormat = 'mat'
return fileFormat
def converterMain():
app = QtGui.QApplication(sys.argv)
mainWindow = ConverterMainWindow()
mainWindow.show()
app.exec_()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
converterMain()
|
[] |
[] |
[
"HOME",
"USERPROFILE"
] |
[]
|
["HOME", "USERPROFILE"]
|
python
| 2 | 0 | |
people/plenum_session_voters_stats.py
|
from dataflows import Flow, load, dump_to_path, PackageWrapper
import os
import datetime
import re
from collections import defaultdict
from datapackage_pipelines_knesset.common_flow import (rows_counter,
process_rows_remove_resource,
kns_knessetdates_processor,
get_knessetdate,
mk_individual_factions_processor,
mk_individual_names_processor,
get_mk_faction_ids)
def get_plenum_session_start_date(plenum_session):
start_date = plenum_session['StartDate'].date()
if start_date < datetime.date(1947, 1, 1):
m = re.findall('([0-9]+)/([0-9]+)/([0-9]+)', plenum_session['Name'])
assert m, 'failed to find date for session {}'.format(plenum_session)
assert len(m) == 1
start_date = datetime.date(*map(int, reversed(m[0])))
return start_date
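# Illustrative example for get_plenum_session_start_date: a pre-1947 StartDate with a
# Name containing "12/03/2015" falls back to the regex above and yields datetime.date(2015, 3, 12).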
def flow():
data_path = 'data{}/'.format('_samples' if os.environ.get('KNESSET_DATA_SAMPLES') else '')
kns_knessetdates_sorted = []
mk_individual_factions = {}
all_mk_ids = set()
session_voted_mk_ids = {}
aggregates = {}
def process_session_voters(rows):
for row in rows_counter('session_voters', rows):
session_voted_mk_ids[row['PlenumSessionID']] = row['voter_mk_ids']
def get_session_voted_mk_ids(session_id):
attended_mk_ids = session_voted_mk_ids.get(session_id)
return attended_mk_ids if attended_mk_ids else []
def process_kns_plenumsession(sessions):
for session in rows_counter('kns_plenumsession', sessions):
session_date = get_plenum_session_start_date(session)
voted_mk_ids = get_session_voted_mk_ids(session['PlenumSessionID'])
for mk_id, faction_id in get_mk_faction_ids(all_mk_ids, mk_individual_factions,
session_date):
knessetdate = get_knessetdate(kns_knessetdates_sorted, session_date)
agg = aggregates.setdefault(knessetdate['knesset'], {})\
.setdefault(knessetdate['plenum'], {})\
.setdefault(knessetdate['assembly'], {})\
.setdefault(knessetdate['pagra'], {})\
.setdefault(faction_id, {})\
.setdefault(mk_id, defaultdict(int))
if mk_id in voted_mk_ids:
agg['voted_sessions'] += 1
agg['total_sessions'] += 1
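# Resulting structure (illustrative), flattened by get_all_aggregates below:
# aggregates[knesset][plenum][assembly][pagra][faction_id][mk_id]
#     -> defaultdict(int) with 'voted_sessions' / 'total_sessions' counters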
def get_all_aggregates():
for knesset, aggs in aggregates.items():
for plenum, aggs in aggs.items():
for assembly, aggs in aggs.items():
for pagra, aggs in aggs.items():
for faction_id, aggs in aggs.items():
for mk_id, agg in aggs.items():
yield (knesset, plenum, assembly, pagra,
faction_id, mk_id), agg
def get_mk_aggregates():
for agg_key, agg in get_all_aggregates():
if agg.get('total_sessions', 0) > 0:
knesset, plenum, assembly, pagra, faction_id, mk_id = agg_key
yield dict({'voted_sessions': 0,
'total_sessions': 0,
'voted_sessions_percent': 0, },
**agg, knesset=knesset, plenum=plenum, assembly=assembly,
pagra=int(pagra), faction_id=faction_id, mk_id=mk_id)
def get_aggregates(package: PackageWrapper):
schema_fields = [{'name': 'knesset', 'type': 'integer'},
{'name': 'plenum', 'type': 'integer'},
{'name': 'assembly', 'type': 'integer'},
{'name': 'pagra', 'type': 'integer'},
{'name': 'faction_id', 'type': 'integer'},
{'name': 'mk_id', 'type': 'integer'},
{'name': 'voted_sessions', 'type': 'integer'},
{'name': 'total_sessions', 'type': 'integer'},
{'name': 'voted_sessions_percent', 'type': 'integer'},]
package.pkg.add_resource({'name': 'plenum_session_voters_stats',
'path': 'plenum_session_voters_stats.csv',
'schema': {'fields': schema_fields}})
yield package.pkg
yield from package
min_voted_sessions_percent = 100
max_voted_sessions_percent = 0
for agg_key, agg in get_all_aggregates():
total_sessions = agg.get('total_sessions', 0)
if total_sessions > 0:
voted_sessions_percent = int(agg.get('voted_sessions', 0)
/ total_sessions * 100)
agg['voted_sessions_percent'] = voted_sessions_percent
if voted_sessions_percent < min_voted_sessions_percent:
min_voted_sessions_percent = voted_sessions_percent
elif voted_sessions_percent > max_voted_sessions_percent:
max_voted_sessions_percent = voted_sessions_percent
yield get_mk_aggregates()
return Flow(load(data_path + 'members/mk_individual/datapackage.json',
resources=['mk_individual_names']),
process_rows_remove_resource('mk_individual_names',
mk_individual_names_processor(all_mk_ids)),
load(data_path + 'members/mk_individual/datapackage.json',
resources=['mk_individual_factions']),
process_rows_remove_resource('mk_individual_factions',
mk_individual_factions_processor(mk_individual_factions)),
load(data_path + 'knesset/kns_knessetdates/datapackage.json',
resources=['kns_knessetdates']),
process_rows_remove_resource('kns_knessetdates',
kns_knessetdates_processor(kns_knessetdates_sorted)),
load(data_path + 'people/plenum/session_voters/datapackage.json',
resources=['kns_plenumsession']),
process_rows_remove_resource('kns_plenumsession',
process_session_voters),
load(data_path + 'plenum/kns_plenumsession/datapackage.json',
resources=['kns_plenumsession']),
process_rows_remove_resource('kns_plenumsession',
process_kns_plenumsession),
get_aggregates,
dump_to_path('data/people/plenum/session_voters_stats'),
)
if __name__ == '__main__':
flow().process()
|
[] |
[] |
[
"KNESSET_DATA_SAMPLES"
] |
[]
|
["KNESSET_DATA_SAMPLES"]
|
python
| 1 | 0 | |
test/e2e/report_test.go
|
package e2e
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"testing"
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
reportTestTimeout = 5 * time.Minute
reportTestOutputDirectory string
runAWSBillingTests bool
)
func init() {
if reportTestTimeoutStr := os.Getenv("REPORT_TEST_TIMEOUT"); reportTestTimeoutStr != "" {
var err error
reportTestTimeout, err = time.ParseDuration(reportTestTimeoutStr)
if err != nil {
log.Fatalf("Invalid REPORT_TEST_TIMEOUT: %v", err)
}
}
reportTestOutputDirectory = os.Getenv("TEST_RESULT_REPORT_OUTPUT_DIRECTORY")
if reportTestOutputDirectory == "" {
log.Fatalf("$TEST_RESULT_REPORT_OUTPUT_DIRECTORY must be set")
}
err := os.MkdirAll(reportTestOutputDirectory, 0777)
if err != nil {
log.Fatalf("error making directory %s, err: %s", reportTestOutputDirectory, err)
}
runAWSBillingTests = os.Getenv("ENABLE_AWS_BILLING_TESTS") == "true"
}
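// Typical invocation (illustrative values only):
//   TEST_RESULT_REPORT_OUTPUT_DIRECTORY=/tmp/report-results \
//   REPORT_TEST_TIMEOUT=10m ENABLE_AWS_BILLING_TESTS=false go test ./test/e2e/...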
func TestReportsProduceData(t *testing.T) {
tests := []struct {
// name is the name of the sub test but also the name of the report.
name string
queryName string
timeout time.Duration
skip bool
}{
{
name: "namespace-cpu-request",
queryName: "namespace-cpu-request",
timeout: reportTestTimeout,
},
{
name: "namespace-cpu-usage",
queryName: "namespace-cpu-usage",
timeout: reportTestTimeout,
},
{
name: "namespace-memory-request",
queryName: "namespace-memory-request",
timeout: reportTestTimeout + time.Minute,
},
{
name: "namespace-memory-usage",
queryName: "namespace-memory-usage",
timeout: reportTestTimeout + time.Minute,
},
{
name: "pod-cpu-request",
queryName: "pod-cpu-request",
timeout: reportTestTimeout,
},
{
name: "pod-cpu-usage",
queryName: "pod-cpu-usage",
timeout: reportTestTimeout,
},
{
name: "pod-memory-request",
queryName: "pod-memory-request",
timeout: reportTestTimeout,
},
{
name: "pod-memory-usage",
queryName: "pod-memory-usage",
timeout: reportTestTimeout,
},
{
name: "pod-memory-request-vs-node-memory-allocatable",
queryName: "pod-memory-request-vs-node-memory-allocatable",
timeout: reportTestTimeout + time.Minute,
},
{
name: "node-cpu-utilization",
queryName: "node-cpu-utilization",
timeout: reportTestTimeout,
},
{
name: "node-memory-utilization",
queryName: "node-memory-utilization",
timeout: reportTestTimeout,
},
{
name: "pod-cpu-request-aws",
queryName: "pod-cpu-request-aws",
timeout: reportTestTimeout,
skip: !runAWSBillingTests,
},
{
name: "pod-memory-request-aws",
queryName: "pod-memory-request-aws",
timeout: reportTestTimeout,
skip: !runAWSBillingTests,
},
{
name: "aws-ec2-cluster-cost",
queryName: "aws-ec2-cluster-cost",
timeout: reportTestTimeout,
skip: !runAWSBillingTests,
},
}
reportStart, reportEnd := testFramework.CollectMetricsOnce(t)
t.Logf("reportStart: %s, reportEnd: %s", reportStart, reportEnd)
for i, test := range tests {
// Fix closure captures
test := test
i := i
// The JVM has a warm up time and the first report always takes longer
// than others, so give it a longer timeout
if i == 0 {
test.timeout += time.Minute
}
t.Run(test.name, func(t *testing.T) {
if testing.Short() && i != 0 {
t.Skip("skipping test in short mode")
return
}
if test.skip {
t.Skip("test configured to be skipped")
return
}
report := testFramework.NewSimpleReport(test.name, test.queryName, reportStart, reportEnd)
err := testFramework.MeteringClient.Reports(testFramework.Namespace).Delete(report.Name, nil)
assert.Condition(t, func() bool {
return err == nil || errors.IsNotFound(err)
}, "failed to ensure report doesn't exist before creating report")
t.Logf("creating report %s", report.Name)
err = testFramework.CreateMeteringReport(report)
require.NoError(t, err, "creating report should succeed")
defer func() {
t.Logf("deleting report %s", report.Name)
err := testFramework.MeteringClient.Reports(testFramework.Namespace).Delete(report.Name, nil)
assert.NoError(t, err, "expected delete report to succeed")
}()
query := map[string]string{
"name": test.name,
"format": "json",
}
var reportResults []map[string]interface{}
var reportData []byte
err = wait.Poll(time.Second*5, test.timeout, func() (bool, error) {
req := testFramework.NewReportingOperatorSVCRequest("/api/v1/reports/get", query)
result := req.Do()
resp, err := result.Raw()
if err != nil {
return false, fmt.Errorf("error querying metering service got error: %v, body: %v", err, string(resp))
}
var statusCode int
result.StatusCode(&statusCode)
if statusCode == http.StatusAccepted {
t.Logf("report is still running")
return false, nil
}
require.Equal(t, http.StatusOK, statusCode, "http response status code should be ok")
err = json.Unmarshal(resp, &reportResults)
require.NoError(t, err, "expected to unmarshal response")
reportData = resp
return true, nil
})
require.NoError(t, err, "expected getting report result to not timeout")
assert.NotEmpty(t, reportResults, "reports should return at least 1 row")
fileName := path.Join(reportTestOutputDirectory, fmt.Sprintf("%s.json", test.name))
err = ioutil.WriteFile(fileName, reportData, os.ModePerm)
require.NoError(t, err, "expected writing report results to disk not to error")
})
}
}
|
[
"\"REPORT_TEST_TIMEOUT\"",
"\"TEST_RESULT_REPORT_OUTPUT_DIRECTORY\"",
"\"ENABLE_AWS_BILLING_TESTS\""
] |
[] |
[
"TEST_RESULT_REPORT_OUTPUT_DIRECTORY",
"ENABLE_AWS_BILLING_TESTS",
"REPORT_TEST_TIMEOUT"
] |
[]
|
["TEST_RESULT_REPORT_OUTPUT_DIRECTORY", "ENABLE_AWS_BILLING_TESTS", "REPORT_TEST_TIMEOUT"]
|
go
| 3 | 0 | |
stackoverflowRecommender/apps.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.apps import AppConfig
class StackoverflowrecommenderConfig(AppConfig):
name = 'stackoverflowRecommender'
def __init__(self, app_name, app_module):
super(StackoverflowrecommenderConfig, self).__init__(app_name, app_module)
# self.sparseDf = None
# def getResourcesPath(self):
# # repo_path = str(pathlib.Path(os.getcwd()).parent)
# repo_path = "C:/Users/Iancu/PycharmProjects/Stackoverflow_Recommendations/stackoverflow-recommendations"
# return repo_path + '/resources/matrix.npz'
# def ready(self):
# if os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine'):
# path = self.read_file('recomme-196220.appspot.com/matrix.npz')
# else:
# path = self.getResourcesPath()
# if self.sparseDf is None:
# self.sparseDf = SparseDataframe(filePath=path)
# print('Dataframe initialized')
# del (path)
|
[] |
[] |
[
"SERVER_SOFTWARE"
] |
[]
|
["SERVER_SOFTWARE"]
|
python
| 1 | 0 | |
apps/challenges/aws_utils.py
|
import json
import logging
import os
import random
import string
import uuid
import yaml
from botocore.exceptions import ClientError
from django.conf import settings
from django.core import serializers
from django.core.files.temp import NamedTemporaryFile
from http import HTTPStatus
from .challenge_notification_util import (
construct_and_send_worker_start_mail,
construct_and_send_eks_cluster_creation_mail,
)
from base.utils import get_boto3_client, send_email
from evalai.celery import app
from accounts.models import JwtToken
logger = logging.getLogger(__name__)
DJANGO_SETTINGS_MODULE = os.environ.get("DJANGO_SETTINGS_MODULE")
ENV = DJANGO_SETTINGS_MODULE.split(".")[-1]
EVALAI_DNS = os.environ.get("SERVICE_DNS")
aws_keys = {
"AWS_ACCOUNT_ID": os.environ.get("AWS_ACCOUNT_ID", "x"),
"AWS_ACCESS_KEY_ID": os.environ.get("AWS_ACCESS_KEY_ID", "x"),
"AWS_SECRET_ACCESS_KEY": os.environ.get("AWS_SECRET_ACCESS_KEY", "x"),
"AWS_REGION": os.environ.get("AWS_DEFAULT_REGION", "us-east-1"),
"AWS_STORAGE_BUCKET_NAME": os.environ.get(
"AWS_STORAGE_BUCKET_NAME", "evalai-s3-bucket"
),
}
COMMON_SETTINGS_DICT = {
"EXECUTION_ROLE_ARN": os.environ.get(
"EXECUTION_ROLE_ARN",
"arn:aws:iam::{}:role/evalaiTaskExecutionRole".format(
aws_keys["AWS_ACCOUNT_ID"]
),
),
"WORKER_IMAGE": os.environ.get(
"WORKER_IMAGE",
"{}.dkr.ecr.us-east-1.amazonaws.com/evalai-{}-worker:latest".format(
aws_keys["AWS_ACCOUNT_ID"], ENV
),
),
"CODE_UPLOAD_WORKER_IMAGE": os.environ.get(
"CODE_UPLOAD_WORKER_IMAGE",
"{}.dkr.ecr.us-east-1.amazonaws.com/evalai-{}-worker:latest".format(
aws_keys["AWS_ACCOUNT_ID"], ENV
),
),
"CIDR": os.environ.get("CIDR"),
"CLUSTER": os.environ.get("CLUSTER", "evalai-prod-cluster"),
"DJANGO_SERVER": os.environ.get("DJANGO_SERVER", "localhost"),
"EVALAI_API_SERVER": os.environ.get("EVALAI_API_SERVER", "localhost"),
"DEBUG": settings.DEBUG,
"EMAIL_HOST": settings.EMAIL_HOST,
"EMAIL_HOST_PASSWORD": settings.EMAIL_HOST_PASSWORD,
"EMAIL_HOST_USER": settings.EMAIL_HOST_USER,
"EMAIL_PORT": settings.EMAIL_PORT,
"EMAIL_USE_TLS": settings.EMAIL_USE_TLS,
"MEMCACHED_LOCATION": os.environ.get("MEMCACHED_LOCATION", None),
"RDS_DB_NAME": settings.DATABASES["default"]["NAME"],
"RDS_HOSTNAME": settings.DATABASES["default"]["HOST"],
"RDS_PASSWORD": settings.DATABASES["default"]["PASSWORD"],
"RDS_USERNAME": settings.DATABASES["default"]["USER"],
"RDS_PORT": settings.DATABASES["default"]["PORT"],
"SECRET_KEY": settings.SECRET_KEY,
"SENTRY_URL": os.environ.get("SENTRY_URL"),
}
VPC_DICT = {
"SUBNET_1": os.environ.get("SUBNET_1", "subnet1"),
"SUBNET_2": os.environ.get("SUBNET_2", "subnet2"),
"SUBNET_SECURITY_GROUP": os.environ.get("SUBNET_SECURITY_GROUP", "sg"),
}
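# The task/service definition templates below are rendered with str.format()
# (doubled braces escape literal braces) and then eval()-ed into Python dicts
# before being passed to boto3, e.g. client.register_task_definition(**definition).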
task_definition = """
{{
"family":"{queue_name}",
"executionRoleArn":"{EXECUTION_ROLE_ARN}",
"networkMode":"awsvpc",
"containerDefinitions":[
{{
"name": "{container_name}",
"image": "{WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCOUNT_ID",
"value": "{AWS_ACCOUNT_ID}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "AWS_STORAGE_BUCKET_NAME",
"value": "{AWS_STORAGE_BUCKET_NAME}"
}},
{{
"name": "CHALLENGE_PK",
"value": "{challenge_pk}"
}},
{{
"name": "CHALLENGE_QUEUE",
"value": "{queue_name}"
}},
{{
"name": "DJANGO_SERVER",
"value": "{DJANGO_SERVER}"
}},
{{
"name": "DJANGO_SETTINGS_MODULE",
"value": "settings.{ENV}"
}},
{{
"name": "DEBUG",
"value": "{DEBUG}"
}},
{{
"name": "EMAIL_HOST",
"value": "{EMAIL_HOST}"
}},
{{
"name": "EMAIL_HOST_PASSWORD",
"value": "{EMAIL_HOST_PASSWORD}"
}},
{{
"name": "EMAIL_HOST_USER",
"value": "{EMAIL_HOST_USER}"
}},
{{
"name": "EMAIL_PORT",
"value": "{EMAIL_PORT}"
}},
{{
"name": "EMAIL_USE_TLS",
"value": "{EMAIL_USE_TLS}"
}},
{{
"name": "MEMCACHED_LOCATION",
"value": "{MEMCACHED_LOCATION}"
}},
{{
"name": "PYTHONUNBUFFERED",
"value": "1"
}},
{{
"name": "RDS_DB_NAME",
"value": "{RDS_DB_NAME}"
}},
{{
"name": "RDS_HOSTNAME",
"value": "{RDS_HOSTNAME}"
}},
{{
"name": "RDS_PASSWORD",
"value": "{RDS_PASSWORD}"
}},
{{
"name": "RDS_USERNAME",
"value": "{RDS_USERNAME}"
}},
{{
"name": "RDS_PORT",
"value": "{RDS_PORT}"
}},
{{
"name": "SECRET_KEY",
"value": "{SECRET_KEY}"
}},
{{
"name": "SENTRY_URL",
"value": "{SENTRY_URL}"
}},
{{
"name": "AWS_SES_REGION_NAME",
"value": "{AWS_SES_REGION_NAME}"
}},
{{
"name": "AWS_SES_REGION_ENDPOINT",
"value": "{AWS_SES_REGION_ENDPOINT}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "us-east-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}}
],
"requiresCompatibilities":[
"FARGATE"
],
"cpu": "{CPU}",
"memory": "{MEMORY}",
}}
"""
task_definition_code_upload_worker = """
{{
"family":"{queue_name}",
"executionRoleArn":"{EXECUTION_ROLE_ARN}",
"networkMode":"awsvpc",
"containerDefinitions":[
{{
"name": "{code_upload_container_name}",
"image": "{CODE_UPLOAD_WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "CLUSTER_NAME",
"value": "{cluster_name}"
}},
{{
"name": "CLUSTER_ENDPOINT",
"value": "{cluster_endpoint}"
}},
{{
"name": "CERTIFICATE",
"value": "{certificate}"
}},
{{
"name": "CIDR",
"value": "{CIDR}"
}},
{{
"name": "QUEUE_NAME",
"value": "{queue_name}"
}},
{{
"name": "EVALAI_API_SERVER",
"value": "{EVALAI_API_SERVER}"
}},
{{
"name": "AUTH_TOKEN",
"value": "{auth_token}"
}},
{{
"name": "EVALAI_DNS",
"value": "{EVALAI_DNS}"
}},
{{
"name": "EFS_ID",
"value": "{EFS_ID}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "us-east-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}}
],
"requiresCompatibilities":[
"FARGATE"
],
"cpu": "{CPU}",
"memory": "{MEMORY}",
}}
"""
task_definition_static_code_upload_worker = """
{{
"family":"{queue_name}",
"executionRoleArn":"{EXECUTION_ROLE_ARN}",
"networkMode":"awsvpc",
"containerDefinitions":[
{{
"name": "{code_upload_container_name}",
"image": "{CODE_UPLOAD_WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "CLUSTER_NAME",
"value": "{cluster_name}"
}},
{{
"name": "CLUSTER_ENDPOINT",
"value": "{cluster_endpoint}"
}},
{{
"name": "CERTIFICATE",
"value": "{certificate}"
}},
{{
"name": "CIDR",
"value": "{CIDR}"
}},
{{
"name": "QUEUE_NAME",
"value": "{queue_name}"
}},
{{
"name": "EVALAI_API_SERVER",
"value": "{EVALAI_API_SERVER}"
}},
{{
"name": "AUTH_TOKEN",
"value": "{auth_token}"
}},
{{
"name": "EVALAI_DNS",
"value": "{EVALAI_DNS}"
}},
{{
"name": "EFS_ID",
"value": "{EFS_ID}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "us-east-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}},
{{
"name": "{container_name}",
"image": "{WORKER_IMAGE}",
"essential": True,
"environment": [
{{
"name": "AWS_DEFAULT_REGION",
"value": "{AWS_REGION}"
}},
{{
"name": "AWS_ACCOUNT_ID",
"value": "{AWS_ACCOUNT_ID}"
}},
{{
"name": "AWS_ACCESS_KEY_ID",
"value": "{AWS_ACCESS_KEY_ID}"
}},
{{
"name": "AWS_SECRET_ACCESS_KEY",
"value": "{AWS_SECRET_ACCESS_KEY}"
}},
{{
"name": "AWS_STORAGE_BUCKET_NAME",
"value": "{AWS_STORAGE_BUCKET_NAME}"
}},
{{
"name": "CHALLENGE_PK",
"value": "{challenge_pk}"
}},
{{
"name": "CHALLENGE_QUEUE",
"value": "{queue_name}"
}},
{{
"name": "DJANGO_SERVER",
"value": "{DJANGO_SERVER}"
}},
{{
"name": "DJANGO_SETTINGS_MODULE",
"value": "settings.{ENV}"
}},
{{
"name": "DEBUG",
"value": "{DEBUG}"
}},
{{
"name": "EMAIL_HOST",
"value": "{EMAIL_HOST}"
}},
{{
"name": "EMAIL_HOST_PASSWORD",
"value": "{EMAIL_HOST_PASSWORD}"
}},
{{
"name": "EMAIL_HOST_USER",
"value": "{EMAIL_HOST_USER}"
}},
{{
"name": "EMAIL_PORT",
"value": "{EMAIL_PORT}"
}},
{{
"name": "EMAIL_USE_TLS",
"value": "{EMAIL_USE_TLS}"
}},
{{
"name": "MEMCACHED_LOCATION",
"value": "{MEMCACHED_LOCATION}"
}},
{{
"name": "PYTHONUNBUFFERED",
"value": "1"
}},
{{
"name": "RDS_DB_NAME",
"value": "{RDS_DB_NAME}"
}},
{{
"name": "RDS_HOSTNAME",
"value": "{RDS_HOSTNAME}"
}},
{{
"name": "RDS_PASSWORD",
"value": "{RDS_PASSWORD}"
}},
{{
"name": "RDS_USERNAME",
"value": "{RDS_USERNAME}"
}},
{{
"name": "RDS_PORT",
"value": "{RDS_PORT}"
}},
{{
"name": "SECRET_KEY",
"value": "{SECRET_KEY}"
}},
{{
"name": "SENTRY_URL",
"value": "{SENTRY_URL}"
}},
{{
"name": "AWS_SES_REGION_NAME",
"value": "{AWS_SES_REGION_NAME}"
}},
{{
"name": "AWS_SES_REGION_ENDPOINT",
"value": "{AWS_SES_REGION_ENDPOINT}"
}}
],
"workingDirectory": "/code",
"readonlyRootFilesystem": False,
"logConfiguration": {{
"logDriver": "awslogs",
"options": {{
"awslogs-group": "{log_group_name}",
"awslogs-region": "us-east-1",
"awslogs-stream-prefix": "{queue_name}",
"awslogs-create-group": "true",
}},
}},
}}
],
"requiresCompatibilities":[
"FARGATE"
],
"cpu": "{CPU}",
"memory": "{MEMORY}",
}}
"""
service_definition = """
{{
"cluster":"{CLUSTER}",
"serviceName":"{service_name}",
"taskDefinition":"{task_def_arn}",
"desiredCount":1,
"clientToken":"{client_token}",
"launchType":"FARGATE",
"platformVersion":"LATEST",
"networkConfiguration":{{
"awsvpcConfiguration": {{
"subnets": [
"{SUBNET_1}",
"{SUBNET_2}",
],
'securityGroups': [
"{SUBNET_SECURITY_GROUP}",
],
"assignPublicIp": "ENABLED"
}}
}},
"schedulingStrategy":"REPLICA",
"deploymentController":{{
"type": "ECS"
}},
}}
"""
update_service_args = """
{{
"cluster":"{CLUSTER}",
"service":"{service_name}",
"desiredCount":num_of_tasks,
"taskDefinition":"{task_def_arn}",
"forceNewDeployment":{force_new_deployment}
}}
"""
delete_service_args = """
{{
"cluster": "{CLUSTER}",
"service": "{service_name}",
"force": False
}}
"""
def get_code_upload_setup_meta_for_challenge(challenge_pk):
"""
Return the EKS cluster network and arn meta for a challenge
Arguments:
challenge_pk {int} -- challenge pk for which credentials are to be fetched
Returns:
code_upload_meta {dict} -- Dict containing cluster network and arn meta
"""
from .models import ChallengeEvaluationCluster
from .utils import get_challenge_model
challenge = get_challenge_model(challenge_pk)
if challenge.use_host_credentials:
challenge_evaluation_cluster = ChallengeEvaluationCluster.objects.get(
challenge=challenge
)
code_upload_meta = {
"SUBNET_1": challenge_evaluation_cluster.subnet_1_id,
"SUBNET_2": challenge_evaluation_cluster.subnet_2_id,
"SUBNET_SECURITY_GROUP": challenge_evaluation_cluster.security_group_id,
"EKS_NODEGROUP_ROLE_ARN": challenge_evaluation_cluster.node_group_arn_role,
"EKS_CLUSTER_ROLE_ARN": challenge_evaluation_cluster.eks_arn_role,
}
else:
code_upload_meta = {
"SUBNET_1": VPC_DICT["SUBNET_1"],
"SUBNET_2": VPC_DICT["SUBNET_2"],
"SUBNET_SECURITY_GROUP": VPC_DICT["SUBNET_SECURITY_GROUP"],
"EKS_NODEGROUP_ROLE_ARN": settings.EKS_NODEGROUP_ROLE_ARN,
"EKS_CLUSTER_ROLE_ARN": settings.EKS_CLUSTER_ROLE_ARN,
}
return code_upload_meta
def get_log_group_name(challenge_pk):
log_group_name = "challenge-pk-{}-{}-workers".format(
challenge_pk, settings.ENVIRONMENT
)
return log_group_name
def client_token_generator(challenge_pk):
"""
Returns a 32-character client token to ensure idempotency with create_service boto3 requests.
Parameters: None
Returns:
str: string of size 32 composed of digits and letters
"""
remaining_chars = 32 - len(str(challenge_pk))
random_char_string = "".join(
random.choices(string.ascii_letters + string.digits, k=remaining_chars)
)
client_token = f"{str(challenge_pk)}{random_char_string}"
return client_token
def register_task_def_by_challenge_pk(client, queue_name, challenge):
"""
Registers the task definition of the worker for a challenge, before creating a service.
Parameters:
client (boto3.client): the client used for making requests to ECS.
queue_name (str): the queue field of the Challenge model, used in many parameters of the task definition.
challenge (<class 'challenges.models.Challenge'>): The challenge object for which the task definition is being registered.
Returns:
dict: A dict of the task definition and its ARN if successful, and an error dictionary if not
"""
container_name = "worker_{}".format(queue_name)
code_upload_container_name = "code_upload_worker_{}".format(queue_name)
worker_cpu_cores = challenge.worker_cpu_cores
worker_memory = challenge.worker_memory
log_group_name = get_log_group_name(challenge.pk)
execution_role_arn = COMMON_SETTINGS_DICT["EXECUTION_ROLE_ARN"]
AWS_SES_REGION_NAME = settings.AWS_SES_REGION_NAME
AWS_SES_REGION_ENDPOINT = settings.AWS_SES_REGION_ENDPOINT
if execution_role_arn:
from .utils import get_aws_credentials_for_challenge
challenge_aws_keys = get_aws_credentials_for_challenge(challenge.pk)
if challenge.is_docker_based:
from .models import ChallengeEvaluationCluster
# Cluster detail to be used by code-upload-worker
try:
cluster_details = ChallengeEvaluationCluster.objects.get(
challenge=challenge
)
cluster_name = cluster_details.name
cluster_endpoint = cluster_details.cluster_endpoint
cluster_certificate = cluster_details.cluster_ssl
efs_id = cluster_details.efs_id
except ClientError as e:
logger.exception(e)
return e.response
# challenge host auth token to be used by code-upload-worker
token = JwtToken.objects.get(user=challenge.creator.created_by)
if challenge.is_static_dataset_docker_based_challenge:
definition = task_definition_static_code_upload_worker.format(
queue_name=queue_name,
container_name=container_name,
code_upload_container_name=code_upload_container_name,
ENV=ENV,
challenge_pk=challenge.pk,
auth_token=token.refresh_token,
cluster_name=cluster_name,
cluster_endpoint=cluster_endpoint,
certificate=cluster_certificate,
CPU=worker_cpu_cores,
MEMORY=worker_memory,
log_group_name=log_group_name,
EVALAI_DNS=EVALAI_DNS,
EFS_ID=efs_id,
AWS_SES_REGION_NAME=AWS_SES_REGION_NAME,
AWS_SES_REGION_ENDPOINT=AWS_SES_REGION_ENDPOINT,
**COMMON_SETTINGS_DICT,
**challenge_aws_keys,
)
else:
definition = task_definition_code_upload_worker.format(
queue_name=queue_name,
code_upload_container_name=code_upload_container_name,
ENV=ENV,
challenge_pk=challenge.pk,
auth_token=token.refresh_token,
cluster_name=cluster_name,
cluster_endpoint=cluster_endpoint,
certificate=cluster_certificate,
CPU=worker_cpu_cores,
MEMORY=worker_memory,
log_group_name=log_group_name,
EVALAI_DNS=EVALAI_DNS,
EFS_ID=efs_id,
**COMMON_SETTINGS_DICT,
**challenge_aws_keys,
)
else:
definition = task_definition.format(
queue_name=queue_name,
container_name=container_name,
ENV=ENV,
challenge_pk=challenge.pk,
CPU=worker_cpu_cores,
MEMORY=worker_memory,
log_group_name=log_group_name,
AWS_SES_REGION_NAME=AWS_SES_REGION_NAME,
AWS_SES_REGION_ENDPOINT=AWS_SES_REGION_ENDPOINT,
**COMMON_SETTINGS_DICT,
**challenge_aws_keys,
)
definition = eval(definition)
if not challenge.task_def_arn:
try:
response = client.register_task_definition(**definition)
if (
response["ResponseMetadata"]["HTTPStatusCode"]
== HTTPStatus.OK
):
task_def_arn = response["taskDefinition"][
"taskDefinitionArn"
]
challenge.task_def_arn = task_def_arn
challenge.save()
return response
except ClientError as e:
logger.exception(e)
return e.response
else:
message = "Error. Task definition already registered for challenge {}.".format(
challenge.pk
)
return {
"Error": message,
"ResponseMetadata": {"HTTPStatusCode": HTTPStatus.BAD_REQUEST},
}
else:
message = "Please ensure that the TASK_EXECUTION_ROLE_ARN is appropriately passed as an environment varible."
return {
"Error": message,
"ResponseMetadata": {"HTTPStatusCode": HTTPStatus.BAD_REQUEST},
}
def create_service_by_challenge_pk(client, challenge, client_token):
"""
Creates the worker service for a challenge, and sets the number of workers to one.
Parameters:
client (boto3.client): the client used for making requests to ECS
challenge (<class 'challenges.models.Challenge'>): The challenge object for which the worker service is being created.
client_token (str): The client token generated by client_token_generator()
Returns:
dict: The response returned by the create_service method from boto3. If unsuccessful, returns an error dictionary
"""
queue_name = challenge.queue
service_name = "{}_service".format(queue_name)
if (
challenge.workers is None
): # Verify if the challenge is new (i.e., service not yet created).
if challenge.task_def_arn == "" or challenge.task_def_arn is None:
response = register_task_def_by_challenge_pk(
client, queue_name, challenge
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
return response
task_def_arn = challenge.task_def_arn
definition = service_definition.format(
CLUSTER=COMMON_SETTINGS_DICT["CLUSTER"],
service_name=service_name,
task_def_arn=task_def_arn,
client_token=client_token,
**VPC_DICT,
)
definition = eval(definition)
try:
response = client.create_service(**definition)
if response["ResponseMetadata"]["HTTPStatusCode"] == HTTPStatus.OK:
challenge.workers = 1
challenge.save()
return response
except ClientError as e:
logger.exception(e)
return e.response
else:
message = "Worker service for challenge {} already exists. Please scale, stop or delete.".format(
challenge.pk
)
return {
"Error": message,
"ResponseMetadata": {"HTTPStatusCode": HTTPStatus.BAD_REQUEST},
}
def update_service_by_challenge_pk(
client, challenge, num_of_tasks, force_new_deployment=False
):
"""
Updates the worker service for a challenge, and scales the number of workers to num_of_tasks.
Parameters:
client (boto3.client): the client used for making requests to ECS
challenge (<class 'challenges.models.Challenge'>): The challenge object for which the worker service is being updated.
num_of_tasks (int): Number of workers to scale to for the challenge.
force_new_deployment (bool): Set True (mainly for restarting) to specify if you want to redeploy with the latest image from ECR. Default is False.
Returns:
dict: The response returned by the update_service method from boto3. If unsuccessful, returns an error dictionary
"""
queue_name = challenge.queue
service_name = "{}_service".format(queue_name)
task_def_arn = challenge.task_def_arn
kwargs = update_service_args.format(
CLUSTER=COMMON_SETTINGS_DICT["CLUSTER"],
service_name=service_name,
task_def_arn=task_def_arn,
force_new_deployment=force_new_deployment,
)
kwargs = eval(kwargs)
try:
response = client.update_service(**kwargs)
if response["ResponseMetadata"]["HTTPStatusCode"] == HTTPStatus.OK:
challenge.workers = num_of_tasks
challenge.save()
return response
except ClientError as e:
logger.exception(e)
return e.response
def delete_service_by_challenge_pk(challenge):
"""
Deletes the workers service of a challenge.
Before deleting, it scales down the number of workers in the service to 0, then proceeds to delete the service.
Parameters:
challenge (<class 'challenges.models.Challenge'>): The challenge object whose worker service is being deleted.
Returns:
dict: The response returned by the delete_service method from boto3
"""
client = get_boto3_client("ecs", aws_keys)
queue_name = challenge.queue
service_name = "{}_service".format(queue_name)
kwargs = delete_service_args.format(
CLUSTER=COMMON_SETTINGS_DICT["CLUSTER"],
service_name=service_name,
force=True,
)
kwargs = eval(kwargs)
try:
if challenge.workers != 0:
response = update_service_by_challenge_pk(
client, challenge, 0, False
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
return response
response = client.delete_service(**kwargs)
if response["ResponseMetadata"]["HTTPStatusCode"] == HTTPStatus.OK:
challenge.workers = None
challenge.save()
client.deregister_task_definition(
taskDefinition=challenge.task_def_arn
)
challenge.task_def_arn = ""
challenge.save()
return response
except ClientError as e:
logger.exception(e)
return e.response
def service_manager(
client, challenge, num_of_tasks=None, force_new_deployment=False
):
"""
This method determines if the challenge is new or not, and accordingly calls <update or create>_by_challenge_pk.
Called by: Start, Stop & Scale methods for multiple workers.
Parameters:
client (boto3.client): the client used for making requests to ECS.
challenge (<class 'challenges.models.Challenge'>): The challenge object for which the worker service is being created or updated.
num_of_tasks (int): The number of workers to scale to (relevant only if the challenge is not new).
default: None
Returns:
dict: The response returned by the respective functions update_service_by_challenge_pk or create_service_by_challenge_pk
"""
if challenge.workers is not None:
response = update_service_by_challenge_pk(
client, challenge, num_of_tasks, force_new_deployment
)
return response
else:
client_token = client_token_generator(challenge.pk)
response = create_service_by_challenge_pk(
client, challenge, client_token
)
return response
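# --- Illustrative sketch, not part of the original module ---
# Shows how the admin actions in this file (start_workers, stop_workers,
# scale_workers) are expected to drive service_manager. The challenge objects
# below are hypothetical; only the functions defined in this module are real.
#
#     client = get_boto3_client("ecs", aws_keys)
#     # New challenge (challenge.workers is None) -> create_service_by_challenge_pk
#     service_manager(client, challenge=new_challenge)
#     # Existing challenge -> update_service_by_challenge_pk scales to num_of_tasks
#     service_manager(client, challenge=running_challenge, num_of_tasks=3)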
def start_workers(queryset):
"""
The function called by the admin action method to start all the selected workers.
Calls the service_manager method. Before calling, checks if all the workers are inactive.
Parameters:
queryset (<class 'django.db.models.query.QuerySet'>): The queryset of selected challenges in the django admin page.
Returns:
dict: keys-> 'count': the number of workers successfully started.
'failures': a dict of all the failures with their error messages and the challenge pk
"""
if settings.DEBUG:
failures = []
for challenge in queryset:
failures.append(
{
"message": "Workers cannot be started on AWS ECS service in development environment",
"challenge_pk": challenge.pk,
}
)
return {"count": 0, "failures": failures}
client = get_boto3_client("ecs", aws_keys)
count = 0
failures = []
for challenge in queryset:
if (challenge.workers == 0) or (challenge.workers is None):
response = service_manager(
client, challenge=challenge, num_of_tasks=1
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
failures.append(
{
"message": response["Error"],
"challenge_pk": challenge.pk,
}
)
continue
count += 1
else:
response = "Please select challenge with inactive workers only."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
return {"count": count, "failures": failures}
def stop_workers(queryset):
"""
The function called by the admin action method to stop all the selected workers.
Calls the service_manager method. Before calling, verifies that the challenge is not new, and is active.
Parameters:
queryset (<class 'django.db.models.query.QuerySet'>): The queryset of selected challenges in the django admin page.
Returns:
dict: keys-> 'count': the number of workers successfully stopped.
'failures': a dict of all the failures with their error messages and the challenge pk
"""
if settings.DEBUG:
failures = []
for challenge in queryset:
failures.append(
{
"message": "Workers cannot be stopped on AWS ECS service in development environment",
"challenge_pk": challenge.pk,
}
)
return {"count": 0, "failures": failures}
client = get_boto3_client("ecs", aws_keys)
count = 0
failures = []
for challenge in queryset:
if (challenge.workers is not None) and (challenge.workers > 0):
response = service_manager(
client, challenge=challenge, num_of_tasks=0
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
failures.append(
{
"message": response["Error"],
"challenge_pk": challenge.pk,
}
)
continue
count += 1
else:
response = "Please select challenges with active workers only."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
return {"count": count, "failures": failures}
def scale_workers(queryset, num_of_tasks):
"""
The function called by the admin action method to scale all the selected workers.
Calls the service_manager method. Before calling, checks that the target scaling number is different from the current worker count.
Parameters:
queryset (<class 'django.db.models.query.QuerySet'>): The queryset of selected challenges in the django admin page.
num_of_tasks (int): The number of workers to scale each selected challenge to.
Returns:
dict: keys-> 'count': the number of workers successfully scaled.
'failures': a dict of all the failures with their error messages and the challenge pk
"""
if settings.DEBUG:
failures = []
for challenge in queryset:
failures.append(
{
"message": "Workers cannot be scaled on AWS ECS service in development environment",
"challenge_pk": challenge.pk,
}
)
return {"count": 0, "failures": failures}
client = get_boto3_client("ecs", aws_keys)
count = 0
failures = []
for challenge in queryset:
if challenge.workers is None:
response = "Please start worker(s) before scaling."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
continue
if num_of_tasks == challenge.workers:
response = "Please scale to a different number. Challenge has {} worker(s).".format(
num_of_tasks
)
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
continue
response = service_manager(
client, challenge=challenge, num_of_tasks=num_of_tasks
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
failures.append(
{"message": response["Error"], "challenge_pk": challenge.pk}
)
continue
count += 1
return {"count": count, "failures": failures}
def delete_workers(queryset):
"""
The function called by the admin action method to delete all the selected workers.
Calls the delete_service_by_challenge_pk method. Before calling, verifies that the challenge is not new.
Parameters:
queryset (<class 'django.db.models.query.QuerySet'>): The queryset of selected challenges in the django admin page.
Returns:
dict: keys-> 'count': the number of workers successfully deleted.
'failures': a dict of all the failures with their error messages and the challenge pk
"""
if settings.DEBUG:
failures = []
for challenge in queryset:
failures.append(
{
"message": "Workers cannot be deleted on AWS ECS service in development environment",
"challenge_pk": challenge.pk,
}
)
return {"count": 0, "failures": failures}
count = 0
failures = []
for challenge in queryset:
if challenge.workers is not None:
response = delete_service_by_challenge_pk(challenge=challenge)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
failures.append(
{
"message": response["Error"],
"challenge_pk": challenge.pk,
}
)
continue
count += 1
log_group_name = get_log_group_name(challenge.pk)
delete_log_group(log_group_name)
else:
response = "Please select challenges with active workers only."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
return {"count": count, "failures": failures}
def restart_workers(queryset):
"""
The function called by the admin action method to restart all the selected workers.
Calls the service_manager method. Before calling, verifies that the challenge worker(s) is(are) active.
Parameters:
queryset (<class 'django.db.models.query.QuerySet'>): The queryset of selected challenges in the django admin page.
Returns:
dict: keys-> 'count': the number of workers successfully restarted.
'failures': a dict of all the failures with their error messages and the challenge pk
"""
if settings.DEBUG:
failures = []
for challenge in queryset:
failures.append(
{
"message": "Workers cannot be restarted on AWS ECS service in development environment",
"challenge_pk": challenge.pk,
}
)
return {"count": 0, "failures": failures}
client = get_boto3_client("ecs", aws_keys)
count = 0
failures = []
for challenge in queryset:
if challenge.is_docker_based:
response = "Sorry. This feature is not available for code upload/docker based challenges."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
elif (challenge.workers is not None) and (challenge.workers > 0):
response = service_manager(
client,
challenge=challenge,
num_of_tasks=challenge.workers,
force_new_deployment=True,
)
if response["ResponseMetadata"]["HTTPStatusCode"] != HTTPStatus.OK:
failures.append(
{
"message": response["Error"],
"challenge_pk": challenge.pk,
}
)
continue
count += 1
else:
response = "Please select challenges with active workers only."
failures.append(
{"message": response, "challenge_pk": challenge.pk}
)
return {"count": count, "failures": failures}
def restart_workers_signal_callback(sender, instance, field_name, **kwargs):
"""
Called when either the evaluation_script or the test_annotation file for a challenge
is updated, to restart the challenge workers.
"""
if settings.DEBUG:
return
prev = getattr(instance, "_original_{}".format(field_name))
curr = getattr(instance, "{}".format(field_name))
if field_name == "evaluation_script":
instance._original_evaluation_script = curr
elif field_name == "test_annotation":
instance._original_test_annotation = curr
if prev != curr:
challenge = None
if field_name == "test_annotation":
challenge = instance.challenge
else:
challenge = instance
response = restart_workers([challenge])
count, failures = response["count"], response["failures"]
logger.info(
"The worker service for challenge {} was restarted, as {} was changed.".format(
challenge.pk, field_name
)
)
if count != 1:
logger.warning(
"Worker(s) for challenge {} couldn't restart! Error: {}".format(
challenge.id, failures[0]["message"]
)
)
else:
challenge_url = "{}/web/challenges/challenge-page/{}".format(
settings.EVALAI_API_SERVER, challenge.id
)
challenge_manage_url = (
"{}/web/challenges/challenge-page/{}/manage".format(
settings.EVALAI_API_SERVER, challenge.id
)
)
if field_name == "test_annotation":
file_updated = "Test Annotation"
elif field_name == "evaluation_script":
file_updated = "Evaluation script"
template_data = {
"CHALLENGE_NAME": challenge.title,
"CHALLENGE_MANAGE_URL": challenge_manage_url,
"CHALLENGE_URL": challenge_url,
"FILE_UPDATED": file_updated,
}
if challenge.image:
template_data["CHALLENGE_IMAGE_URL"] = challenge.image.url
template_id = settings.SENDGRID_SETTINGS.get("TEMPLATES").get(
"WORKER_RESTART_EMAIL"
)
# Send email notification only when inform_hosts is true
if challenge.inform_hosts:
emails = challenge.creator.get_all_challenge_host_email()
for email in emails:
send_email(
sender=settings.CLOUDCV_TEAM_EMAIL,
recipient=email,
template_id=template_id,
template_data=template_data,
)
def get_logs_from_cloudwatch(
log_group_name, log_stream_prefix, start_time, end_time, pattern
):
"""
To fetch logs of a container from cloudwatch within a specific time frame.
"""
client = get_boto3_client("logs", aws_keys)
logs = []
if settings.DEBUG:
logs = [
"The worker logs in the development environment are available on the terminal. Please use docker-compose logs -f worker to view the logs."
]
else:
try:
response = client.filter_log_events(
logGroupName=log_group_name,
logStreamNamePrefix=log_stream_prefix,
startTime=start_time,
endTime=end_time,
filterPattern=pattern,
)
for event in response["events"]:
logs.append(event["message"])
except Exception as e:
if e.response["Error"]["Code"] == "ResourceNotFoundException":
return logs
logger.exception(e)
return [
f"There is an error in displaying logs. Please find the full error traceback here {e}"
]
return logs
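# Hedged usage sketch (not from the original file): boto3's filter_log_events
# expects startTime/endTime as epoch milliseconds, so a caller would typically
# convert datetimes first. The "worker" stream prefix and the challenge object
# here are assumptions for illustration.
#
#     import datetime
#     end = datetime.datetime.utcnow()
#     start = end - datetime.timedelta(hours=1)
#     to_ms = lambda dt: int((dt - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
#     logs = get_logs_from_cloudwatch(
#         get_log_group_name(challenge.pk), "worker", to_ms(start), to_ms(end), ""
#     )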
def delete_log_group(log_group_name):
if settings.DEBUG:
pass
else:
try:
client = get_boto3_client("logs", aws_keys)
client.delete_log_group(logGroupName=log_group_name)
except Exception as e:
logger.exception(e)
@app.task
def create_eks_nodegroup(challenge, cluster_name):
"""
Creates a nodegroup when an EKS cluster is created by the EvalAI admin
Arguments:
instance {<class 'django.db.models.query.QuerySet'>} -- instance of the model calling the post hook
cluster_name {str} -- name of eks cluster
"""
from .utils import get_aws_credentials_for_challenge
for obj in serializers.deserialize("json", challenge):
challenge_obj = obj.object
environment_suffix = "{}-{}".format(challenge_obj.pk, settings.ENVIRONMENT)
nodegroup_name = "{}-{}-nodegroup".format(
challenge_obj.title.replace(" ", "-"), environment_suffix
)
challenge_aws_keys = get_aws_credentials_for_challenge(challenge_obj.pk)
client = get_boto3_client("eks", challenge_aws_keys)
cluster_meta = get_code_upload_setup_meta_for_challenge(challenge_obj.pk)
# TODO: Move the hardcoded cluster configuration such as the
# instance_type, subnets, AMI to challenge configuration later.
try:
response = client.create_nodegroup(
clusterName=cluster_name,
nodegroupName=nodegroup_name,
scalingConfig={
"minSize": challenge_obj.min_worker_instance,
"maxSize": challenge_obj.max_worker_instance,
"desiredSize": challenge_obj.desired_worker_instance,
},
diskSize=challenge_obj.worker_disk_size,
subnets=[cluster_meta["SUBNET_1"], cluster_meta["SUBNET_2"]],
instanceTypes=[challenge_obj.worker_instance_type],
amiType=challenge_obj.worker_ami_type,
nodeRole=cluster_meta["EKS_NODEGROUP_ROLE_ARN"],
)
logger.info("Nodegroup create: {}".format(response))
except ClientError as e:
logger.exception(e)
return
waiter = client.get_waiter("nodegroup_active")
waiter.wait(clusterName=cluster_name, nodegroupName=nodegroup_name)
construct_and_send_eks_cluster_creation_mail(challenge_obj)
# starting the code-upload-worker
client = get_boto3_client("ecs", aws_keys)
client_token = client_token_generator(challenge_obj.pk)
create_service_by_challenge_pk(client, challenge_obj, client_token)
@app.task
def setup_eks_cluster(challenge):
"""
Creates EKS and NodeGroup ARN roles
Arguments:
instance {<class 'django.db.models.query.QuerySet'>} -- instance of the model calling the post hook
"""
from .models import ChallengeEvaluationCluster
from .serializers import ChallengeEvaluationClusterSerializer
from .utils import get_aws_credentials_for_challenge
for obj in serializers.deserialize("json", challenge):
challenge_obj = obj.object
challenge_aws_keys = get_aws_credentials_for_challenge(challenge_obj.pk)
client = get_boto3_client("iam", challenge_aws_keys)
environment_suffix = "{}-{}".format(challenge_obj.pk, settings.ENVIRONMENT)
eks_role_name = "evalai-code-upload-eks-role-{}".format(environment_suffix)
eks_arn_role = None
try:
response = client.create_role(
RoleName=eks_role_name,
Description="Amazon EKS cluster role with managed policy",
AssumeRolePolicyDocument=json.dumps(
settings.EKS_CLUSTER_TRUST_RELATION
),
)
eks_arn_role = response["Role"]["Arn"]
except ClientError as e:
logger.exception(e)
return
waiter = client.get_waiter("role_exists")
waiter.wait(RoleName=eks_role_name)
try:
# Attach AWS managed EKS cluster policy to the role
response = client.attach_role_policy(
RoleName=eks_role_name,
PolicyArn=settings.EKS_CLUSTER_POLICY,
)
except ClientError as e:
logger.exception(e)
return
node_group_role_name = "evalai-code-upload-nodegroup-role-{}".format(
environment_suffix
)
node_group_arn_role = None
try:
response = client.create_role(
RoleName=node_group_role_name,
Description="Amazon EKS node group role with managed policy",
AssumeRolePolicyDocument=json.dumps(
settings.EKS_NODE_GROUP_TRUST_RELATION
),
)
node_group_arn_role = response["Role"]["Arn"]
except ClientError as e:
logger.exception(e)
return
waiter = client.get_waiter("role_exists")
waiter.wait(RoleName=node_group_role_name)
task_execution_policies = settings.EKS_NODE_GROUP_POLICIES
for policy_arn in task_execution_policies:
try:
# Attach AWS managed EKS worker node policy to the role
response = client.attach_role_policy(
RoleName=node_group_role_name,
PolicyArn=policy_arn,
)
except ClientError as e:
logger.exception(e)
return
# Create custom ECR all access policy and attach to node_group_role
ecr_all_access_policy_name = "AWS-ECR-Full-Access-{}".format(
environment_suffix
)
ecr_all_access_policy_arn = None
try:
response = client.create_policy(
PolicyName=ecr_all_access_policy_name,
PolicyDocument=json.dumps(settings.ECR_ALL_ACCESS_POLICY_DOCUMENT),
)
ecr_all_access_policy_arn = response["Policy"]["Arn"]
waiter = client.get_waiter("policy_exists")
waiter.wait(PolicyArn=ecr_all_access_policy_arn)
# Attach custom ECR policy
response = client.attach_role_policy(
RoleName=node_group_role_name, PolicyArn=ecr_all_access_policy_arn
)
except ClientError as e:
logger.exception(e)
return
try:
challenge_evaluation_cluster = ChallengeEvaluationCluster.objects.get(
challenge=challenge_obj
)
serializer = ChallengeEvaluationClusterSerializer(
challenge_evaluation_cluster,
data={
"eks_arn_role": eks_arn_role,
"node_group_arn_role": node_group_arn_role,
"ecr_all_access_policy_arn": ecr_all_access_policy_arn,
},
partial=True,
)
if serializer.is_valid():
serializer.save()
# Create eks cluster vpc and subnets
create_eks_cluster_subnets.delay(challenge)
except Exception as e:
logger.exception(e)
return
@app.task
def create_eks_cluster_subnets(challenge):
"""
Creates the VPC, subnets, security groups and EFS file system for the challenge's EKS cluster
Arguments:
instance {<class 'django.db.models.query.QuerySet'>} -- instance of the model calling the post hook
"""
from .models import ChallengeEvaluationCluster
from .serializers import ChallengeEvaluationClusterSerializer
from .utils import get_aws_credentials_for_challenge
for obj in serializers.deserialize("json", challenge):
challenge_obj = obj.object
challenge_aws_keys = get_aws_credentials_for_challenge(challenge_obj.pk)
environment_suffix = "{}-{}".format(challenge_obj.pk, settings.ENVIRONMENT)
client = get_boto3_client("ec2", challenge_aws_keys)
vpc_ids = []
try:
response = client.create_vpc(CidrBlock=challenge_obj.vpc_cidr)
vpc_ids.append(response["Vpc"]["VpcId"])
except ClientError as e:
logger.exception(e)
return
waiter = client.get_waiter("vpc_available")
waiter.wait(VpcIds=vpc_ids)
# Create internet gateway and attach to vpc
try:
# Enable DNS resolution for VPC
response = client.modify_vpc_attribute(
EnableDnsHostnames={"Value": True}, VpcId=vpc_ids[0]
)
response = client.create_internet_gateway()
internet_gateway_id = response["InternetGateway"]["InternetGatewayId"]
client.attach_internet_gateway(
InternetGatewayId=internet_gateway_id, VpcId=vpc_ids[0]
)
# Create and attach route table
response = client.create_route_table(VpcId=vpc_ids[0])
route_table_id = response["RouteTable"]["RouteTableId"]
client.create_route(
DestinationCidrBlock="0.0.0.0/0",
GatewayId=internet_gateway_id,
RouteTableId=route_table_id,
)
# Create subnets
subnet_ids = []
response = client.create_subnet(
CidrBlock=challenge_obj.subnet_1_cidr,
AvailabilityZone="us-east-1a",
VpcId=vpc_ids[0],
)
subnet_1_id = response["Subnet"]["SubnetId"]
subnet_ids.append(subnet_1_id)
response = client.create_subnet(
CidrBlock=challenge_obj.subnet_2_cidr,
AvailabilityZone="us-east-1b",
VpcId=vpc_ids[0],
)
subnet_2_id = response["Subnet"]["SubnetId"]
subnet_ids.append(subnet_2_id)
waiter = client.get_waiter("subnet_available")
waiter.wait(SubnetIds=subnet_ids)
# Creating managed node group needs subnets to auto assign ip v4
for subnet_id in subnet_ids:
response = client.modify_subnet_attribute(
MapPublicIpOnLaunch={
"Value": True,
},
SubnetId=subnet_id,
)
# Associate route table with subnets
response = client.associate_route_table(
RouteTableId=route_table_id,
SubnetId=subnet_1_id,
)
response = client.associate_route_table(
RouteTableId=route_table_id,
SubnetId=subnet_2_id,
)
# Create security group
response = client.create_security_group(
GroupName="EvalAI code upload challenge",
Description="EvalAI code upload challenge worker group",
VpcId=vpc_ids[0],
)
security_group_id = response["GroupId"]
response = client.create_security_group(
GroupName="evalai-code-upload-challenge-efs-{}".format(
environment_suffix
),
Description="EKS nodegroup EFS",
VpcId=vpc_ids[0],
)
efs_security_group_id = response["GroupId"]
response = client.authorize_security_group_ingress(
GroupId=efs_security_group_id,
IpPermissions=[
{
"FromPort": 2049,
"IpProtocol": "tcp",
"IpRanges": [
{
"CidrIp": challenge_obj.vpc_cidr,
},
],
"ToPort": 2049,
}
],
)
# Create EFS
efs_client = get_boto3_client("efs", challenge_aws_keys)
efs_creation_token = str(uuid.uuid4())[:64]
response = efs_client.create_file_system(
CreationToken=efs_creation_token,
)
efs_id = response["FileSystemId"]
challenge_evaluation_cluster = ChallengeEvaluationCluster.objects.get(
challenge=challenge_obj
)
serializer = ChallengeEvaluationClusterSerializer(
challenge_evaluation_cluster,
data={
"vpc_id": vpc_ids[0],
"internet_gateway_id": internet_gateway_id,
"route_table_id": route_table_id,
"security_group_id": security_group_id,
"subnet_1_id": subnet_1_id,
"subnet_2_id": subnet_2_id,
"efs_security_group_id": efs_security_group_id,
"efs_id": efs_id,
"efs_creation_token": efs_creation_token,
},
partial=True,
)
if serializer.is_valid():
serializer.save()
# Create eks cluster
create_eks_cluster.delay(challenge)
except ClientError as e:
logger.exception(e)
return
@app.task
def create_eks_cluster(challenge):
"""
Called when a challenge is approved by the EvalAI admin;
creates the EKS cluster and then calls the create_eks_nodegroup function
Arguments:
sender {type} -- model field called the post hook
instance {<class 'django.db.models.query.QuerySet'>} -- instance of the model calling the post hook
"""
from .models import ChallengeEvaluationCluster
from .serializers import ChallengeEvaluationClusterSerializer
from .utils import get_aws_credentials_for_challenge
for obj in serializers.deserialize("json", challenge):
challenge_obj = obj.object
environment_suffix = "{}-{}".format(challenge_obj.pk, settings.ENVIRONMENT)
cluster_name = "{}-{}-cluster".format(
challenge_obj.title.replace(" ", "-"), environment_suffix
)
if challenge_obj.approved_by_admin and challenge_obj.is_docker_based:
challenge_aws_keys = get_aws_credentials_for_challenge(
challenge_obj.pk
)
client = get_boto3_client("eks", challenge_aws_keys)
cluster_meta = get_code_upload_setup_meta_for_challenge(
challenge_obj.pk
)
try:
response = client.create_cluster(
name=cluster_name,
version="1.15",
roleArn=cluster_meta["EKS_CLUSTER_ROLE_ARN"],
resourcesVpcConfig={
"subnetIds": [
cluster_meta["SUBNET_1"],
cluster_meta["SUBNET_2"],
],
"securityGroupIds": [
cluster_meta["SUBNET_SECURITY_GROUP"]
],
},
)
waiter = client.get_waiter("cluster_active")
waiter.wait(name=cluster_name)
# creating kubeconfig
cluster = client.describe_cluster(name=cluster_name)
cluster_cert = cluster["cluster"]["certificateAuthority"]["data"]
cluster_ep = cluster["cluster"]["endpoint"]
cluster_config = {
"apiVersion": "v1",
"kind": "Config",
"clusters": [
{
"cluster": {
"server": str(cluster_ep),
"certificate-authority-data": str(cluster_cert),
},
"name": "kubernetes",
}
],
"contexts": [
{
"context": {"cluster": "kubernetes", "user": "aws"},
"name": "aws",
}
],
"current-context": "aws",
"preferences": {},
"users": [
{
"name": "aws",
"user": {
"exec": {
"apiVersion": "client.authentication.k8s.io/v1alpha1",
"command": "heptio-authenticator-aws",
"args": ["token", "-i", cluster_name],
}
},
}
],
}
# Write in YAML.
config_text = yaml.dump(cluster_config, default_flow_style=False)
config_file = NamedTemporaryFile(delete=True)
config_file.write(config_text.encode())
challenge_evaluation_cluster = (
ChallengeEvaluationCluster.objects.get(challenge=challenge_obj)
)
efs_client = get_boto3_client("efs", challenge_aws_keys)
# Create mount targets for subnets
mount_target_ids = []
response = efs_client.create_mount_target(
FileSystemId=challenge_evaluation_cluster.efs_id,
SubnetId=challenge_evaluation_cluster.subnet_1_id,
SecurityGroups=[
challenge_evaluation_cluster.efs_security_group_id
],
)
mount_target_ids.append(response["MountTargetId"])
response = efs_client.create_mount_target(
FileSystemId=challenge_evaluation_cluster.efs_id,
SubnetId=challenge_evaluation_cluster.subnet_2_id,
SecurityGroups=[
challenge_evaluation_cluster.efs_security_group_id
],
)
mount_target_ids.append(response["MountTargetId"])
serializer = ChallengeEvaluationClusterSerializer(
challenge_evaluation_cluster,
data={
"name": cluster_name,
"cluster_endpoint": cluster_ep,
"cluster_ssl": cluster_cert,
"efs_mount_target_ids": mount_target_ids,
},
partial=True,
)
if serializer.is_valid():
serializer.save()
# Creating nodegroup
create_eks_nodegroup.delay(challenge, cluster_name)
return response
except ClientError as e:
logger.exception(e)
return
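# Summary note (added for clarity, not new behavior): for a code-upload challenge
# the Celery tasks in this file chain as
# setup_eks_cluster -> create_eks_cluster_subnets -> create_eks_cluster -> create_eks_nodegroup,
# each stage invoking the next via .delay() once its AWS resources are active.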
def challenge_approval_callback(sender, instance, field_name, **kwargs):
"""This is to check if a challenge has been approved or disapproved since last time.
On approval of a challenge, it launches a worker on Fargate.
On disapproval, it scales down the workers to 0, and deletes the challenge's service on Fargate.
Arguments:
sender -- The model which initiated this callback (Challenge)
instance {<class 'django.db.models.query.QuerySet'>} -- instance of the model (a challenge object)
field_name {str} -- The name of the field to check for a change (approved_by_admin)
"""
prev = getattr(instance, "_original_{}".format(field_name))
curr = getattr(instance, "{}".format(field_name))
challenge = instance
challenge._original_approved_by_admin = curr
if not challenge.is_docker_based and challenge.remote_evaluation is False:
if curr and not prev:
if not challenge.workers:
response = start_workers([challenge])
count, failures = response["count"], response["failures"]
if not count:
logger.error(
"Worker for challenge {} couldn't start! Error: {}".format(
challenge.id, failures[0]["message"]
)
)
else:
construct_and_send_worker_start_mail(challenge)
if prev and not curr:
if challenge.workers:
response = delete_workers([challenge])
count, failures = response["count"], response["failures"]
if not count:
logger.error(
"Worker for challenge {} couldn't be deleted! Error: {}".format(
challenge.id, failures[0]["message"]
)
)
|
[] |
[] |
[
"SENTRY_URL",
"CIDR",
"AWS_STORAGE_BUCKET_NAME",
"SUBNET_2",
"CLUSTER",
"AWS_DEFAULT_REGION",
"DJANGO_SERVER",
"EXECUTION_ROLE_ARN",
"SUBNET_SECURITY_GROUP",
"EVALAI_API_SERVER",
"MEMCACHED_LOCATION",
"AWS_ACCOUNT_ID",
"SUBNET_1",
"SERVICE_DNS",
"AWS_SECRET_ACCESS_KEY",
"WORKER_IMAGE",
"AWS_ACCESS_KEY_ID",
"DJANGO_SETTINGS_MODULE",
"CODE_UPLOAD_WORKER_IMAGE"
] |
[]
|
["SENTRY_URL", "CIDR", "AWS_STORAGE_BUCKET_NAME", "SUBNET_2", "CLUSTER", "AWS_DEFAULT_REGION", "DJANGO_SERVER", "EXECUTION_ROLE_ARN", "SUBNET_SECURITY_GROUP", "EVALAI_API_SERVER", "MEMCACHED_LOCATION", "AWS_ACCOUNT_ID", "SUBNET_1", "SERVICE_DNS", "AWS_SECRET_ACCESS_KEY", "WORKER_IMAGE", "AWS_ACCESS_KEY_ID", "DJANGO_SETTINGS_MODULE", "CODE_UPLOAD_WORKER_IMAGE"]
|
python
| 19 | 0 | |
library/python/testing/yatest_lib/ya.py
|
import os
import sys
import logging
import json
try:
import pytest
except ImportError:
pass
from .tools import to_str
from .external import ExternalDataInfo
TESTING_OUT_DIR_NAME = "testing_out_stuff" # XXX import from test.const
yatest_logger = logging.getLogger("ya.test")
class RunMode(object):
Run = "run"
List = "list"
class TestMisconfigurationException(Exception):
pass
class Ya(object):
"""
Adds integration with ya, helps in finding dependencies
"""
def __init__(
self,
mode=None,
source_root=None,
build_root=None,
dep_roots=None,
output_dir=None,
test_params=None,
context=None,
python_path=None,
valgrind_path=None,
gdb_path=None,
data_root=None,
):
context_file_path = os.environ.get("YA_TEST_CONTEXT_FILE", None)
if context_file_path:
with open(context_file_path, 'r') as afile:
test_context = json.load(afile)
context_runtime = test_context["runtime"]
context_internal = test_context.get("internal", {})
context_build = test_context.get("build", {})
else:
context_runtime = {}
context_internal = {}
context_build = {}
self._mode = mode
self._build_root = to_str(context_runtime.get("build_root", "")) or build_root
self._source_root = to_str(context_runtime.get("source_root", "")) or source_root or self._detect_source_root()
self._output_dir = to_str(context_runtime.get("output_path", "")) or output_dir or self._detect_output_root()
if not self._output_dir:
raise Exception("Run ya make -t before running test binary")
if not self._source_root:
logging.warning("Source root was not set neither determined, use --source-root to set it explicitly")
if not self._build_root:
if self._source_root:
self._build_root = self._source_root
else:
logging.warning("Build root was not set neither determined, use --build-root to set it explicitly")
if data_root:
self._data_root = data_root
elif self._source_root:
self._data_root = os.path.abspath(os.path.join(self._source_root, "..", "arcadia_tests_data"))
self._dep_roots = dep_roots
self._python_path = to_str(context_runtime.get("python_bin", "")) or python_path
self._valgrind_path = valgrind_path
self._gdb_path = to_str(context_runtime.get("gdb_bin", "")) or gdb_path
self._test_params = {}
self._context = {}
self._test_item_node_id = None
ram_drive_path = to_str(context_runtime.get("ram_drive_path", ""))
if ram_drive_path:
self._test_params["ram_drive_path"] = ram_drive_path
if test_params:
self._test_params.update(dict(x.split('=', 1) for x in test_params))
self._test_params.update(context_runtime.get("test_params", {}))
self._context["project_path"] = context_runtime.get("project_path")
self._context["modulo"] = context_runtime.get("split_count", 1)
self._context["modulo_index"] = context_runtime.get("split_index", 0)
self._context["sanitize"] = context_build.get("sanitizer")
self._context["ya_trace_path"] = context_internal.get("trace_file")
self._env_file = context_internal.get("env_file")
if context:
self._context.update(context)
@property
def source_root(self):
return self._source_root
@property
def data_root(self):
return self._data_root
@property
def build_root(self):
return self._build_root
@property
def dep_roots(self):
return self._dep_roots
@property
def output_dir(self):
return self._output_dir
@property
def python_path(self):
return self._python_path or sys.executable
@property
def valgrind_path(self):
if not self._valgrind_path:
raise ValueError("path to valgrind was not pass correctly, use --valgrind-path to fix it")
return self._valgrind_path
@property
def gdb_path(self):
return self._gdb_path
@property
def env_file(self):
return self._env_file
def get_binary(self, *path):
assert self._build_root, "Build root was neither set nor determined, use --build-root to set it explicitly"
path = list(path)
if os.name == "nt":
if not path[-1].endswith(".exe"):
path[-1] += ".exe"
target_dirs = [self.build_root]
# Search for binaries within PATH dirs to be able to get path to the binaries specified by basename for exectests
if 'PATH' in os.environ:
target_dirs += os.environ['PATH'].split(':')
for target_dir in target_dirs:
binary_path = os.path.join(target_dir, *path)
if os.path.exists(binary_path):
yatest_logger.debug("Binary was found by %s", binary_path)
return binary_path
error_message = "Cannot find binary '{binary}': make sure it was added in the DEPENDS section".format(binary=path)
yatest_logger.debug(error_message)
if self._mode == RunMode.Run:
raise TestMisconfigurationException(error_message)
def file(self, path, diff_tool=None, local=False, diff_file_name=None, diff_tool_timeout=None):
return ExternalDataInfo.serialize_file(path, diff_tool=diff_tool, local=local, diff_file_name=diff_file_name, diff_tool_timeout=diff_tool_timeout)
def get_param(self, key, default=None):
return self._test_params.get(key, default)
def get_param_dict_copy(self):
return dict(self._test_params)
def get_context(self, key):
return self._context.get(key)
def _detect_source_root(self):
root = None
try:
import library.python.find_root
# try to determine source root from cwd
cwd = os.getcwd()
root = library.python.find_root.detect_root(cwd)
if not root:
# try to determine root pretending we are in the test work dir made from --keep-temps run
env_subdir = os.path.join("environment", "arcadia")
root = library.python.find_root.detect_root(cwd, detector=lambda p: os.path.exists(os.path.join(p, env_subdir)))
except ImportError:
logging.warning("Unable to import library.python.find_root")
return root
def _detect_output_root(self):
# if run from kept test working dir
if os.path.exists(TESTING_OUT_DIR_NAME):
return TESTING_OUT_DIR_NAME
# if run from source dir
if sys.version_info.major == 3:
test_results_dir = "py3test"
else:
test_results_dir = "pytest"
test_results_output_path = os.path.join("test-results", test_results_dir, TESTING_OUT_DIR_NAME)
if os.path.exists(test_results_output_path):
return test_results_output_path
if os.path.exists(os.path.dirname(test_results_output_path)):
os.mkdir(test_results_output_path)
return test_results_output_path
return None
def set_test_item_node_id(self, node_id):
self._test_item_node_id = node_id
def get_test_item_node_id(self):
assert self._test_item_node_id
return self._test_item_node_id
def set_metric_value(self, name, val):
node_id = self.get_test_item_node_id()
if node_id not in pytest.config.test_metrics:
pytest.config.test_metrics[node_id] = {}
pytest.config.test_metrics[node_id][name] = val
def get_metric_value(self, name, default=None):
res = pytest.config.test_metrics.get(self.get_test_item_node_id(), {}).get(name)
if res is None:
return default
return res
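# Minimal usage sketch (illustration only; the paths and binary name below are hypothetical):
#
#     ya = Ya(mode=RunMode.Run, source_root="/arcadia",
#             build_root="/arcadia/build", output_dir="/tmp/testing_out_stuff")
#     tool = ya.get_binary("devtools", "dummy", "dummy")  # resolved under build_root or PATH
#     ram_drive = ya.get_param("ram_drive_path")          # None unless provided by the test context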
|
[] |
[] |
[
"PATH",
"YA_TEST_CONTEXT_FILE"
] |
[]
|
["PATH", "YA_TEST_CONTEXT_FILE"]
|
python
| 2 | 0 | |
app.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2015, Code for America
# This is open source software, released under a standard 3-clause
# BSD-style license; see the file LICENSE for details.
import os
import datetime
import re
from flask import Flask, render_template, request, abort, redirect, url_for, make_response, session, flash
from werkzeug.contrib.atom import AtomFeed
import requests
import iso8601
import pytz
import updater
import open311tools
__version__ = '1.0.2'
# Config
DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'configuration.py')
# Quick-start config. You should really put something in
# ./configuration.py or set the SRTRACKER_CONFIGURATION env var instead.
DEBUG = True
OPEN311_SERVER = 'http://localhost:5000'
OPEN311_API_KEY = ''
PASSWORD_PROTECTED = False
SECRET_KEY = 'please_please_change_this!'
app = Flask(__name__)
@app.before_request
def password_protect():
# don't password-protect images (for e-mail!)
if app.config['PASSWORD_PROTECTED'] and not request.path.startswith('/static/img'):
auth = request.authorization
if not auth or auth.password != app.config['PASSWORD']:
# Tell the browser to do basic auth
return make_response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
#--------------------------------------------------------------------------
# ROUTES
#--------------------------------------------------------------------------
@app.route("/", defaults={'page': 1, 'service_code': ''})
@app.route("/<int:page>", defaults={'service_code': ''})
@app.route("/<int:page>/<service_code>")
def index(page, service_code):
if 'filter' in request.args:
service_code = request.args['filter']
url = '%s/requests.json' % app.config['OPEN311_SERVER']
recent_sr_timeframe = app.config.get('RECENT_SRS_TIME')
# If SRS_PAGE_SIZE is set, use paging. Otherwise, fall back to a non-paged list from MAX_RECENT_SRS
page_size = app.config.get('SRS_PAGE_SIZE')
paged = page_size > 0
if not paged:
page_size = app.config.get('MAX_RECENT_SRS', 50)
page = 1
services_list = open311tools.services(app.config['OPEN311_SERVER'], app.config['OPEN311_API_KEY'])
service_name = ''
for service in services_list:
if service_code == service['service_code']:
service_name = service['service_name']
break
if not service_name:
service_code = ''
params = {
'extensions': 'true',
'page_size': page_size,
'page': page,
'service_code': service_code
}
if recent_sr_timeframe:
start_datetime = datetime.datetime.utcnow() - datetime.timedelta(seconds=recent_sr_timeframe)
params['start_date'] = start_datetime.isoformat() + 'Z'
if app.config['OPEN311_API_KEY']:
params['api_key'] = app.config['OPEN311_API_KEY']
r = requests.get(url, params=params)
if r.status_code != 200:
app.logger.error('OPEN311: Failed to load recent requests from Open311 server. Status Code: %s, Response: %s', r.status_code, r.text)
service_requests = None
else:
# need to slice with page_size in case an endpoint doesn't support page_size in its API (it's non-standard)
service_requests = r.json[:page_size]
# we might receive SRs that were updated in the future (!); pretend like those updates were just now.
# fixes https://github.com/codeforamerica/srtracker/issues/80
now = datetime.datetime.utcnow()
for sr in service_requests:
if 'updated_datetime' in sr:
# parse and ensure the date is naive for comparison to utcnow
updated = iso8601.parse_date(sr['updated_datetime']) \
.astimezone(pytz.utc).replace(tzinfo=None)
sr['updated_datetime'] = min(now, updated)
return render_app_template('index.html',
service_requests = service_requests,
page = page,
services_list = services_list,
service_code = service_code,
service_name = service_name)
@app.route("/requests/")
def request_search():
if 'request_id' in request.args:
return redirect(url_for('show_request', request_id=request.args['request_id']))
else:
abort(404)
@app.route("/requests/<request_id>", methods=["GET", "POST"])
def show_request(request_id):
request_id = request_id.lstrip('#')
# receive subscription
form_errors = []
submitted_email = None
if request.method == 'POST':
submitted_email = request.form.get('update_email')
if submitted_email:
success = subscribe_to_sr(request_id, submitted_email)
if not success:
form_errors.append('Please use a valid e-mail address.')
# TODO: Should probably use Three or something nice for this...
url = '%s/requests/%s.json' % (app.config['OPEN311_SERVER'], request_id)
params = {'extensions': 'true', 'legacy': 'false'}
if app.config['OPEN311_API_KEY']:
params['api_key'] = app.config['OPEN311_API_KEY']
r = requests.get(url, params=params)
if r.status_code == 404:
# TODO: how to generalize this?
# Chicago's SR IDs are always \d\d-\d{8}, if we get just digits, reformat and try again
request_id_digits = re.sub(r'\D', '', request_id)
if len(request_id_digits) == 8:
# Try prepending the year if it's only 8 digits
request_id_digits = datetime.date.today().strftime('%y') + request_id_digits
if len(request_id_digits) == 10:
reformatted = '%s-%s' % (request_id_digits[:2], request_id_digits[2:])
if reformatted != request_id:
return redirect(url_for('show_request', request_id=reformatted))
# It would be nice to log this for analytical purposes (what requests are being checked that we can't show?)
# but that would be better done through GA or KISS Metrics than through server logging
services = open311tools.services(app.config['OPEN311_SERVER'], app.config['OPEN311_API_KEY'])
return render_app_template('error_no_sr.html', request_id=request_id, services=services), 404
elif r.status_code != 200:
app.logger.error('OPEN311: Error (not 404) loading data for SR %s', request_id)
return render_app_template('error_311_api.html', request_id=request_id), 500
srs = r.json
if srs:
sr = fixup_sr(srs[0], request_id)
if 'requested_datetime' in sr:
sr['requested_datetime'] = iso8601.parse_date(sr['requested_datetime'])
# sometimes an SR doesn't include notes even though there should always be an "opened" note
if 'notes' not in sr:
sr['notes'] = []
relevant_notes = 0
for note in sr['notes']:
note['datetime'] = iso8601.parse_date(note['datetime'])
if note['type'] in ('follow_on', 'follow_on_created', 'activity', 'closed'):
relevant_notes += 1
# add follow-on closure data, fix types, etc, etc
by_id = {}
follow_on_open_count = 0
follow_on_close_count = 0
for note in sr['notes']:
if note['type'] in ('follow_on', 'follow_on_created', 'follow_on_closed'):
note_sr_id = note['extended_attributes']['service_request_id']
# old-style is just "follow_on" for everything related to follow-ons
# new-style is "follow_on_created" and "follow_on_closed"
# update old notes so templates don't get crazy complicated :(
if note['type'] == 'follow_on_created' or note['description'].endswith('Created'):
note['type'] = 'follow_on_created'
follow_on_open_count += 1
by_id[note_sr_id] = note
elif note['type'] == 'follow_on_closed' or note['description'].endswith('Closed'):
follow_on_close_count += 1
note['type'] = 'follow_on_closed'
if note_sr_id in by_id:
original = by_id[note_sr_id]
original['extended_attributes']['closed_datetime'] = note['datetime']
# if we hit any follow_on_opened notes
if follow_on_open_count > 0:
# remove the notes that claim the request is closed
sr['notes'] = [n for n in sr['notes'] if not n['type'] == 'closed']
# set the request to open
sr['status'] = 'open'
# if we hit as many follow_on_closed as follow_on_opened notes, then request is really closed
if follow_on_open_count == follow_on_close_count:
# set the request status to closed
sr['status'] = 'closed'
tmp_note = {}
# add a closing note
tmp_note['type'] = 'closed'
tmp_note['summary'] = 'Request Completed'
# this is brittle, but shouldn't break
tmp_datetime = sorted([n['extended_attributes']['closed_datetime'] for n in by_id.values()])
# set the closed datetime to be the datetime of the last-closed follow-on
tmp_note['datetime'] = tmp_datetime[-1]
# add the extra note
sr['notes'].append(tmp_note)
# if there's no activity yet, show 'under review'
if relevant_notes == 0:
sr['notes'].append({
'type': 'activity',
'summary': 'Under review by %s staff' % sr.get('agency_responsible', '')
})
subscribed = False
if sr['status'] == 'open' and session.get('addr', None):
# TODO: when subscription service supports more than e-mail,
# we should probably be able to show all your subscriptions here
subscribed = updater.subscription_exists(request_id, 'email', session.get('addr', ''))
# test media
# sr['media_url'] = sr['media_url'] or 'http://farm5.staticflickr.com/4068/4286605571_c1a1751fdc_n.jpg'
body = render_app_template('service_request.html', sr=sr, subscribed=subscribed, errors=form_errors, submitted_email=submitted_email)
return (body, 200, None)
else:
return render_app_template('error_no_sr.html', request_id=request_id), 404
@app.route("/subscribe/<request_id>", methods=["POST"])
def subscribe(request_id):
email = request.form.get('update_email')
if email:
success = subscribe_to_sr(request_id, email)
if not success:
flash('Please use a valid e-mail address.', 'error')
return redirect(url_for('show_request', request_id=request_id))
@app.route("/unsubscribe/<subscription_key>", methods=["GET", "POST"])
def unsubscribe(subscription_key):
subscription = updater.subscription_for_key(subscription_key)
if subscription:
sr_id = subscription.sr_id
updater.unsubscribe_with_key(subscription_key)
destination = url_for('show_request', request_id=sr_id)
else:
destination = url_for('index')
flash(u'You’ve been unsubscribed from this service request. You will no longer receive e-mails when it is updated.')
return redirect(destination)
#--------------------------------------------------------------------------
# SYNDICATION
#--------------------------------------------------------------------------
@app.route('/recent.atom')
def recent_feed():
atom_size = app.config.get('ATOM_SIZE', 25)
url = '%s/requests.json' % app.config['OPEN311_SERVER']
recent_sr_timeframe = app.config.get('RECENT_SRS_TIME')
params = {
'extensions': 'true',
'page_size': atom_size
}
if recent_sr_timeframe:
start_datetime = datetime.datetime.utcnow() - datetime.timedelta(seconds=recent_sr_timeframe)
params['start_date'] = start_datetime.isoformat() + 'Z'
if app.config['OPEN311_API_KEY']:
params['api_key'] = app.config['OPEN311_API_KEY']
r = requests.get(url, params=params)
if r.status_code != 200:
app.logger.error('OPEN311: Failed to load recent requests from Open311 server. Status Code: %s, Response: %s', r.status_code, r.text)
service_requests = None
else:
# need to slice with atom_size in case an endpoint doesn't support page_size
service_requests = r.json[:atom_size]
# generate feed
feed = AtomFeed('Recently Updated Service Requests',
feed_url=request.url, url=request.url_root)
if service_requests:
for sr in service_requests:
if 'service_request_id' in sr:
sr['requested_datetime'] = iso8601.parse_date(sr['requested_datetime'])
sr['updated_datetime'] = iso8601.parse_date(sr['updated_datetime'])
title = '%s #%s' % (sr['service_name'], sr['service_request_id'])
# in principle, this could be the result of a templating operation
body = sr.get('description','')
if body:
body += '<br /><br />'
body += sr['address']
feed.add(title,
unicode(body),
content_type='html',
author=sr['agency_responsible'],
url=url_for('show_request',
request_id=sr['service_request_id']),
updated=sr['updated_datetime'],
published=sr['requested_datetime'])
return feed.get_response()
#--------------------------------------------------------------------------
# ERRORS
#--------------------------------------------------------------------------
@app.errorhandler(404)
def page_not_found(error):
return render_app_template('error_404.html'), 404
@app.errorhandler(500)
def generic_error(error):
return render_app_template('error_generic.html'), 500
#--------------------------------------------------------------------------
# FILTERS
#--------------------------------------------------------------------------
# Friendly time by Sean Vieira (http://flask.pocoo.org/snippets/33/)
@app.template_filter()
def friendly_time(dt, past_="ago", future_="from now", default="just now"):
"""
Returns string representing "time since"
or "time until" e.g.
3 days ago, 5 hours from now etc.
"""
if dt is None:
return ''
if isinstance(dt, basestring):
dt = iso8601.parse_date(dt)
# ensure the date is naive for comparison to utcnow
if dt.tzinfo:
dt = dt.astimezone(pytz.utc).replace(tzinfo=None)
now = datetime.datetime.utcnow()
if now > dt:
diff = now - dt
dt_is_past = True
else:
diff = dt - now
dt_is_past = False
periods = (
(diff.days / 365, "year", "years"),
(diff.days / 30, "month", "months"),
(diff.days / 7, "week", "weeks"),
(diff.days, "day", "days"),
(diff.seconds / 3600, "hour", "hours"),
(diff.seconds / 60, "minute", "minutes"),
(diff.seconds, "second", "seconds"),
)
for period, singular, plural in periods:
if period:
return "%d %s %s" % (period,
singular if period == 1 else plural,
past_ if dt_is_past else future_)
return default
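# Illustrative outputs (not in the original source), assuming naive UTC datetimes
# and now = datetime.datetime.utcnow():
#
#     friendly_time(now - datetime.timedelta(days=3))   # -> "3 days ago"
#     friendly_time(now - datetime.timedelta(hours=5))  # -> "5 hours ago"
#     friendly_time(now)                                # -> "just now"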
state_pattern = re.compile(r'\b(\w\w)(,?\s*\d{5}(?:-\d{4})?)?$')
@app.template_filter()
def title_address(address):
'''Slightly improved title() method for address strings
Makes sure state abbreviations are upper-case.'''
titled = address.title()
titled = state_pattern.sub(lambda match: match.group(1).upper() + (match.group(2) or ''), titled)
return titled
#--------------------------------------------------------------------------
# UTILITIES
#--------------------------------------------------------------------------
def render_app_template(template, **kwargs):
'''Add some goodies to all templates.'''
if 'config' not in kwargs:
kwargs['config'] = app.config
if '__version__' not in kwargs:
kwargs['__version__'] = __version__
return render_template(template, **kwargs)
def fixup_sr(sr, request_id=None):
'''
Fix up an SR to try and ensure some basic info.
(In Chicago's API, any field can be missing, even if it's required.)
'''
remove_blacklisted_fields(sr)
if 'service_request_id' not in sr:
sr['service_request_id'] = request_id or sr.get('token', 'UNKNOWN')
if 'status' not in sr:
sr['status'] = 'open'
if 'service_name' not in sr:
sr['service_name'] = 'Miscellaneous Services'
return sr
def remove_blacklisted_fields(sr):
blacklist = app.config.get('SR_FIELD_BLACKLIST')
if blacklist:
for field in blacklist:
if field in sr:
del sr[field]
def subscribe_to_sr(request_id, email):
# validate e-mail
match = re.match(r'[A-Z0-9._%+\-]+@[A-Z0-9.\-]+\.[A-Z]{2,4}$', email, re.IGNORECASE)
if match:
key = updater.subscribe(request_id, 'email', email)
if key:
# TODO: should we use the subscription key instead?
session['addr'] = email
session.permanent = True
return True
else:
app.logger.error('Error creating a subscription for %s on %s', email, request_id)
return False
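# Hedged examples (illustration only; the request id is hypothetical) of what the
# validation regex above accepts and rejects:
#
#     subscribe_to_sr("12-00000001", "user@example.com")  # address matches the pattern
#     subscribe_to_sr("12-00000001", "not-an-email")      # fails validation, returns False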
#--------------------------------------------------------------------------
# INIT
#--------------------------------------------------------------------------
if __name__ == "__main__":
app.config.from_object(__name__)
# we want to support a nice fallback, so use from_pyfile directly instead of from_envvar
config_path = os.path.abspath(os.environ.get('SRTRACKER_CONFIGURATION', DEFAULT_CONFIG_PATH))
if os.path.isfile(config_path):
app.config.from_pyfile(config_path)
else:
app.logger.warn('''YOU ARE USING THE QUICK-START CONFIG, WHICH IS NOT RECOMMENDED.
PUT SOMETHING IN "./configuration.py" OR SET THE "SRTRACKER_CONFIGURATION" ENV VAR INSTEAD.''')
port = int(os.environ.get('PORT', 5100))
app.run(host='0.0.0.0', port=port)
|
[] |
[] |
[
"PORT",
"SRTRACKER_CONFIGURATION"
] |
[]
|
["PORT", "SRTRACKER_CONFIGURATION"]
|
python
| 2 | 0 | |
Data/Juliet-Java/Juliet-Java-v103/000/123/357/CWE113_HTTP_Response_Splitting__Environment_addCookieServlet_67a.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE113_HTTP_Response_Splitting__Environment_addCookieServlet_67a.java
Label Definition File: CWE113_HTTP_Response_Splitting.label.xml
Template File: sources-sinks-67a.tmpl.java
*/
/*
* @description
* CWE: 113 HTTP Response Splitting
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded string
* Sinks: addCookieServlet
* GoodSink: URLEncode input
* BadSink : querystring to addCookie()
* Flow Variant: 67 Data flow: data passed in a class from one method to another in different source files in the same package
*
* */
import javax.servlet.http.*;
public class CWE113_HTTP_Response_Splitting__Environment_addCookieServlet_67a extends AbstractTestCaseServlet
{
static class Container
{
public String containerOne;
}
public void bad(HttpServletRequest request, HttpServletResponse response) throws Throwable
{
String data;
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
Container dataContainer = new Container();
dataContainer.containerOne = data;
(new CWE113_HTTP_Response_Splitting__Environment_addCookieServlet_67b()).badSink(dataContainer , request, response );
}
public void good(HttpServletRequest request, HttpServletResponse response) throws Throwable
{
goodG2B(request, response);
goodB2G(request, response);
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B(HttpServletRequest request, HttpServletResponse response) throws Throwable
{
String data;
/* FIX: Use a hardcoded string */
data = "foo";
Container dataContainer = new Container();
dataContainer.containerOne = data;
(new CWE113_HTTP_Response_Splitting__Environment_addCookieServlet_67b()).goodG2BSink(dataContainer , request, response );
}
/* goodB2G() - use badsource and goodsink */
private void goodB2G(HttpServletRequest request, HttpServletResponse response) throws Throwable
{
String data;
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
Container dataContainer = new Container();
dataContainer.containerOne = data;
(new CWE113_HTTP_Response_Splitting__Environment_addCookieServlet_67b()).goodB2GSink(dataContainer , request, response );
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\"",
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
install/install.go
|
package install
import (
"context"
"encoding/base64"
"errors"
"fmt"
"os"
"time"
kubeapiclientset "github.com/docker/compose-on-kubernetes/api/client/clientset"
apiv1beta2 "github.com/docker/compose-on-kubernetes/api/compose/v1beta2"
"github.com/docker/compose-on-kubernetes/internal/e2e/wait"
log "github.com/sirupsen/logrus"
appsv1beta2types "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
corev1types "k8s.io/api/core/v1"
rbacv1types "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
)
const (
// TimeoutDefault is the default install timeout.
TimeoutDefault = 30 * time.Second
// installWaitNumMaxPolls is the maximum number of API operations to be
// performed in sequence while waiting for the component to be installed.
installWaitNumMaxPolls = 60
fryKey = "com.docker.fry"
imageTagKey = "com.docker.image-tag"
namespaceKey = "com.docker.deploy-namespace"
defaultServiceTypeKey = "com.docker.default-service-type"
customTLSHashAnnotationName = "com.docker.custom-tls-hash"
composeFry = "compose"
composeAPIServerFry = "compose.api"
composeGroupName = "compose.docker.com"
controllerDebugPort = 40000
apiServerDebugPort = 40001
)
var (
imageRepoPrefix = "docker/kube-compose-"
imagePrefix = func() string {
if ir := os.Getenv("IMAGE_REPO_PREFIX"); ir != "" {
return ir
}
return imageRepoPrefix
}()
everythingSelector = fmt.Sprintf("%s in (%s, %s)", fryKey, composeFry, composeAPIServerFry)
)
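// linuxAmd64NodeAffinity restricts scheduling to linux/amd64 nodes; it is the default affinity for the controller and API server.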
var linuxAmd64NodeAffinity = &corev1types.Affinity{
NodeAffinity: &corev1types.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1types.NodeSelector{
NodeSelectorTerms: []corev1types.NodeSelectorTerm{
{
MatchExpressions: []corev1types.NodeSelectorRequirement{
{
Key: "beta.kubernetes.io/os",
Operator: corev1types.NodeSelectorOpIn,
Values: []string{"linux"},
},
{
Key: "beta.kubernetes.io/arch",
Operator: corev1types.NodeSelectorOpIn,
Values: []string{"amd64"},
},
},
},
},
},
},
}
// GetInstallStatus retrieves the current installation status
func GetInstallStatus(config *rest.Config) (Status, error) {
installer, err := newInstaller(config)
if err != nil {
return Status{}, err
}
return installer.isInstalled()
}
// Unsafe installs the Compose features without high availability and with an insecure etcd.
func Unsafe(ctx context.Context, config *rest.Config, options UnsafeOptions) error {
return Do(ctx, config, WithUnsafe(options))
}
// WaitNPods waits for n pods to be up
func WaitNPods(config *rest.Config, namespace string, count int, timeout time.Duration) error {
log.Infof("Wait for %d pod(s) to be up with timeout %s", count, timeout)
client, err := corev1.NewForConfig(config)
if err != nil {
return err
}
period := 2 * time.Second
for start := time.Now(); time.Since(start) < timeout; time.Sleep(period) {
log.Debugf("Check pod(s) are running...")
pods, err := client.Pods(namespace).List(metav1.ListOptions{
LabelSelector: everythingSelector,
})
if err != nil {
return err
}
if len(pods.Items) != count {
log.Debugf("Pod(s) not yet created, waiting %s", period)
continue
}
running, err := allRunning(pods.Items)
if err != nil {
return err
}
if running {
return nil
}
log.Debugf("Pod(s) not running, waiting %s", period)
}
return errors.New("installation timed out")
}
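// checkPodContainers returns an error when a container in the pod is blocked on an image pull failure.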
func checkPodContainers(pod corev1types.Pod) error {
for _, status := range pod.Status.ContainerStatuses {
waiting := status.State.Waiting
if waiting != nil {
if IsErrImagePull(waiting.Reason) {
return errors.New(waiting.Message)
}
}
}
return nil
}
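// allRunning reports whether every pod is in the Running phase, surfacing image pull errors for pending pods and the failure message for failed pods.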
func allRunning(pods []corev1types.Pod) (bool, error) {
for _, pod := range pods {
switch pod.Status.Phase {
case corev1types.PodRunning:
case corev1types.PodPending:
return false, checkPodContainers(pod)
case corev1types.PodFailed:
return false, errors.New("unable to start controller: " + pod.Status.Message)
default:
return false, nil
}
}
return true, nil
}
// IsRunning checks if the compose api server is available
func IsRunning(config *rest.Config) (bool, error) {
client, err := kubernetes.NewForConfig(config)
if err != nil {
return false, err
}
groups, err := client.Discovery().ServerGroups()
if err != nil {
return false, err
}
for _, group := range groups.Groups {
if group.Name == apiv1beta2.SchemeGroupVersion.Group {
stackClient, err := kubeapiclientset.NewForConfig(config)
if err != nil {
return false, err
}
err = wait.For(installWaitNumMaxPolls, func() (bool, error) {
_, err := stackClient.ComposeV1beta2().Stacks("e2e").List(metav1.ListOptions{})
if err != nil {
return false, nil
}
_, err = stackClient.ComposeV1beta1().Stacks("e2e").List(metav1.ListOptions{})
return err == nil, nil
})
return err == nil, err
}
}
return false, nil
}
func (c *installer) createNamespace(*installerContext) error {
log.Debugf("Create namespace: %s", c.commonOptions.Namespace)
if _, err := c.coreClient.Namespaces().Get(c.commonOptions.Namespace, metav1.GetOptions{}); err == nil {
return nil
}
ns := &corev1types.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: c.commonOptions.Namespace,
},
}
shouldDo, err := c.objectFilter.filter(ns)
if err != nil {
return err
}
if shouldDo {
_, err := c.coreClient.Namespaces().Create(ns)
return err
}
return nil
}
func (c *installer) createPullSecretIfRequired(ctx *installerContext) error {
if c.commonOptions.PullSecret == "" {
return nil
}
log.Debug("Create pull secret")
secret, err := c.coreClient.Secrets(c.commonOptions.Namespace).Get("compose", metav1.GetOptions{})
if err == nil {
ctx.pullSecret = secret
return nil
}
bin, err := base64.StdEncoding.DecodeString(c.commonOptions.PullSecret)
if err != nil {
return err
}
secret = &corev1types.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "compose",
Namespace: c.commonOptions.Namespace,
Labels: c.labels,
},
Data: map[string][]byte{
".dockercfg": bin,
},
Type: corev1types.SecretTypeDockercfg,
}
shouldDo, err := c.objectFilter.filter(secret)
if err != nil {
return err
}
if shouldDo {
secret, err = c.coreClient.Secrets(c.commonOptions.Namespace).Create(secret)
}
ctx.pullSecret = secret
return err
}
func (c *installer) createServiceAccount(ctx *installerContext) error {
log.Debug("Create ServiceAccount")
sa, err := c.coreClient.ServiceAccounts(c.commonOptions.Namespace).Get("compose", metav1.GetOptions{})
if err == nil {
ctx.serviceAccount = sa
return nil
}
sa = &corev1types.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "compose",
Namespace: c.commonOptions.Namespace,
Labels: c.labels,
},
}
shouldDo, err := c.objectFilter.filter(sa)
if err != nil {
return err
}
if shouldDo {
sa, err = c.coreClient.ServiceAccounts(c.commonOptions.Namespace).Create(sa)
}
ctx.serviceAccount = sa
return err
}
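// composeRoleRules lists the cluster-wide permissions granted to the compose service account.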
var composeRoleRules = []rbacv1types.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"users", "groups", "serviceaccounts"},
Verbs: []string{"impersonate"},
},
{
APIGroups: []string{"authentication.k8s.io"},
Resources: []string{"*"},
Verbs: []string{"impersonate"},
},
{
APIGroups: []string{"", "apps"},
Resources: []string{"services", "deployments", "statefulsets", "daemonsets"},
Verbs: []string{"get"},
},
{
APIGroups: []string{""},
Resources: []string{"pods", "pods/log"},
Verbs: []string{"get", "watch", "list"},
},
{
APIGroups: []string{composeGroupName},
Resources: []string{"stacks"},
Verbs: []string{"*"},
},
{
APIGroups: []string{composeGroupName},
Resources: []string{"stacks/owner"},
Verbs: []string{"get"},
},
{
APIGroups: []string{"admissionregistration.k8s.io"},
Resources: []string{"validatingwebhookconfigurations", "mutatingwebhookconfigurations"},
Verbs: []string{"get", "watch", "list"},
},
{
APIGroups: []string{"apiregistration.k8s.io"},
Resources: []string{"apiservices"},
ResourceNames: []string{"v1beta1.compose.docker.com", "v1beta2.compose.docker.com"},
Verbs: []string{"*"},
},
{
APIGroups: []string{"apiregistration.k8s.io"},
Resources: []string{"apiservices"},
Verbs: []string{"create"},
},
}
func (c *installer) createSAClusterRole() error {
role, err := c.rbacClient.ClusterRoles().Get("compose-service", metav1.GetOptions{})
var shouldDo bool
if apierrors.IsNotFound(err) {
role = &rbacv1types.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: "compose-service",
Labels: c.labels,
},
Rules: composeRoleRules,
}
if shouldDo, err = c.objectFilter.filter(role); err != nil {
return err
}
if shouldDo {
role, err = c.rbacClient.ClusterRoles().Create(role)
}
} else if err == nil {
role.Rules = composeRoleRules
if shouldDo, err = c.objectFilter.filter(role); err != nil {
return err
}
if shouldDo {
role, err = c.rbacClient.ClusterRoles().Update(role)
}
}
return err
}
type roleBindingRequirement struct {
name string
namespace string
roleRef rbacv1types.RoleRef
}
var requiredRoleBindings = []roleBindingRequirement{
{
name: "compose-auth-reader",
namespace: "kube-system",
roleRef: rbacv1types.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "extension-apiserver-authentication-reader",
},
},
}
var requiredClusterRoleBindings = []roleBindingRequirement{
{
name: "compose-auth-delegator",
roleRef: rbacv1types.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:auth-delegator",
},
},
{
name: "compose-auth-view",
roleRef: rbacv1types.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "view",
},
},
{
name: "compose",
roleRef: rbacv1types.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "compose-service",
},
},
}
func (c *installer) createSARoleBindings(ctx *installerContext) error {
subjects := []rbacv1types.Subject{
{
Kind: "ServiceAccount",
Name: ctx.serviceAccount.Name,
Namespace: ctx.serviceAccount.Namespace,
},
}
var shouldDo bool
for _, req := range requiredRoleBindings {
shouldCreate := false
rb, err := c.rbacClient.RoleBindings(req.namespace).Get(req.name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
shouldCreate = true
rb = &rbacv1types.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: req.name,
Labels: c.labels,
Namespace: req.namespace,
},
RoleRef: req.roleRef,
Subjects: subjects,
}
} else if err == nil {
rb.RoleRef = req.roleRef
rb.Subjects = subjects
}
if shouldDo, err = c.objectFilter.filter(rb); err != nil {
return err
}
if shouldDo {
if shouldCreate {
_, err = c.rbacClient.RoleBindings(req.namespace).Create(rb)
} else {
_, err = c.rbacClient.RoleBindings(req.namespace).Update(rb)
}
}
if err != nil {
return err
}
}
	for _, req := range requiredClusterRoleBindings {
shouldCreate := false
crb, err := c.rbacClient.ClusterRoleBindings().Get(req.name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
shouldCreate = true
crb = &rbacv1types.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: req.name,
Labels: c.labels,
},
RoleRef: req.roleRef,
Subjects: subjects,
}
} else if err == nil {
crb.RoleRef = req.roleRef
crb.Subjects = subjects
}
if shouldDo, err = c.objectFilter.filter(crb); err != nil {
return err
}
if shouldDo {
if shouldCreate {
_, err = c.rbacClient.ClusterRoleBindings().Create(crb)
} else {
_, err = c.rbacClient.ClusterRoleBindings().Update(crb)
}
}
if err != nil {
return err
}
}
return nil
}
func (c *installer) createClusterRoleBindings(ctx *installerContext) error {
log.Debug("Create stack cluster role bindings")
if err := c.createSAClusterRole(); err != nil {
return err
}
log.Debug("Create auth RoleBindings")
return c.createSARoleBindings(ctx)
}
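// applyCustomTLSHash stamps the custom TLS hash as an annotation on both the deployment and its pod template.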
func applyCustomTLSHash(hash string, deploy *appsv1beta2types.Deployment) {
if hash == "" {
return
}
if deploy.Annotations == nil {
deploy.Annotations = make(map[string]string)
}
if deploy.Spec.Template.Annotations == nil {
deploy.Spec.Template.Annotations = make(map[string]string)
}
deploy.Annotations[customTLSHashAnnotationName] = hash
deploy.Spec.Template.Annotations[customTLSHashAnnotationName] = hash
}
func (c *installer) configureAPIServerImage() (image string, args []string, env []corev1types.EnvVar, pullPolicy corev1types.PullPolicy) {
if c.enableCoverage {
return imagePrefix + "api-server-coverage" + ":" + c.commonOptions.Tag,
[]string{},
[]corev1types.EnvVar{{Name: "TEST_API_SERVER", Value: "1"}},
corev1types.PullNever
}
if c.debugImages {
return imagePrefix + "api-server-debug:latest", []string{
"--kubeconfig=",
"--authentication-kubeconfig=",
"--authorization-kubeconfig=",
"--service-name=compose-api",
"--service-namespace", c.commonOptions.Namespace,
},
[]corev1types.EnvVar{},
corev1types.PullNever
}
return imagePrefix + "api-server" + ":" + c.commonOptions.Tag,
[]string{
"--kubeconfig", "",
"--authentication-kubeconfig=",
"--authorization-kubeconfig=",
"--service-name=compose-api",
"--service-namespace", c.commonOptions.Namespace,
},
[]corev1types.EnvVar{},
corev1types.PullAlways
}
func (c *installer) createAPIServer(ctx *installerContext) error {
log.Debugf("Create API server deployment and service in namespace %q", c.commonOptions.Namespace)
image, args, env, pullPolicy := c.configureAPIServerImage()
if c.apiServerImageOverride != "" {
image = c.apiServerImageOverride
}
affinity := c.commonOptions.APIServerAffinity
if affinity == nil {
affinity = linuxAmd64NodeAffinity
}
log.Infof("Api server: image: %q, pullPolicy: %q", image, pullPolicy)
deploy := &appsv1beta2types.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "compose-api",
Namespace: c.commonOptions.Namespace,
Labels: c.apiLabels,
},
Spec: appsv1beta2types.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: c.apiLabels,
},
Template: corev1types.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: c.apiLabels,
},
Spec: corev1types.PodSpec{
ServiceAccountName: ctx.serviceAccount.Name,
ImagePullSecrets: pullSecrets(ctx.pullSecret),
Containers: []corev1types.Container{
{
Name: "compose",
Image: image,
ImagePullPolicy: pullPolicy,
Args: args,
Env: env,
LivenessProbe: &corev1types.Probe{
Handler: corev1types.Handler{
HTTPGet: &corev1types.HTTPGetAction{
Path: "/healthz",
Scheme: corev1types.URISchemeHTTPS,
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
FailureThreshold: 8,
},
},
},
Affinity: affinity,
},
},
},
}
applyEtcdOptions(&deploy.Spec.Template.Spec, c.etcdOptions)
applyNetworkOptions(&deploy.Spec.Template.Spec, c.networkOptions)
port := 9443
if c.networkOptions != nil && c.networkOptions.Port != 0 {
port = int(c.networkOptions.Port)
}
applyCustomTLSHash(c.customTLSHash, deploy)
shouldDo, err := c.objectFilter.filter(deploy)
if err != nil {
return err
}
if shouldDo {
if c.debugImages {
trueval := true
for ix := range deploy.Spec.Template.Spec.Containers {
deploy.Spec.Template.Spec.Containers[ix].SecurityContext = &corev1types.SecurityContext{
Privileged: &trueval,
}
deploy.Spec.Template.Spec.Containers[ix].LivenessProbe = nil
}
}
d, err := c.appsClient.Deployments(c.commonOptions.Namespace).Get("compose-api", metav1.GetOptions{})
if err == nil {
deploy.ObjectMeta.ResourceVersion = d.ObjectMeta.ResourceVersion
_, err = c.appsClient.Deployments(c.commonOptions.Namespace).Update(deploy)
} else {
_, err = c.appsClient.Deployments(c.commonOptions.Namespace).Create(deploy)
}
if err != nil {
return err
}
}
if err = c.createAPIServerService(port); err != nil {
return err
}
if c.debugImages {
// create a load balanced service for exposing remote debug endpoint
return c.createDebugService("compose-api-server-remote-debug", apiServerDebugPort, c.apiLabels)
}
return nil
}
func (c *installer) createAPIServerService(port int) error {
svc := &corev1types.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "compose-api",
Namespace: c.commonOptions.Namespace,
Labels: c.apiLabels,
},
Spec: corev1types.ServiceSpec{
Ports: []corev1types.ServicePort{
{
Name: "api",
Port: 443,
TargetPort: intstr.FromInt(port),
},
},
Selector: c.apiLabels,
},
}
shouldDo, err := c.objectFilter.filter(svc)
if err != nil {
return err
}
if shouldDo {
s, err := c.coreClient.Services(c.commonOptions.Namespace).Get("compose-api", metav1.GetOptions{})
if err == nil {
svc.Spec.ClusterIP = s.Spec.ClusterIP
svc.ObjectMeta.ResourceVersion = s.ObjectMeta.ResourceVersion
_, err = c.coreClient.Services(c.commonOptions.Namespace).Update(svc)
} else {
_, err = c.coreClient.Services(c.commonOptions.Namespace).Create(svc)
}
return err
}
return nil
}
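// pullSecrets converts an optional pull secret into the LocalObjectReference slice expected by a pod spec.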
func pullSecrets(secret *corev1types.Secret) []corev1types.LocalObjectReference {
if secret == nil {
return nil
}
return []corev1types.LocalObjectReference{{Name: secret.Name}}
}
func (c *installer) configureControllerImage() (image string, args []string, pullPolicy v1.PullPolicy) {
if c.enableCoverage {
return imagePrefix + "controller-coverage" + ":" + c.commonOptions.Tag, []string{}, corev1types.PullNever
}
if c.debugImages {
return imagePrefix + "controller-debug:latest", []string{
"--kubeconfig=",
"--reconciliation-interval", c.commonOptions.ReconciliationInterval.String(),
}, corev1types.PullNever
}
return imagePrefix + "controller:" + c.commonOptions.Tag, []string{
"--kubeconfig", "",
"--reconciliation-interval", c.commonOptions.ReconciliationInterval.String(),
}, corev1types.PullAlways
}
func (c *installer) createController(ctx *installerContext) error {
log.Debugf("Create deployment with tag %q in namespace %q, reconciliation interval %s", c.commonOptions.Tag, c.commonOptions.Namespace, c.commonOptions.ReconciliationInterval)
image, args, pullPolicy := c.configureControllerImage()
if c.commonOptions.DefaultServiceType != "" {
args = append(args, "--default-service-type="+c.commonOptions.DefaultServiceType)
}
if c.controllerImageOverride != "" {
image = c.controllerImageOverride
}
affinity := c.commonOptions.ControllerAffinity
if affinity == nil {
affinity = linuxAmd64NodeAffinity
}
log.Infof("Controller: image: %q, pullPolicy: %q", image, pullPolicy)
deploy := &appsv1beta2types.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "compose",
Namespace: c.commonOptions.Namespace,
Labels: c.labels,
},
Spec: appsv1beta2types.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: c.labels,
},
Template: corev1types.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: c.labels,
},
Spec: corev1types.PodSpec{
ServiceAccountName: ctx.serviceAccount.Name,
ImagePullSecrets: pullSecrets(ctx.pullSecret),
Containers: []corev1types.Container{
{
Name: "compose",
Image: image,
ImagePullPolicy: pullPolicy,
Args: args,
LivenessProbe: &corev1types.Probe{
Handler: corev1types.Handler{
HTTPGet: &corev1types.HTTPGetAction{
Path: "/healthz",
Scheme: corev1types.URISchemeHTTP,
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
FailureThreshold: 8,
},
},
},
Affinity: affinity,
},
},
},
}
if c.enableCoverage {
deploy.Spec.Template.Spec.Containers[0].Env = []corev1types.EnvVar{{Name: "TEST_COMPOSE_CONTROLLER", Value: "1"}}
}
shouldDo, err := c.objectFilter.filter(deploy)
if err != nil {
return err
}
if shouldDo {
if c.debugImages {
trueval := true
for ix := range deploy.Spec.Template.Spec.Containers {
deploy.Spec.Template.Spec.Containers[ix].SecurityContext = &corev1types.SecurityContext{
Privileged: &trueval,
}
deploy.Spec.Template.Spec.Containers[ix].LivenessProbe = nil
}
}
d, err := c.appsClient.Deployments(c.commonOptions.Namespace).Get("compose", metav1.GetOptions{})
if err == nil {
deploy.ObjectMeta.ResourceVersion = d.ObjectMeta.ResourceVersion
if _, err = c.appsClient.Deployments(c.commonOptions.Namespace).Update(deploy); err != nil {
return err
}
} else if _, err = c.appsClient.Deployments(c.commonOptions.Namespace).Create(deploy); err != nil {
return err
}
}
if c.debugImages {
// create a load balanced service for exposing remote debug endpoint
return c.createDebugService("compose-controller-remote-debug", controllerDebugPort, c.labels)
}
return nil
}
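// createDebugService creates or updates a LoadBalancer service exposing the delve remote-debugging port for the given component.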
func (c *installer) createDebugService(name string, port int32, labels map[string]string) error {
svc, err := c.coreClient.Services(c.commonOptions.Namespace).Get(name, metav1.GetOptions{})
if err == nil {
svc.Spec.Type = corev1types.ServiceTypeLoadBalancer
svc.Spec.Ports = []corev1types.ServicePort{
{Name: "delve", Port: port, TargetPort: intstr.FromInt(40000)},
}
svc.Spec.Selector = labels
_, err = c.coreClient.Services(c.commonOptions.Namespace).Update(svc)
return err
}
_, err = c.coreClient.Services(c.commonOptions.Namespace).Create(&corev1types.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: corev1types.ServiceSpec{
Type: corev1types.ServiceTypeLoadBalancer,
Selector: labels,
Ports: []corev1types.ServicePort{
{Name: "delve", Port: port, TargetPort: intstr.FromInt(40000)},
},
},
})
return err
}
|
[
"\"IMAGE_REPO_PREFIX\""
] |
[] |
[
"IMAGE_REPO_PREFIX"
] |
[]
|
["IMAGE_REPO_PREFIX"]
|
go
| 1 | 0 | |
ludwig/backend/__init__.py
|
#! /usr/bin/env python
# Copyright (c) 2020 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from ludwig.backend.base import Backend, LocalBackend
from ludwig.utils.horovod_utils import has_horovodrun
try:
import ray as _ray
except Exception:
_ray = None
LOCAL_BACKEND = LocalBackend()
LOCAL = "local"
DASK = "dask"
HOROVOD = "horovod"
RAY = "ray"
ALL_BACKENDS = [LOCAL, DASK, HOROVOD, RAY]
def _has_ray():
# Temporary workaround to prevent tests from automatically using the Ray backend. Taken from
# https://stackoverflow.com/questions/25188119/test-if-code-is-executed-from-within-a-py-test-session
if "PYTEST_CURRENT_TEST" in os.environ:
return False
if _ray is None:
return False
if _ray.is_initialized():
return True
try:
_ray.init("auto", ignore_reinit_error=True)
return True
except Exception:
return False
def get_local_backend(**kwargs):
return LocalBackend(**kwargs)
def create_horovod_backend(**kwargs):
from ludwig.backend.horovod import HorovodBackend
return HorovodBackend(**kwargs)
def create_ray_backend(**kwargs):
from ludwig.backend.ray import RayBackend
return RayBackend(**kwargs)
backend_registry = {
LOCAL: get_local_backend,
HOROVOD: create_horovod_backend,
RAY: create_ray_backend,
None: get_local_backend,
}
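# Resolve a backend: pass through Backend instances unchanged, otherwise auto-select Ray or Horovod when available, falling back to the local backend.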
def create_backend(type, **kwargs):
if isinstance(type, Backend):
return type
if type is None and _has_ray():
type = RAY
elif type is None and has_horovodrun():
type = HOROVOD
return backend_registry[type](**kwargs)
def initialize_backend(backend):
if isinstance(backend, dict):
backend = create_backend(**backend)
else:
backend = create_backend(backend)
backend.initialize()
return backend
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
api/tacticalrmm/core/goinstaller/installer.go
|
//go:generate goversioninfo -icon=onit.ico -manifest=goversioninfo.exe.manifest -gofile=versioninfo.go
package main
import (
"bufio"
"flag"
"fmt"
"io"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
var (
Inno string
Api string
Client string
Site string
Atype string
Power string
Rdp string
Ping string
Token string
DownloadUrl string
)
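// netTransport and netClient use short dial/TLS timeouts but allow up to 15 minutes for the full agent download.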
var netTransport = &http.Transport{
Dial: (&net.Dialer{
Timeout: 5 * time.Second,
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
}
var netClient = &http.Client{
Timeout: time.Second * 900,
Transport: netTransport,
}
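// downloadAgent downloads the agent installer from DownloadUrl and writes it to the given file path.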
func downloadAgent(filepath string) (err error) {
out, err := os.Create(filepath)
if err != nil {
return err
}
defer out.Close()
resp, err := netClient.Get(DownloadUrl)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("Bad response: %s", resp.Status)
}
_, err = io.Copy(out, resp.Body)
if err != nil {
return err
}
return nil
}
func main() {
debugLog := flag.String("log", "", "Verbose output")
localMesh := flag.String("local-mesh", "", "Use local mesh agent")
silent := flag.Bool("silent", false, "Do not popup any message boxes during installation")
cert := flag.String("cert", "", "Path to ca.pem")
flag.Parse()
var debug bool = false
if strings.TrimSpace(strings.ToLower(*debugLog)) == "debug" {
debug = true
}
agentBinary := filepath.Join(os.Getenv("windir"), "Temp", Inno)
tacrmm := filepath.Join(os.Getenv("PROGRAMFILES"), "TacticalAgent", "tacticalrmm.exe")
cmdArgs := []string{
"-m", "install", "--api", Api, "--client-id",
Client, "--site-id", Site, "--agent-type", Atype,
"--auth", Token,
}
if debug {
cmdArgs = append(cmdArgs, "-log", "debug")
}
if *silent {
cmdArgs = append(cmdArgs, "-silent")
}
if len(strings.TrimSpace(*localMesh)) != 0 {
cmdArgs = append(cmdArgs, "-local-mesh", *localMesh)
}
if len(strings.TrimSpace(*cert)) != 0 {
cmdArgs = append(cmdArgs, "-cert", *cert)
}
if Rdp == "1" {
cmdArgs = append(cmdArgs, "-rdp")
}
if Ping == "1" {
cmdArgs = append(cmdArgs, "-ping")
}
if Power == "1" {
cmdArgs = append(cmdArgs, "-power")
}
if debug {
fmt.Println("Installer:", agentBinary)
fmt.Println("Tactical Agent:", tacrmm)
fmt.Println("Download URL:", DownloadUrl)
fmt.Println("Install command:", tacrmm, strings.Join(cmdArgs, " "))
}
fmt.Println("Downloading agent...")
dl := downloadAgent(agentBinary)
if dl != nil {
fmt.Println("ERROR: unable to download agent from", DownloadUrl)
fmt.Println(dl)
os.Exit(1)
}
defer os.Remove(agentBinary)
fmt.Println("Extracting files...")
winagentCmd := exec.Command(agentBinary, "/VERYSILENT", "/SUPPRESSMSGBOXES")
err := winagentCmd.Run()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
time.Sleep(5 * time.Second)
fmt.Println("Installation starting.")
cmd := exec.Command(tacrmm, cmdArgs...)
cmdReader, err := cmd.StdoutPipe()
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
cmdErrReader, oerr := cmd.StderrPipe()
if oerr != nil {
fmt.Fprintln(os.Stderr, oerr)
return
}
scanner := bufio.NewScanner(cmdReader)
escanner := bufio.NewScanner(cmdErrReader)
go func() {
for scanner.Scan() {
fmt.Println(scanner.Text())
}
}()
go func() {
for escanner.Scan() {
fmt.Println(escanner.Text())
}
}()
err = cmd.Start()
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
err = cmd.Wait()
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
}
|
[
"\"windir\"",
"\"PROGRAMFILES\""
] |
[] |
[
"windir",
"PROGRAMFILES"
] |
[]
|
["windir", "PROGRAMFILES"]
|
go
| 2 | 0 | |
config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
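# Flask configuration; the secret key and database URL come from the environment when set.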
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
|
[] |
[] |
[
"SECRET_KEY",
"DATABASE_URL"
] |
[]
|
["SECRET_KEY", "DATABASE_URL"]
|
python
| 2 | 0 | |
app.py
|
import smtplib
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify, abort
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user
from functools import wraps
from flask_ckeditor import CKEditor
from forms import *
import stripe
from werkzeug.security import check_password_hash, generate_password_hash
import os
from dotenv import load_dotenv
load_dotenv()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv("DATABASE_URL")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
app.config["STRIPE_SECRET_KEY"] = os.getenv("STRIPE_SECRET_KEY")
app.config["STRIPE_PUBLIC_KEY"] = os.getenv("STRIPE_PUBLIC_KEY")
stripe.api_key = app.config["STRIPE_SECRET_KEY"]
ckeditor = CKEditor(app)
Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
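# Restrict a view to the first registered user (id == 1); all other users get a 403.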
def admin_only(fun):
@wraps(fun)
def decorated_function(*args, **kwargs):
if current_user.id != 1:
return abort(403)
return fun(*args, **kwargs)
return decorated_function
class User(UserMixin, db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
name = db.Column(db.String(1000))
cart = db.relationship("Cart", uselist=False, backref="user")
def __repr__(self):
return f"user('{self.id}', '{self.email}', '{self.password}', '{self.name}', '{self.cart})"
class Products(db.Model):
__tablename__ = 'products'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False, unique=True)
description = db.Column(db.Text)
image = db.Column(db.Text)
price = db.Column(db.Integer, nullable=False)
def __repr__(self):
return f"products('{self.id}', '{self.name}', '{self.description}', '{self.image}', '{self.price}')"
class Cart(db.Model):
__tablename__ = 'cart'
id = db.Column(db.Integer, primary_key=True)
items = db.Column(db.Text)
product_id = db.Column(db.Integer, db.ForeignKey('products.id'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return f"cart('{self.id}', '{self.items}', '{self.product_id}', '{self.user_id}')"
db.create_all()
@app.route("/", methods=["POST", "GET"])
def home():
products = Products.query.all()
return render_template("index.html", products=products, current_user=current_user)
@app.route("/product/<int:pid>")
def product(pid):
selected_product = Products.query.get(pid)
return render_template("product.html", product=selected_product, current_user=current_user)
@app.route("/add-product", methods=["POST", "GET"])
@admin_only
def new_product():
form = CreateProductForm()
if form.validate_on_submit():
new_post = Products(
name=form.name.data,
price=form.price.data,
image=form.img_url.data,
description=form.description.data
)
db.session.add(new_post)
db.session.commit()
return redirect(url_for('home', current_user=current_user))
return render_template("add-product.html", form=form, current_user=current_user)
@app.route('/register', methods=["POST", "GET"])
def register():
form = RegisterForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
flash(f"User with this email already exists, log in instead!")
return redirect(url_for('login'))
else:
user_password = generate_password_hash(password=form.password.data, method='pbkdf2:sha256', salt_length=8)
new_user = User(
email=form.email.data,
password=user_password,
name=form.name.data
)
db.session.add(new_user)
db.session.commit()
login_user(new_user)
return redirect(url_for('home'))
return render_template("register.html", form=form, current_user=current_user)
@app.route('/login', methods=["POST", "GET"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if not user:
flash("Email does not exist, please try again.")
return redirect(url_for('login'))
elif not check_password_hash(user.password, form.password.data):
flash('Incorrect Password, please try again.')
return redirect(url_for('login'))
else:
login_user(user)
return redirect(url_for('home'))
return render_template("login.html", form=form, current_user=current_user)
@app.route("/product/<int:pid>/add", methods=["POST", "GET"])
@login_required
def add(pid):
product = Products.query.get(pid)
cart_item = Cart(
items=product.name,
product_id=product.id,
user_id=current_user.id
)
db.session.add(cart_item)
db.session.commit()
flash(f"{product.name} added to shopping cart!")
return redirect(url_for('product', pid=pid))
@app.route("/cart", methods=["POST", "GET"])
@login_required
def cart():
cart = Cart.query.filter_by(user_id=current_user.id).all()
products = [Products.query.filter_by(id=item.product_id).first() for item in cart]
total = 0
for product in products:
total += product.price
return render_template("cart.html", products=products, total=total, current_user=current_user)
@app.route("/remove/<int:pid>", methods=["POST", "GET"])
@login_required
def remove_from_cart(pid):
item_remove = Cart.query.filter_by(product_id=pid).first()
db.session.delete(item_remove)
db.session.commit()
return redirect(url_for('cart'))
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
DOMAIN = os.getenv("DOMAIN")
@app.route('/create-checkout-session', methods=['POST'])
def create_checkout_session():
cart = Cart.query.filter_by(user_id=current_user.id).all()
products = [Products.query.filter_by(id=item.product_id).first() for item in cart]
total = 0
checkout_items = []
for product in products:
total += product.price
schema = {
'price_data': {
'currency': 'usd',
'product_data': {
'name': product.name,
},
'unit_amount': int(str(product.price) + "00"),
},
'quantity': 1,
}
checkout_items.append(schema)
try:
checkout_session = stripe.checkout.Session.create(
payment_intent_data={
'setup_future_usage': 'off_session',
},
customer_email=current_user.email,
payment_method_types=['card'],
line_items=checkout_items,
mode='payment',
success_url=DOMAIN + '/success',
cancel_url=DOMAIN + '/cancel',
shipping_address_collection={
"allowed_countries": ['US', 'CA'],
}
)
return jsonify({'id': checkout_session.id})
except Exception as e:
return jsonify(error=str(e)), 403
@app.route("/success/", methods=["GET", "POST"])
def success():
cart = Cart.query.filter_by(user_id=current_user.id).all()
message = f"Subject:New Order\n\nHi {current_user.name}! " \
f"We received your order! It will arrive to you in 10 business days ."
with smtplib.SMTP("smtp.gmail.com") as connection:
connection.starttls()
connection.login(user=os.getenv("FROM_EMAIL"), password=os.getenv("PASSWORD"))
connection.sendmail(from_addr=os.getenv("FROM_EMAIL"),
to_addrs=current_user.email,
msg=f"Subject:hello,{current_user.name}\nMessage:{message}")
flash("Success, we sent you a confirmation email!")
for item in cart:
db.session.delete(item)
db.session.commit()
return redirect(url_for("home", alert="alert-success", current_user=current_user))
@app.route("/cancel")
@login_required
def cancel():
flash("Error has occurred, try again!")
return redirect(url_for("home", alert="alert-danger", current_user=current_user))
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
|
[] |
[] |
[
"DOMAIN",
"PASSWORD",
"STRIPE_SECRET_KEY",
"DATABASE_URL",
"STRIPE_PUBLIC_KEY",
"FROM_EMAIL"
] |
[]
|
["DOMAIN", "PASSWORD", "STRIPE_SECRET_KEY", "DATABASE_URL", "STRIPE_PUBLIC_KEY", "FROM_EMAIL"]
|
python
| 6 | 0 | |
api/app.py
|
from starlette.applications import Starlette
from starlette.responses import UJSONResponse
import gpt_2_simple as gpt2
import tensorflow as tf
import uvicorn
import os
import gc
app = Starlette(debug=False)
sess = gpt2.start_tf_sess(threads=1)
gpt2.load_gpt2(sess)
# Needed to avoid cross-domain issues
response_header = {
'Access-Control-Allow-Origin': '*'
}
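# Number of texts generated since the TensorFlow session was last rebuilt.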
generate_count = 0
@app.route('/', methods=['GET', 'POST', 'HEAD'])
async def homepage(request):
global generate_count
global sess
if request.method == 'GET':
params = request.query_params
elif request.method == 'POST':
params = await request.json()
elif request.method == 'HEAD':
return UJSONResponse({'text': ''},
headers=response_header)
text = gpt2.generate(sess,
run_name='run1',
length=int(params.get('length', 512)),
temperature=float(params.get('temperature', 2.1)),
top_k=int(params.get('top_k', 45)),
top_p=float(params.get('top_p', 0)),
prefix=params.get('prefix', '')[:500],
truncate=params.get('truncate', None),
include_prefix=str(params.get(
'include_prefix', True)).lower() == 'true',
return_as_list=True
)[0]
generate_count += 1
if generate_count == 8:
# Reload model to prevent Graph/Session from going OOM
tf.reset_default_graph()
sess.close()
sess = gpt2.start_tf_sess(threads=1)
gpt2.load_gpt2(sess)
generate_count = 0
gc.collect()
posts = text.split('\n')
    # Remove the last title, which is probably incomplete
del posts[-1]
return UJSONResponse({'posts': posts},
headers=response_header)
if __name__ == '__main__':
uvicorn.run(app, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
|
[] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
python
| 1 | 0 | |
AppServer/google/appengine/api/appinfo.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""AppInfo tools.
Library for working with AppInfo records in memory, store and load from
configuration files.
"""
import os
import logging
import re
import string
import wsgiref.util
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import pagespeedinfo
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
else:
from google.appengine.api import pagespeedinfo
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
from google.appengine.api import appinfo_errors
from google.appengine.api import backendinfo
_URL_REGEX = r'(?!\^)/.*|\..*|(\(.).*(?!\$).'
_FILES_REGEX = r'(?!\^).*(?!\$).'
_URL_ROOT_REGEX = r'/.*'
_DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)'
_EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX)
_START_PATH = '/_ah/start'
_ALLOWED_SERVICES = ['mail', 'mail_bounce', 'xmpp_message', 'xmpp_subscribe',
'xmpp_presence', 'xmpp_error', 'channel_presence', 'rest',
'warmup']
_SERVICE_RE_STRING = '(' + '|'.join(_ALLOWED_SERVICES) + ')'
_PAGE_NAME_REGEX = r'^.+$'
_EXPIRATION_CONVERSIONS = {
'd': 60 * 60 * 24,
'h': 60 * 60,
'm': 60,
's': 1,
}
APP_ID_MAX_LEN = 100
MODULE_ID_MAX_LEN = 63
MODULE_VERSION_ID_MAX_LEN = 63
MAX_URL_MAPS = 100
PARTITION_SEPARATOR = '~'
DOMAIN_SEPARATOR = ':'
VERSION_SEPARATOR = '.'
MODULE_SEPARATOR = ':'
DEFAULT_MODULE = 'default'
PARTITION_RE_STRING = (r'[a-z\d\-]{1,%d}\%s' %
(APP_ID_MAX_LEN, PARTITION_SEPARATOR))
DOMAIN_RE_STRING = (r'(?!\-)[a-z\d\-\.]{1,%d}%s' %
(APP_ID_MAX_LEN, DOMAIN_SEPARATOR))
DISPLAY_APP_ID_RE_STRING = r'(?!-)[a-z\d\-]{0,%d}[a-z\d]' % (APP_ID_MAX_LEN - 1)
APPLICATION_RE_STRING = (r'(?:%s)?(?:%s)?%s' %
(PARTITION_RE_STRING,
DOMAIN_RE_STRING,
DISPLAY_APP_ID_RE_STRING))
MODULE_ID_RE_STRING = r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' % (MODULE_ID_MAX_LEN - 1)
MODULE_VERSION_ID_RE_STRING = (r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' %
(MODULE_VERSION_ID_MAX_LEN - 1))
_IDLE_INSTANCES_REGEX = r'^([\d]+|automatic)$'
_INSTANCES_REGEX = r'^[\d]+$'
_INSTANCE_CLASS_REGEX = r'^([fF](1|2|4|4_1G)|[bB](1|2|4|8|4_1G))$'
_CONCURRENT_REQUESTS_REGEX = r'^([1-9]\d*)$'
_PENDING_LATENCY_REGEX = r'^(\d+((\.\d{1,3})?s|ms)|automatic)$'
_IDLE_TIMEOUT_REGEX = r'^[\d]+(s|m)$'
ALTERNATE_HOSTNAME_SEPARATOR = '-dot-'
BUILTIN_NAME_PREFIX = 'ah-builtin'
RUNTIME_RE_STRING = r'[a-z][a-z0-9]{0,29}'
API_VERSION_RE_STRING = r'[\w.]{1,32}'
SOURCE_LANGUAGE_RE_STRING = r'[\w.\-]{1,32}'
HANDLER_STATIC_FILES = 'static_files'
HANDLER_STATIC_DIR = 'static_dir'
HANDLER_SCRIPT = 'script'
HANDLER_API_ENDPOINT = 'api_endpoint'
LOGIN_OPTIONAL = 'optional'
LOGIN_REQUIRED = 'required'
LOGIN_ADMIN = 'admin'
AUTH_FAIL_ACTION_REDIRECT = 'redirect'
AUTH_FAIL_ACTION_UNAUTHORIZED = 'unauthorized'
DATASTORE_ID_POLICY_LEGACY = 'legacy'
DATASTORE_ID_POLICY_DEFAULT = 'default'
SECURE_HTTP = 'never'
SECURE_HTTPS = 'always'
SECURE_HTTP_OR_HTTPS = 'optional'
SECURE_DEFAULT = 'default'
REQUIRE_MATCHING_FILE = 'require_matching_file'
DEFAULT_SKIP_FILES = (r'^(.*/)?('
r'(#.*#)|'
r'(.*~)|'
r'(.*\.py[co])|'
r'(.*/RCS/.*)|'
r'(\..*)|'
r')$')
DEFAULT_NOBUILD_FILES = (r'^$')
LOGIN = 'login'
AUTH_FAIL_ACTION = 'auth_fail_action'
SECURE = 'secure'
URL = 'url'
POSITION = 'position'
POSITION_HEAD = 'head'
POSITION_TAIL = 'tail'
STATIC_FILES = 'static_files'
UPLOAD = 'upload'
STATIC_DIR = 'static_dir'
MIME_TYPE = 'mime_type'
SCRIPT = 'script'
EXPIRATION = 'expiration'
API_ENDPOINT = 'api_endpoint'
HTTP_HEADERS = 'http_headers'
APPLICATION_READABLE = 'application_readable'
APPLICATION = 'application'
MODULE = 'module'
AUTOMATIC_SCALING = 'automatic_scaling'
MANUAL_SCALING = 'manual_scaling'
BASIC_SCALING = 'basic_scaling'
VM = 'vm'
VM_SETTINGS = 'vm_settings'
VERSION = 'version'
MAJOR_VERSION = 'major_version'
MINOR_VERSION = 'minor_version'
RUNTIME = 'runtime'
API_VERSION = 'api_version'
SOURCE_LANGUAGE = 'source_language'
BUILTINS = 'builtins'
INCLUDES = 'includes'
HANDLERS = 'handlers'
LIBRARIES = 'libraries'
DEFAULT_EXPIRATION = 'default_expiration'
SKIP_FILES = 'skip_files'
NOBUILD_FILES = 'nobuild_files'
SERVICES = 'inbound_services'
DERIVED_FILE_TYPE = 'derived_file_type'
JAVA_PRECOMPILED = 'java_precompiled'
PYTHON_PRECOMPILED = 'python_precompiled'
ADMIN_CONSOLE = 'admin_console'
ERROR_HANDLERS = 'error_handlers'
BACKENDS = 'backends'
THREADSAFE = 'threadsafe'
DATASTORE_AUTO_ID_POLICY = 'auto_id_policy'
API_CONFIG = 'api_config'
CODE_LOCK = 'code_lock'
ENV_VARIABLES = 'env_variables'
PAGESPEED = 'pagespeed'
INSTANCE_CLASS = 'instance_class'
MINIMUM_PENDING_LATENCY = 'min_pending_latency'
MAXIMUM_PENDING_LATENCY = 'max_pending_latency'
MINIMUM_IDLE_INSTANCES = 'min_idle_instances'
MAXIMUM_IDLE_INSTANCES = 'max_idle_instances'
MAXIMUM_CONCURRENT_REQUEST = 'max_concurrent_requests'
INSTANCES = 'instances'
MAX_INSTANCES = 'max_instances'
IDLE_TIMEOUT = 'idle_timeout'
PAGES = 'pages'
NAME = 'name'
ERROR_CODE = 'error_code'
FILE = 'file'
_ERROR_CODE_REGEX = r'(default|over_quota|dos_api_denial|timeout)'
ON = 'on'
ON_ALIASES = ['yes', 'y', 'True', 't', '1', 'true']
OFF = 'off'
OFF_ALIASES = ['no', 'n', 'False', 'f', '0', 'false']
class _VersionedLibrary(object):
"""A versioned library supported by App Engine."""
def __init__(self,
name,
url,
description,
supported_versions,
default_version=None,
deprecated_versions=None,
experimental_versions=None):
"""Initializer for _VersionedLibrary.
Args:
name: The name of the library e.g. "django".
url: The URL for the library's project page e.g.
"http://www.djangoproject.com/".
description: A short description of the library e.g. "A framework...".
supported_versions: A list of supported version names ordered by release
date e.g. ["v1", "v2", "v3"].
default_version: The version of the library that is enabled by default
in the Python 2.7 runtime or None if the library is not available by
default e.g. "v1".
deprecated_versions: A list of the versions of the library that have been
deprecated e.g. ["v1", "v2"].
experimental_versions: A list of the versions of the library that are
current experimental e.g. ["v1"].
"""
self.name = name
self.url = url
self.description = description
self.supported_versions = supported_versions
self.default_version = default_version
self.deprecated_versions = deprecated_versions or []
self.experimental_versions = experimental_versions or []
@property
def non_deprecated_versions(self):
return [version for version in self.supported_versions
if version not in self.deprecated_versions]
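# Catalog of versioned third-party libraries available to applications on the Python 2.7 runtime.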
_SUPPORTED_LIBRARIES = [
_VersionedLibrary(
'django',
'http://www.djangoproject.com/',
'A full-featured web application framework for Python.',
['1.2', '1.3', '1.4', '1.5'],
experimental_versions=['1.5'],
),
_VersionedLibrary(
'jinja2',
'http://jinja.pocoo.org/docs/',
'A modern and designer friendly templating language for Python.',
['2.6']),
_VersionedLibrary(
'lxml',
'http://lxml.de/',
'A Pythonic binding for the C libraries libxml2 and libxslt.',
['2.3']),
_VersionedLibrary(
'markupsafe',
'http://pypi.python.org/pypi/MarkupSafe',
'A XML/HTML/XHTML markup safe string for Python.',
['0.15']),
_VersionedLibrary(
'matplotlib',
'http://matplotlib.org/',
'A 2D plotting library which produces publication-quality figures.',
['1.2.0'],
experimental_versions=['1.2.0'],
),
_VersionedLibrary(
'MySQLdb',
'http://mysql-python.sourceforge.net/',
'A Python DB API v2.0 compatible interface to MySQL.',
['1.2.4b4'],
experimental_versions=['1.2.4b4']
),
_VersionedLibrary(
'numpy',
'http://numpy.scipy.org/',
'A general-purpose library for array-processing.',
['1.6.1']),
_VersionedLibrary(
'PIL',
'http://www.pythonware.com/library/pil/handbook/',
'A library for creating and transforming images.',
['1.1.7']),
_VersionedLibrary(
'PyAMF',
'http://www.pyamf.org/',
'A library that provides (AMF) Action Message Format functionality.',
['0.6.1']),
_VersionedLibrary(
'pycrypto',
'https://www.dlitz.net/software/pycrypto/',
'A library of cryptogoogle.appengine._internal.graphy functions such as random number generation.',
['2.3', '2.6'],
),
_VersionedLibrary(
'setuptools',
'http://pypi.python.org/pypi/setuptools',
'A library that provides package and module discovery capabilities.',
['0.6c11']),
_VersionedLibrary(
'ssl',
'http://docs.python.org/dev/library/ssl.html',
'The SSL socket wrapper built-in module.',
['2.7'],
experimental_versions=['2.7']),
_VersionedLibrary(
'webapp2',
'http://webapp-improved.appspot.com/',
'A lightweight Python web framework.',
['2.3', '2.5.1', '2.5.2'],
default_version='2.3',
deprecated_versions=['2.3']
),
_VersionedLibrary(
'webob',
'http://www.webob.org/',
'A library that provides wrappers around the WSGI request environment.',
['1.1.1', '1.2.3'],
default_version='1.1.1',
),
_VersionedLibrary(
'yaml',
'http://www.yaml.org/',
'A library for YAML serialization and deserialization.',
['3.10'],
default_version='3.10'
),
]
_NAME_TO_SUPPORTED_LIBRARY = dict((library.name, library)
for library in _SUPPORTED_LIBRARIES)
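# Maps a (library name, version) pair to the other libraries it requires.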
REQUIRED_LIBRARIES = {
('jinja2', '2.6'): [('markupsafe', '0.15'), ('setuptools', '0.6c11')],
('jinja2', 'latest'): [('markupsafe', 'latest'), ('setuptools', 'latest')],
('matplotlib', '1.1.1'): [('numpy', '1.6.1')],
('matplotlib', '1.2.0'): [('numpy', '1.6.1')],
('matplotlib', 'latest'): [('numpy', 'latest')],
}
_USE_VERSION_FORMAT = ('use one of: "%s" or "latest" '
'("latest" recommended for development only)')
_HTTP_SEPARATOR_CHARS = frozenset('()<>@,;:\\"/[]?={} \t')
_HTTP_TOKEN_CHARS = frozenset(string.printable[:-5]) - _HTTP_SEPARATOR_CHARS
_HTTP_TOKEN_RE = re.compile('[%s]+$' % re.escape(''.join(_HTTP_TOKEN_CHARS)))
_HTTP_REQUEST_HEADERS = frozenset([
'accept',
'accept-charset',
'accept-encoding',
'accept-language',
'authorization',
'expect',
'from',
'host',
'if-match',
'if-modified-since',
'if-none-match',
'if-range',
'if-unmodified-since',
'max-forwards',
'proxy-authorization',
'range',
'referer',
'te',
'user-agent',
])
_MAX_COOKIE_LENGTH = 4096
_MAX_URL_LENGTH = 2047
class HandlerBase(validation.Validated):
"""Base class for URLMap and ApiConfigHandler."""
ATTRIBUTES = {
URL: validation.Optional(_URL_REGEX),
LOGIN: validation.Options(LOGIN_OPTIONAL,
LOGIN_REQUIRED,
LOGIN_ADMIN,
default=LOGIN_OPTIONAL),
AUTH_FAIL_ACTION: validation.Options(AUTH_FAIL_ACTION_REDIRECT,
AUTH_FAIL_ACTION_UNAUTHORIZED,
default=AUTH_FAIL_ACTION_REDIRECT),
SECURE: validation.Options(SECURE_HTTP,
SECURE_HTTPS,
SECURE_HTTP_OR_HTTPS,
SECURE_DEFAULT,
default=SECURE_DEFAULT),
HANDLER_SCRIPT: validation.Optional(_FILES_REGEX)
}
class HttpHeadersDict(validation.ValidatedDict):
"""A dict that limits keys and values what http_headers allows.
http_headers is an static handler key i.e. it applies to handlers with
static_dir or static_files keys. An example of how http_headers is used is
handlers:
- url: /static
static_dir: static
http_headers:
X-Foo-Header: foo value
X-Bar-Header: bar value
"""
DISALLOWED_HEADERS = frozenset([
'content-encoding',
'content-length',
'date',
'server'
])
MAX_HEADER_LENGTH = 500
MAX_HEADER_VALUE_LENGTHS = {
'set-cookie': _MAX_COOKIE_LENGTH,
'set-cookie2': _MAX_COOKIE_LENGTH,
'location': _MAX_URL_LENGTH}
MAX_LEN = 500
class KeyValidator(validation.Validator):
"""Ensures that keys in HttpHeadersDict i.e. header names are valid.
An instance is used as HttpHeadersDict's KEY_VALIDATOR.
"""
def Validate(self, name, unused_key=None):
"""Returns argument, or raises an exception if it is invalid.
HTTP header names are defined by RFC 2616 section 4.2.
Args:
name: HTTP header field value.
unused_key: Unused.
Returns:
name argument, unchanged.
Raises:
appinfo_errors.InvalidHttpHeaderName: argument cannot be used as an HTTP
header name.
"""
original_name = name
if isinstance(name, unicode):
try:
name = name.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderName(
              'HTTP header names must not contain non-ASCII data')
name = name.lower()
if not _HTTP_TOKEN_RE.match(name):
raise appinfo_errors.InvalidHttpHeaderName(
'An HTTP header must be a non-empty RFC 2616 token.')
if name in _HTTP_REQUEST_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%r can only be used in HTTP requests, not responses.'
% original_name)
if name.startswith('x-appengine'):
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header names that begin with X-Appengine are reserved.')
if wsgiref.util.is_hop_by_hop(name):
raise appinfo_errors.InvalidHttpHeaderName(
            'Only end-to-end headers may be used. See RFC 2616 section'
' 13.5.1.')
if name in HttpHeadersDict.DISALLOWED_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%s is a disallowed header.' % name)
return original_name
class ValueValidator(validation.Validator):
"""Ensures that values in HttpHeadersDict i.e. header values are valid.
An instance is used as HttpHeadersDict's VALUE_VALIDATOR.
"""
def Validate(self, value, key=None):
"""Returns value, or raises an exception if it is invalid.
According to RFC 2616 section 4.2, header field values must consist "of
either *TEXT or combinations of token, separators, and quoted-string".
TEXT = <any OCTET except CTLs, but including LWS>
Args:
value: HTTP header field value.
key: HTTP header field name.
Returns:
value argument.
Raises:
appinfo_errors.InvalidHttpHeaderValue: argument cannot be used as an
HTTP header value.
"""
if isinstance(value, unicode):
try:
value = value.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header values must not contain non-ASCII data')
key = key.lower()
printable = set(string.printable[:-5])
if not all(char in printable for char in value):
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header field values must consist of printable characters.')
HttpHeadersDict.ValueValidator.AssertHeaderNotTooLong(key, value)
return value
@staticmethod
def AssertHeaderNotTooLong(name, value):
header_length = len('%s: %s\r\n' % (name, value))
if header_length >= HttpHeadersDict.MAX_HEADER_LENGTH:
try:
max_len = HttpHeadersDict.MAX_HEADER_VALUE_LENGTHS[name]
except KeyError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header (name + value) is too long.')
if len(value) > max_len:
insert = name, len(value), max_len
raise appinfo_errors.InvalidHttpHeaderValue(
              '%r header value has length %d, which exceeds the maximum allowed,'
' %d.' % insert)
KEY_VALIDATOR = KeyValidator()
VALUE_VALIDATOR = ValueValidator()
def Get(self, header_name):
"""Gets a header value.
Args:
header_name: HTTP header name to look for.
Returns:
A header value that corresponds to header_name. If more than one such
value is in self, one of the values is selected arbitrarily, and
returned. The selection is not deterministic.
"""
for name in self:
if name.lower() == header_name.lower():
return self[name]
def __setitem__(self, key, value):
is_addition = self.Get(key) is None
if is_addition and len(self) >= self.MAX_LEN:
raise appinfo_errors.TooManyHttpHeaders(
'Tried to add another header when the current set of HTTP headers'
' already has the maximum allowed number of headers, %d.'
% HttpHeadersDict.MAX_LEN)
super(HttpHeadersDict, self).__setitem__(key, value)
class URLMap(HandlerBase):
"""Mapping from URLs to handlers.
This class acts like something of a union type. Its purpose is to
describe a mapping between a set of URLs and their handlers. What
handler type a given instance has is determined by which handler-id
attribute is used.
Each mapping can have one and only one handler type. Attempting to
use more than one handler-id attribute will cause an UnknownHandlerType
to be raised during validation. Failure to provide any handler-id
attributes will cause MissingHandlerType to be raised during validation.
The regular expression used by the url field will be used to match against
the entire URL path and query string of the request. This means that
partial maps will not be matched. Specifying a url, say /admin, is the
same as matching against the regular expression '^/admin$'. Don't begin
  your matching url with ^ or end it with $. These regular expressions
won't be accepted and will raise ValueError.
Attributes:
login: Whether or not login is required to access URL. Defaults to
'optional'.
secure: Restriction on the protocol which can be used to serve
this URL/handler (HTTP, HTTPS or either).
    url: Regular expression used to fully match against the request URL's path.
See Special Cases for using static_dir.
static_files: Handler id attribute that maps URL to the appropriate
      file. Can use regex backreferences to the string matched by url.
upload: Regular expression used by the application configuration
program to know which files are uploaded as blobs. It's very
difficult to determine this using just the url and static_files
so this attribute must be included. Required when defining a
static_files mapping.
A matching file name must fully match against the upload regex, similar
to how url is matched against the request path. Do not begin upload
with ^ or end it with $.
static_dir: Handler id that maps the provided url to a sub-directory
within the application directory. See Special Cases.
mime_type: When used with static_files and static_dir the mime-type
of files served from those directories are overridden with this
value.
    script: Handler id that maps URLs to a script handler within the application
directory that will run using CGI.
position: Used in AppInclude objects to specify whether a handler
should be inserted at the beginning of the primary handler list or at the
end. If 'tail' is specified, the handler is inserted at the end,
otherwise, the handler is inserted at the beginning. This means that
'head' is the effective default.
expiration: When used with static files and directories, the time delta to
use for cache expiration. Has the form '4d 5h 30m 15s', where each letter
signifies days, hours, minutes, and seconds, respectively. The 's' for
seconds may be omitted. Only one amount must be specified, combining
multiple amounts is optional. Example good values: '10', '1d 6h',
'1h 30m', '7d 7d 7d', '5m 30'.
api_endpoint: Handler id that identifies endpoint as an API endpoint,
calls that terminate here will be handled by the api serving framework.
Special cases:
When defining a static_dir handler, do not use a regular expression
in the url attribute. Both the url and static_dir attributes are
automatically mapped to these equivalents:
<url>/(.*)
<static_dir>/\1
For example:
url: /images
static_dir: images_folder
Is the same as this static_files declaration:
url: /images/(.*)
static_files: images_folder/\1
upload: images_folder/(.*)
"""
ATTRIBUTES = {
HANDLER_STATIC_FILES: validation.Optional(_FILES_REGEX),
UPLOAD: validation.Optional(_FILES_REGEX),
APPLICATION_READABLE: validation.Optional(bool),
HANDLER_STATIC_DIR: validation.Optional(_FILES_REGEX),
MIME_TYPE: validation.Optional(str),
EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
REQUIRE_MATCHING_FILE: validation.Optional(bool),
HTTP_HEADERS: validation.Optional(HttpHeadersDict),
POSITION: validation.Optional(validation.Options(POSITION_HEAD,
POSITION_TAIL)),
HANDLER_API_ENDPOINT: validation.Optional(validation.Options(
(ON, ON_ALIASES),
(OFF, OFF_ALIASES))),
}
ATTRIBUTES.update(HandlerBase.ATTRIBUTES)
COMMON_FIELDS = set([URL, LOGIN, AUTH_FAIL_ACTION, SECURE])
ALLOWED_FIELDS = {
HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION,
REQUIRE_MATCHING_FILE, HTTP_HEADERS,
APPLICATION_READABLE),
HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION, REQUIRE_MATCHING_FILE,
HTTP_HEADERS, APPLICATION_READABLE),
HANDLER_SCRIPT: (POSITION),
HANDLER_API_ENDPOINT: (POSITION, SCRIPT),
}
def GetHandler(self):
"""Get handler for mapping.
Returns:
Value of the handler (determined by handler id attribute).
"""
return getattr(self, self.GetHandlerType())
def GetHandlerType(self):
"""Get handler type of mapping.
Returns:
Handler type determined by which handler id attribute is set.
Raises:
      UnknownHandlerType: when none of the handler id attributes are set.
UnexpectedHandlerAttribute: when an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: when the handler is missing a
required attribute for its handler type.
"""
if getattr(self, HANDLER_API_ENDPOINT) is not None:
mapping_type = HANDLER_API_ENDPOINT
else:
for id_field in URLMap.ALLOWED_FIELDS.iterkeys():
if getattr(self, id_field) is not None:
mapping_type = id_field
break
else:
raise appinfo_errors.UnknownHandlerType(
'Unknown url handler type.\n%s' % str(self))
allowed_fields = URLMap.ALLOWED_FIELDS[mapping_type]
for attribute in self.ATTRIBUTES.iterkeys():
if (getattr(self, attribute) is not None and
not (attribute in allowed_fields or
attribute in URLMap.COMMON_FIELDS or
attribute == mapping_type)):
raise appinfo_errors.UnexpectedHandlerAttribute(
'Unexpected attribute "%s" for mapping type %s.' %
(attribute, mapping_type))
if mapping_type == HANDLER_STATIC_FILES and not self.upload:
raise appinfo_errors.MissingHandlerAttribute(
'Missing "%s" attribute for URL "%s".' % (UPLOAD, self.url))
return mapping_type
def CheckInitialized(self):
"""Adds additional checking to make sure handler has correct fields.
    In addition to the normal Validated checks, this calls GetHandlerType,
    which validates that all the handler fields are configured
    properly.
Raises:
UnknownHandlerType: when none of the handler id attributes are set.
UnexpectedHandlerAttribute: when an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: when the handler is missing a required
attribute for its handler type.
ContentTypeSpecifiedMultipleTimes: when mime_type is inconsistent with
http_headers.
"""
super(URLMap, self).CheckInitialized()
if self.GetHandlerType() in (STATIC_DIR, STATIC_FILES):
self.AssertUniqueContentType()
def AssertUniqueContentType(self):
"""Makes sure that self.http_headers is consistent with self.mime_type.
Assumes self is a static handler i.e. either self.static_dir or
self.static_files is set (to not None).
Raises:
appinfo_errors.ContentTypeSpecifiedMultipleTimes: Raised when
self.http_headers contains a Content-Type header, and self.mime_type is
set. For example, the following configuration would be rejected:
handlers:
- url: /static
static_dir: static
mime_type: text/html
http_headers:
content-type: text/html
As this example shows, a configuration will be rejected when
http_headers and mime_type specify a content type, even when they
specify the same content type.
"""
used_both_fields = self.mime_type and self.http_headers
if not used_both_fields:
return
content_type = self.http_headers.Get('Content-Type')
if content_type is not None:
raise appinfo_errors.ContentTypeSpecifiedMultipleTimes(
'http_header specified a Content-Type header of %r in a handler that'
' also specified a mime_type of %r.' % (content_type, self.mime_type))
def FixSecureDefaults(self):
"""Force omitted 'secure: ...' handler fields to 'secure: optional'.
The effect is that handler.secure is never equal to the (nominal)
default.
See http://b/issue?id=2073962.
"""
if self.secure == SECURE_DEFAULT:
self.secure = SECURE_HTTP_OR_HTTPS
def WarnReservedURLs(self):
"""Generates a warning for reserved URLs.
See:
https://developers.google.com/appengine/docs/python/config/appconfig#Reserved_URLs
"""
if self.url == '/form':
logging.warning(
'The URL path "/form" is reserved and will not be matched.')
def ErrorOnPositionForAppInfo(self):
"""Raises an error if position is specified outside of AppInclude objects.
"""
if self.position:
raise appinfo_errors.PositionUsedInAppYamlHandler(
'The position attribute was specified for this handler, but this is '
'an app.yaml file. Position attribute is only valid for '
'include.yaml files.')
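# --- Illustrative sketch (not part of the original module) -----------------
# The URLMap docstring above explains that a static_dir handler is shorthand
# for an equivalent static_files declaration. This is a minimal,
# self-contained sketch of that mapping using plain dicts rather than SDK
# objects; the function name and handler values are invented for
# illustration.
def _static_dir_to_static_files(handler):
    """Rewrite a {'url', 'static_dir'} handler into its static_files form."""
    url = handler['url']
    static_dir = handler['static_dir']
    return {
        'url': url + '/(.*)',
        'static_files': static_dir + r'/\1',
        'upload': static_dir + '/(.*)',
    }

# Produces url '/images/(.*)', static_files 'images_folder/\1',
# upload 'images_folder/(.*)', matching the docstring example.
print(_static_dir_to_static_files({'url': '/images', 'static_dir': 'images_folder'}))
# ---------------------------------------------------------------------------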
class AdminConsolePage(validation.Validated):
"""Class representing admin console page in AdminConsole object.
"""
ATTRIBUTES = {
URL: _URL_REGEX,
NAME: _PAGE_NAME_REGEX,
}
class AdminConsole(validation.Validated):
"""Class representing admin console directives in application info.
"""
ATTRIBUTES = {
PAGES: validation.Optional(validation.Repeated(AdminConsolePage)),
}
@classmethod
def Merge(cls, adminconsole_one, adminconsole_two):
"""Return the result of merging two AdminConsole objects."""
if not adminconsole_one or not adminconsole_two:
return adminconsole_one or adminconsole_two
if adminconsole_one.pages:
if adminconsole_two.pages:
adminconsole_one.pages.extend(adminconsole_two.pages)
else:
adminconsole_one.pages = adminconsole_two.pages
return adminconsole_one
class ErrorHandlers(validation.Validated):
"""Class representing error handler directives in application info.
"""
ATTRIBUTES = {
ERROR_CODE: validation.Optional(_ERROR_CODE_REGEX),
FILE: _FILES_REGEX,
MIME_TYPE: validation.Optional(str),
}
class BuiltinHandler(validation.Validated):
"""Class representing builtin handler directives in application info.
Permits arbitrary keys but their values must be described by the
validation.Options object returned by ATTRIBUTES.
"""
class DynamicAttributes(dict):
"""Provide a dictionary object that will always claim to have a key.
This dictionary returns a fixed value for any get operation. The fixed
value passed in as a constructor parameter should be a
validation.Validated object.
"""
def __init__(self, return_value, **parameters):
self.__return_value = return_value
dict.__init__(self, parameters)
def __contains__(self, _):
return True
def __getitem__(self, _):
return self.__return_value
ATTRIBUTES = DynamicAttributes(
validation.Optional(validation.Options((ON, ON_ALIASES),
(OFF, OFF_ALIASES))))
def __init__(self, **attributes):
"""Ensure that all BuiltinHandler objects at least have attribute 'default'.
"""
self.builtin_name = ''
super(BuiltinHandler, self).__init__(**attributes)
def __setattr__(self, key, value):
"""Permit ATTRIBUTES.iteritems() to return set of items that have values.
Whenever validate calls iteritems(), it is always called on ATTRIBUTES,
not on __dict__, so this override is important to ensure that functions
such as ToYAML() return the correct set of keys.
"""
if key == 'builtin_name':
object.__setattr__(self, key, value)
elif not self.builtin_name:
self.ATTRIBUTES[key] = ''
self.builtin_name = key
super(BuiltinHandler, self).__setattr__(key, value)
else:
raise appinfo_errors.MultipleBuiltinsSpecified(
'More than one builtin defined in list element. Each new builtin '
'should be prefixed by "-".')
def __getattr__(self, key):
if key.startswith('_'):
raise AttributeError
return None
def ToDict(self):
"""Convert BuiltinHander object to a dictionary.
Returns:
dictionary of the form: {builtin_handler_name: on/off}
"""
return {self.builtin_name: getattr(self, self.builtin_name)}
@classmethod
def IsDefined(cls, builtins_list, builtin_name):
"""Find if a builtin is defined in a given list of builtin handler objects.
Args:
builtins_list: list of BuiltinHandler objects (typically yaml.builtins)
builtin_name: name of builtin to find whether or not it is defined
Returns:
true if builtin_name is defined by a member of builtins_list,
false otherwise
"""
for b in builtins_list:
if b.builtin_name == builtin_name:
return True
return False
@classmethod
def ListToTuples(cls, builtins_list):
"""Converts a list of BuiltinHandler objects to a list of (name, status)."""
return [(b.builtin_name, getattr(b, b.builtin_name)) for b in builtins_list]
@classmethod
def Validate(cls, builtins_list, runtime=None):
"""Verify that all BuiltinHandler objects are valid and not repeated.
Args:
builtins_list: list of BuiltinHandler objects to validate.
runtime: if set then warnings are generated for builtins that have been
deprecated in the given runtime.
Raises:
InvalidBuiltinFormat: if the name of a BuiltinHandler object
cannot be determined.
DuplicateBuiltinsSpecified: if a builtin handler name is used
more than once in the list.
"""
seen = set()
for b in builtins_list:
if not b.builtin_name:
raise appinfo_errors.InvalidBuiltinFormat(
'Name of builtin for list object %s could not be determined.'
% b)
if b.builtin_name in seen:
raise appinfo_errors.DuplicateBuiltinsSpecified(
'Builtin %s was specified more than once in one yaml file.'
% b.builtin_name)
if b.builtin_name == 'datastore_admin' and runtime == 'python':
logging.warning(
'The datastore_admin builtin is deprecated. You can find '
'information on how to enable it through the Administrative '
'Console here: '
'http://developers.google.com/appengine/docs/adminconsole/'
'datastoreadmin.html')
elif b.builtin_name == 'mapreduce' and runtime == 'python':
logging.warning(
'The mapreduce builtin is deprecated. You can find more '
'information on how to configure and use it here: '
'http://developers.google.com/appengine/docs/python/dataprocessing/'
'overview.html')
seen.add(b.builtin_name)
class ApiConfigHandler(HandlerBase):
"""Class representing api_config handler directives in application info."""
ATTRIBUTES = HandlerBase.ATTRIBUTES
ATTRIBUTES.update({
URL: validation.Regex(_URL_REGEX),
HANDLER_SCRIPT: validation.Regex(_FILES_REGEX)
})
class Library(validation.Validated):
"""Class representing the configuration of a single library."""
ATTRIBUTES = {'name': validation.Type(str),
'version': validation.Type(str)}
def CheckInitialized(self):
"""Raises if the library configuration is not valid."""
super(Library, self).CheckInitialized()
if self.name not in _NAME_TO_SUPPORTED_LIBRARY:
raise appinfo_errors.InvalidLibraryName(
'the library "%s" is not supported' % self.name)
supported_library = _NAME_TO_SUPPORTED_LIBRARY[self.name]
if self.version != 'latest':
if self.version not in supported_library.supported_versions:
raise appinfo_errors.InvalidLibraryVersion(
('%s version "%s" is not supported, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
elif self.version in supported_library.deprecated_versions:
logging.warning(
('%s version "%s" is deprecated, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
class AutomaticScaling(validation.Validated):
"""Class representing automatic scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
MINIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MAXIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MINIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_CONCURRENT_REQUEST: validation.Optional(
_CONCURRENT_REQUESTS_REGEX),
}
class ManualScaling(validation.Validated):
"""Class representing manual scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
INSTANCES: validation.Regex(_INSTANCES_REGEX),
}
class BasicScaling(validation.Validated):
"""Class representing basic scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
MAX_INSTANCES: validation.Regex(_INSTANCES_REGEX),
IDLE_TIMEOUT: validation.Optional(_IDLE_TIMEOUT_REGEX),
}
class VmSettings(validation.ValidatedDict):
"""Class for VM settings.
We don't validate these further because the feature is in flux.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, vm_settings_one, vm_settings_two):
result_vm_settings = (vm_settings_two or {}).copy()
result_vm_settings.update(vm_settings_one or {})
return VmSettings(**result_vm_settings) if result_vm_settings else None
class EnvironmentVariables(validation.ValidatedDict):
"""Class representing a mapping of environment variable key value pairs."""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, env_variables_one, env_variables_two):
"""Merges to EnvironmentVariables instances.
Args:
env_variables_one: The first EnvironmentVariables instance or None.
env_variables_two: The second EnvironmentVariables instance or None.
Returns:
The merged EnvironmentVariables instance, or None if both input instances
are None or empty.
If a variable is specified by both instances, the value from
env_variables_two is used.
"""
result_env_variables = (env_variables_one or {}).copy()
result_env_variables.update(env_variables_two or {})
return (EnvironmentVariables(**result_env_variables)
if result_env_variables else None)
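# --- Illustrative sketch (not part of the original module) -----------------
# The merge rule documented above reduces to a plain dict update in which
# values from env_variables_two win on conflict. The variable names below
# are invented for illustration.
one = {'DJANGO_SETTINGS_MODULE': 'settings.prod', 'LOG_LEVEL': 'info'}
two = {'LOG_LEVEL': 'debug'}
merged = dict(one)
merged.update(two)
# LOG_LEVEL comes from `two`; keys present only in `one` are preserved.
print(merged)
# ---------------------------------------------------------------------------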
def NormalizeVmSettings(appyaml):
"""Normalize Vm settings.
Args:
appyaml: AppInfoExternal instance.
Returns:
Normalized app yaml.
"""
if appyaml.vm:
if not appyaml.vm_settings:
appyaml.vm_settings = VmSettings()
if 'vm_runtime' not in appyaml.vm_settings:
appyaml.vm_settings['vm_runtime'] = appyaml.runtime
appyaml.runtime = 'vm'
# AppScale: Ensure that each module has only one version.
appyaml.version = 'v1'
return appyaml
class AppInclude(validation.Validated):
"""Class representing the contents of an included app.yaml file.
Used for both builtins and includes directives.
"""
ATTRIBUTES = {
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap)),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
MANUAL_SCALING: validation.Optional(ManualScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
}
@classmethod
def MergeManualScaling(cls, appinclude_one, appinclude_two):
"""Takes the greater of <manual_scaling.instances> from the args.
Note that appinclude_one is mutated to be the merged result in this process.
Also, this function needs to be updated if ManualScaling gets additional
fields.
Args:
appinclude_one: object one to merge. Must have a "manual_scaling" field
which contains a ManualScaling().
appinclude_two: object two to merge. Must have a "manual_scaling" field
which contains a ManualScaling().
Returns:
Object that is the result of merging
appinclude_one.manual_scaling.instances and
appinclude_two.manual_scaling.instances. I.e., <appinclude_one>
after the mutations are complete.
"""
def _Instances(appinclude):
if appinclude.manual_scaling:
if appinclude.manual_scaling.instances:
return int(appinclude.manual_scaling.instances)
return None
instances = max(_Instances(appinclude_one), _Instances(appinclude_two))
if instances is not None:
appinclude_one.manual_scaling = ManualScaling(instances=str(instances))
return appinclude_one
@classmethod
def _CommonMergeOps(cls, one, two):
"""This function performs common merge operations."""
AppInclude.MergeManualScaling(one, two)
one.admin_console = AdminConsole.Merge(one.admin_console,
two.admin_console)
one.vm = two.vm or one.vm
one.vm_settings = VmSettings.Merge(one.vm_settings,
two.vm_settings)
one.env_variables = EnvironmentVariables.Merge(one.env_variables,
two.env_variables)
return one
@classmethod
def MergeAppYamlAppInclude(cls, appyaml, appinclude):
"""This function merges an app.yaml file with referenced builtins/includes.
"""
if not appinclude:
return appyaml
if appinclude.handlers:
tail = appyaml.handlers or []
appyaml.handlers = []
for h in appinclude.handlers:
if not h.position or h.position == 'head':
appyaml.handlers.append(h)
else:
tail.append(h)
h.position = None
appyaml.handlers.extend(tail)
appyaml = cls._CommonMergeOps(appyaml, appinclude)
return NormalizeVmSettings(appyaml)
@classmethod
def MergeAppIncludes(cls, appinclude_one, appinclude_two):
"""This function merges the non-referential state of the provided AppInclude
objects. That is, builtins and includes directives are not preserved, but
any static objects are copied into an aggregate AppInclude object that
preserves the directives of both provided AppInclude objects.
Note that appinclude_one is mutated to be the merged result in this process.
Args:
appinclude_one: object one to merge
appinclude_two: object two to merge
Returns:
AppInclude object that is the result of merging the static directives of
appinclude_one and appinclude_two. I.e., <appinclude_one> after the
mutations are complete.
"""
if not appinclude_one or not appinclude_two:
return appinclude_one or appinclude_two
if appinclude_one.handlers:
if appinclude_two.handlers:
appinclude_one.handlers.extend(appinclude_two.handlers)
else:
appinclude_one.handlers = appinclude_two.handlers
return cls._CommonMergeOps(appinclude_one, appinclude_two)
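# --- Illustrative sketch (not part of the original module) -----------------
# Two of the merge rules implemented by AppInclude above, restated with
# plain Python data so the behaviour is easy to see. All values are invented
# for illustration.
#
# 1) MergeManualScaling keeps the greater 'instances' value. The SDK code
#    relies on Python 2's max() treating None as smaller than any int; this
#    sketch makes the None handling explicit.
def _merged_instances(one_instances, two_instances):
    candidates = [n for n in (one_instances, two_instances) if n is not None]
    return max(candidates) if candidates else None

print(_merged_instances(3, None))  # 3
print(_merged_instances(3, 5))     # 5

# 2) MergeAppYamlAppInclude orders handlers as: included handlers marked
#    'head' (or with no position), then the app.yaml handlers, then included
#    handlers marked 'tail'.
app_handlers = ['app-1', 'app-2']
include_handlers = [('inc-head', 'head'), ('inc-tail', 'tail')]
merged_handlers, tail = [], list(app_handlers)
for name, position in include_handlers:
    if position in (None, 'head'):
        merged_handlers.append(name)
    else:
        tail.append(name)
merged_handlers.extend(tail)
print(merged_handlers)  # ['inc-head', 'app-1', 'app-2', 'inc-tail']
# ---------------------------------------------------------------------------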
class AppInfoExternal(validation.Validated):
"""Class representing users application info.
This class is passed to a yaml_object builder to provide the validation
for the application information file format parser.
Attributes:
application: Unique identifier for application.
version: Application's major version.
runtime: Runtime used by application.
api_version: Which version of APIs to use.
source_language: Optional specification of the source language.
For example we specify "php-quercus" if this is a Java app
that was generated from PHP source using Quercus
handlers: List of URL handlers.
default_expiration: Default time delta to use for cache expiration for
all static files, unless they have their own specific 'expiration' set.
See the URLMap.expiration field's documentation for more information.
skip_files: An re object. Files that match this regular expression will
not be uploaded by appcfg.py. For example:
skip_files: |
.svn.*|
#.*#
nobuild_files: An re object. Files that match this regular expression will
not be built into the app. Go only.
api_config: URL root and script/servlet path for enhanced api serving
"""
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
MODULE: validation.Optional(MODULE_ID_RE_STRING),
VERSION: validation.Optional(MODULE_VERSION_ID_RE_STRING),
RUNTIME: RUNTIME_RE_STRING,
API_VERSION: API_VERSION_RE_STRING,
INSTANCE_CLASS: validation.Optional(_INSTANCE_CLASS_REGEX),
SOURCE_LANGUAGE: validation.Optional(
validation.Regex(SOURCE_LANGUAGE_RE_STRING)),
AUTOMATIC_SCALING: validation.Optional(AutomaticScaling),
MANUAL_SCALING: validation.Optional(ManualScaling),
BASIC_SCALING: validation.Optional(BasicScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap)),
LIBRARIES: validation.Optional(validation.Repeated(Library)),
SERVICES: validation.Optional(validation.Repeated(
validation.Regex(_SERVICE_RE_STRING))),
DEFAULT_EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
SKIP_FILES: validation.RegexStr(default=DEFAULT_SKIP_FILES),
NOBUILD_FILES: validation.RegexStr(default=DEFAULT_NOBUILD_FILES),
DERIVED_FILE_TYPE: validation.Optional(validation.Repeated(
validation.Options(JAVA_PRECOMPILED, PYTHON_PRECOMPILED))),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
ERROR_HANDLERS: validation.Optional(validation.Repeated(ErrorHandlers)),
BACKENDS: validation.Optional(validation.Repeated(
backendinfo.BackendEntry)),
THREADSAFE: validation.Optional(bool),
DATASTORE_AUTO_ID_POLICY: validation.Optional(
validation.Options(DATASTORE_ID_POLICY_LEGACY,
DATASTORE_ID_POLICY_DEFAULT)),
API_CONFIG: validation.Optional(ApiConfigHandler),
CODE_LOCK: validation.Optional(bool),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
PAGESPEED: validation.Optional(pagespeedinfo.PagespeedEntry),
}
_skip_runtime_checks = False
def CheckInitialized(self):
"""Performs non-regex-based validation.
The following are verified:
- At least one url mapping is provided in the URL mappers.
- Number of url mappers doesn't exceed MAX_URL_MAPS.
- Major version does not contain the string -dot-.
- If api_endpoints are defined, an api_config stanza must be defined.
- If the runtime is python27 and threadsafe is set, then no CGI handlers
can be used.
- That the version name doesn't start with BUILTIN_NAME_PREFIX
Raises:
DuplicateLibrary: if the same library name is specified more than once.
MissingURLMapping: if no URLMap object is present in the object.
TooManyURLMappings: if there are too many URLMap entries.
MissingApiConfig: if api_endpoints exist without an api_config.
MissingThreadsafe: if threadsafe is not set but the runtime requires it.
ThreadsafeWithCgiHandler: if the runtime is python27, threadsafe is set
and CGI handlers are specified.
TooManyScalingSettingsError: if more than one scaling settings block is
present.
"""
super(AppInfoExternal, self).CheckInitialized()
if not self.handlers and not self.builtins and not self.includes:
raise appinfo_errors.MissingURLMapping(
'No URLMap entries found in application configuration')
if self.handlers and len(self.handlers) > MAX_URL_MAPS:
raise appinfo_errors.TooManyURLMappings(
'Found more than %d URLMap entries in application configuration' %
MAX_URL_MAPS)
if (self.threadsafe is None and
self.runtime == 'python27' and
not self._skip_runtime_checks):
raise appinfo_errors.MissingThreadsafe(
'threadsafe must be present and set to either "yes" or "no"')
if self.auto_id_policy == DATASTORE_ID_POLICY_LEGACY:
datastore_auto_ids_url = ('http://developers.google.com/'
'appengine/docs/python/datastore/'
'entities#Kinds_and_Identifiers')
appcfg_auto_ids_url = ('http://developers.google.com/appengine/docs/'
'python/config/appconfig#auto_id_policy')
logging.warning(
"You have set the datastore auto_id_policy to 'legacy'. It is "
"recommended that you select 'default' instead.\n"
"Legacy auto ids are deprecated. You can continue to allocate\n"
"legacy ids manually using the allocate_ids() API functions.\n"
"For more information see:\n"
+ datastore_auto_ids_url + '\n' + appcfg_auto_ids_url + '\n')
if self.libraries:
if self.runtime != 'python27' and not self._skip_runtime_checks:
raise appinfo_errors.RuntimeDoesNotSupportLibraries(
'libraries entries are only supported by the "python27" runtime')
library_names = [library.name for library in self.libraries]
for library_name in library_names:
if library_names.count(library_name) > 1:
raise appinfo_errors.DuplicateLibrary(
'Duplicate library entry for %s' % library_name)
if self.version and self.version.find(ALTERNATE_HOSTNAME_SEPARATOR) != -1:
raise validation.ValidationError(
'Version "%s" cannot contain the string "%s"' % (
self.version, ALTERNATE_HOSTNAME_SEPARATOR))
if self.version and self.version.startswith(BUILTIN_NAME_PREFIX):
raise validation.ValidationError(
('Version "%s" cannot start with "%s" because it is a '
'reserved version name prefix.') % (self.version,
BUILTIN_NAME_PREFIX))
if self.handlers:
api_endpoints = [handler.url for handler in self.handlers
if handler.GetHandlerType() == HANDLER_API_ENDPOINT]
if api_endpoints and not self.api_config:
raise appinfo_errors.MissingApiConfig(
'An api_endpoint handler was specified, but the required '
'api_config stanza was not configured.')
if (self.threadsafe and
self.runtime == 'python27' and
not self._skip_runtime_checks):
for handler in self.handlers:
if (handler.script and (handler.script.endswith('.py') or
'/' in handler.script)):
raise appinfo_errors.ThreadsafeWithCgiHandler(
'threadsafe cannot be enabled with CGI handler: %s' %
handler.script)
if sum([bool(self.automatic_scaling),
bool(self.manual_scaling),
bool(self.basic_scaling)]) > 1:
raise appinfo_errors.TooManyScalingSettingsError(
"There may be only one of 'automatic_scaling', 'manual_scaling', "
"or 'basic_scaling'.")
def GetAllLibraries(self):
"""Returns a list of all Library instances active for this configuration.
Returns:
The list of active Library instances for this configuration. This includes
directly-specified libraries as well as any required dependencies.
"""
if not self.libraries:
return []
library_names = set(library.name for library in self.libraries)
required_libraries = []
for library in self.libraries:
for required_name, required_version in REQUIRED_LIBRARIES.get(
(library.name, library.version), []):
if required_name not in library_names:
required_libraries.append(Library(name=required_name,
version=required_version))
return [Library(**library.ToDict())
for library in self.libraries + required_libraries]
def GetNormalizedLibraries(self):
"""Returns a list of normalized Library instances for this configuration.
Returns:
The list of active Library instances for this configuration. This includes
directly-specified libraries, their required dependencies as well as any
libraries enabled by default. Any libraries with "latest" as their version
will be replaced with the latest available version.
"""
libraries = self.GetAllLibraries()
enabled_libraries = set(library.name for library in libraries)
for library in _SUPPORTED_LIBRARIES:
if library.default_version and library.name not in enabled_libraries:
libraries.append(Library(name=library.name,
version=library.default_version))
for library in libraries:
if library.version == 'latest':
library.version = _NAME_TO_SUPPORTED_LIBRARY[
library.name].supported_versions[-1]
return libraries
def ApplyBackendSettings(self, backend_name):
"""Applies settings from the indicated backend to the AppInfoExternal.
Backend entries may contain directives that modify other parts of the
app.yaml, such as the 'start' directive, which adds a handler for the start
request. This method performs those modifications.
Args:
backend_name: The name of a backend defined in 'backends'.
Raises:
BackendNotFound: If the indicated backend was not listed in 'backends'.
"""
if backend_name is None:
return
if self.backends is None:
raise appinfo_errors.BackendNotFound
self.version = backend_name
match = None
for backend in self.backends:
if backend.name != backend_name:
continue
if match:
raise appinfo_errors.DuplicateBackend
else:
match = backend
if match is None:
raise appinfo_errors.BackendNotFound
if match.start is None:
return
start_handler = URLMap(url=_START_PATH, script=match.start)
self.handlers.insert(0, start_handler)
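# --- Illustrative sketch (not part of the original module) -----------------
# GetAllLibraries() above appends required dependencies for each requested
# (name, version) pair unless the dependency was already requested. The
# REQUIRED table and library names below are invented for illustration; the
# real mapping is REQUIRED_LIBRARIES, defined elsewhere in this module.
REQUIRED = {('jinja2', '2.6'): [('markupsafe', '0.15')]}

def _expand(requested):
    names = set(name for name, _ in requested)
    extra = []
    for name, version in requested:
        for req_name, req_version in REQUIRED.get((name, version), []):
            if req_name not in names:
                extra.append((req_name, req_version))
    return requested + extra

print(_expand([('jinja2', '2.6')]))  # [('jinja2', '2.6'), ('markupsafe', '0.15')]
# ---------------------------------------------------------------------------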
def ValidateHandlers(handlers, is_include_file=False):
"""Validates a list of handler (URLMap) objects.
Args:
handlers: A list of handler (URLMap) objects.
is_include_file: If true, indicates that we are performing validation
for handlers in an AppInclude file, which may contain special directives.
"""
if not handlers:
return
for handler in handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if not is_include_file:
handler.ErrorOnPositionForAppInfo()
def LoadSingleAppInfo(app_info):
"""Load a single AppInfo object where one and only one is expected.
Args:
app_info: A file-like object or string. If it is a string, parse it as
a configuration file. If it is a file-like object, read in data and
parse.
Returns:
An instance of AppInfoExternal as loaded from a YAML file.
Raises:
ValueError: if a specified service is not valid.
EmptyConfigurationFile: when there are no documents in YAML file.
MultipleConfigurationFile: when there is more than one document in YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_info)
app_infos = handler.GetResults()
if len(app_infos) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(app_infos) > 1:
raise appinfo_errors.MultipleConfigurationFile()
appyaml = app_infos[0]
ValidateHandlers(appyaml.handlers)
if appyaml.builtins:
BuiltinHandler.Validate(appyaml.builtins, appyaml.runtime)
return NormalizeVmSettings(appyaml)
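# --- Illustrative sketch (not part of the original module) -----------------
# Hedged usage example for LoadSingleAppInfo(): parse a minimal app.yaml
# document from a string. The configuration values are invented and chosen
# only to satisfy the checks in AppInfoExternal.CheckInitialized above; a
# real project would read the file from disk instead.
def _example_load_single_app_info():
    minimal_app_yaml = (
        'application: myapp\n'
        'version: v1\n'
        'runtime: python27\n'
        'api_version: 1\n'
        'threadsafe: true\n'
        'handlers:\n'
        '- url: /.*\n'
        '  script: main.app\n')
    config = LoadSingleAppInfo(minimal_app_yaml)
    return config.application, config.runtime  # ('myapp', 'python27')
# ---------------------------------------------------------------------------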
class AppInfoSummary(validation.Validated):
"""This class contains only basic summary information about an app.
It is used to pass back information about the newly created app to users
after a new version has been created.
"""
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
MAJOR_VERSION: MODULE_VERSION_ID_RE_STRING,
MINOR_VERSION: validation.TYPE_LONG
}
def LoadAppInclude(app_include):
"""Load a single AppInclude object where one and only one is expected.
Args:
app_include: A file-like object or string. If it is a string, parse it as
a configuration file. If it is a file-like object, read in data and
parse.
Returns:
An instance of AppInclude as loaded from a YAML file.
Raises:
EmptyConfigurationFile: when there are no documents in YAML file.
MultipleConfigurationFile: when there is more than one document in YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInclude)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_include)
includes = handler.GetResults()
if len(includes) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(includes) > 1:
raise appinfo_errors.MultipleConfigurationFile()
includeyaml = includes[0]
if includeyaml.handlers:
for handler in includeyaml.handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if includeyaml.builtins:
BuiltinHandler.Validate(includeyaml.builtins)
return includeyaml
def ParseExpiration(expiration):
"""Parses an expiration delta string.
Args:
expiration: String that matches _DELTA_REGEX.
Returns:
Time delta in seconds.
"""
delta = 0
for match in re.finditer(_DELTA_REGEX, expiration):
amount = int(match.group(1))
units = _EXPIRATION_CONVERSIONS.get(match.group(2).lower(), 1)
delta += amount * units
return delta
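# --- Illustrative sketch (not part of the original module) -----------------
# Self-contained restatement of the expiration parsing above, for readers
# without the surrounding module. The regex and conversion table here are
# assumptions based on the documented format ('4d 5h 30m 15s', with a bare
# number meaning seconds); the real values live in _DELTA_REGEX and
# _EXPIRATION_CONVERSIONS elsewhere in this module.
_DELTA_SKETCH_RE = re.compile(r'([0-9]+)([smhd]?)')
_CONVERSIONS_SKETCH = {'s': 1, 'm': 60, 'h': 60 * 60, 'd': 60 * 60 * 24}

def _parse_expiration_sketch(expiration):
    delta = 0
    for amount, unit in _DELTA_SKETCH_RE.findall(expiration):
        delta += int(amount) * _CONVERSIONS_SKETCH.get(unit, 1)
    return delta

print(_parse_expiration_sketch('1d 6h'))  # 108000 seconds
print(_parse_expiration_sketch('5m 30'))  # 330 seconds
# ---------------------------------------------------------------------------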
_file_path_positive_re = re.compile(r'^[ 0-9a-zA-Z\._\+/\$-]{1,256}$')
_file_path_negative_1_re = re.compile(r'\.\.|^\./|\.$|/\./|^-|^_ah/')
_file_path_negative_2_re = re.compile(r'//|/$')
_file_path_negative_3_re = re.compile(r'^ | $|/ | /')
def ValidFilename(filename):
"""Determines if filename is valid.
filename must be a valid pathname.
- It must contain only letters, numbers, _, +, /, $, ., and -.
- It must be less than 256 chars.
- It must not contain "/./", "/../", or "//".
- It must not end in "/".
- All spaces must be in the middle of a directory or file name.
Args:
filename: The filename to validate.
Returns:
An error string if the filename is invalid. Returns '' if the filename
is valid.
"""
if _file_path_positive_re.match(filename) is None:
return 'Invalid character in filename: %s' % filename
if _file_path_negative_1_re.search(filename) is not None:
return ('Filename cannot contain "." or ".." '
'or start with "-" or "_ah/": %s' %
filename)
if _file_path_negative_2_re.search(filename) is not None:
return 'Filename cannot have trailing / or contain //: %s' % filename
if _file_path_negative_3_re.search(filename) is not None:
return 'Any spaces must be in the middle of a filename: %s' % filename
return ''
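# --- Illustrative sketch (not part of the original module) -----------------
# Quick checks of ValidFilename() against the rules listed in its docstring.
# An empty return value means the filename is acceptable; the sample paths
# are invented for illustration.
print(ValidFilename('assets/logo-v2.png') == '')   # True  (valid path)
print(ValidFilename('../secret.txt') != '')        # True  ('..' is rejected)
print(ValidFilename('static//css') != '')          # True  ('//' is rejected)
# ---------------------------------------------------------------------------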
|
[] |
[] |
[
"APPENGINE_RUNTIME"
] |
[]
|
["APPENGINE_RUNTIME"]
|
python
| 1 | 0 | |
go/test/endtoend/vault/vault_test.go
|
/*
Copyright 2020 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vault
import (
"bufio"
"bytes"
"context"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"path"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/vt/log"
)
var (
createTable = `create table product (id bigint(20) primary key, name char(10), created bigint(20));`
insertTable = `insert into product (id, name, created) values(%d, '%s', unix_timestamp());`
)
var (
clusterInstance *cluster.LocalProcessCluster
master *cluster.Vttablet
replica *cluster.Vttablet
cell = "zone1"
hostname = "localhost"
keyspaceName = "ks"
shardName = "0"
dbName = "vt_ks"
mysqlUsers = []string{"vt_dba", "vt_app", "vt_appdebug", "vt_repl", "vt_filtered"}
mysqlPassword = "password"
vtgateUser = "vtgate_user"
vtgatePassword = "password123"
commonTabletArg = []string{
"-vreplication_healthcheck_topology_refresh", "1s",
"-vreplication_healthcheck_retry_delay", "1s",
"-vreplication_retry_delay", "1s",
"-degraded_threshold", "5s",
"-lock_tables_timeout", "5s",
"-watch_replication_stream",
// Frequently reload schema, generating some tablet traffic,
// so we can speed up token refresh
"-queryserver-config-schema-reload-time", "5",
"-serving_state_grace_period", "1s"}
vaultTabletArg = []string{
"-db-credentials-server", "vault",
"-db-credentials-vault-timeout", "3s",
"-db-credentials-vault-path", "kv/prod/dbcreds",
// This is overridden by our env VAULT_ADDR
"-db-credentials-vault-addr", "https://127.0.0.1:8200",
// This is overridden by our env VAULT_CACERT
"-db-credentials-vault-tls-ca", "/path/to/ca.pem",
// This is provided by our env VAULT_ROLEID
//"-db-credentials-vault-roleid", "34644576-9ffc-8bb5-d046-4a0e41194e15",
// Contents of this file provided by our env VAULT_SECRETID
//"-db-credentials-vault-secretidfile", "/path/to/file/containing/secret_id",
// Make this small, so we can get a renewal
"-db-credentials-vault-ttl", "21s"}
vaultVTGateArg = []string{
"-mysql_auth_server_impl", "vault",
"-mysql_auth_vault_timeout", "3s",
"-mysql_auth_vault_path", "kv/prod/vtgatecreds",
// This is overridden by our env VAULT_ADDR
"-mysql_auth_vault_addr", "https://127.0.0.1:8200",
// This is overridden by our env VAULT_CACERT
"-mysql_auth_vault_tls-ca", "/path/to/ca.pem",
// This is provided by our env VAULT_ROLEID
//"-mysql_auth_vault_roleid", "34644576-9ffc-8bb5-d046-4a0e41194e15",
// Contents of this file provided by our env VAULT_SECRETID
//"-mysql_auth_vault_role_secretidfile", "/path/to/file/containing/secret_id",
// Make this small, so we can get a renewal
"-mysql_auth_vault_ttl", "21s"}
mysqlctlArg = []string{
"-db_dba_password", mysqlPassword}
vttabletLogFileName = "vttablet.INFO"
tokenRenewalString = "Vault client status: token renewed"
)
func TestVaultAuth(t *testing.T) {
defer cluster.PanicHandler(nil)
// Instantiate Vitess Cluster objects and start topo
initializeClusterEarly(t)
defer clusterInstance.Teardown()
// start Vault server
vs := startVaultServer(t, master)
defer vs.stop()
// Wait for Vault server to come up
for i := 0; i < 60; i++ {
time.Sleep(250 * time.Millisecond)
ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", hostname, vs.port1))
if err != nil {
// Vault is now up, we can continue
break
}
ln.Close()
}
roleID, secretID := setupVaultServer(t, vs)
require.NotEmpty(t, roleID)
require.NotEmpty(t, secretID)
// Passing via environment, easier than trying to modify
// vtgate/vttablet flags within our test machinery
os.Setenv("VAULT_ROLEID", roleID)
os.Setenv("VAULT_SECRETID", secretID)
// Bring up rest of the Vitess cluster
initializeClusterLate(t)
// Create a table
_, err := master.VttabletProcess.QueryTablet(createTable, keyspaceName, true)
require.NoError(t, err)
// This tests the vtgate Vault auth & indirectly vttablet Vault auth too
insertRow(t, 1, "prd-1")
insertRow(t, 2, "prd-2")
cluster.VerifyRowsInTabletForTable(t, replica, keyspaceName, 2, "product")
// Sleep for a while; giving enough time for a token renewal
// and it making it into the (asynchronous) log
time.Sleep(30 * time.Second)
// Check the log for the Vault token renewal message
// If we don't see it, that is a test failure
logContents, _ := ioutil.ReadFile(path.Join(clusterInstance.TmpDirectory, vttabletLogFileName))
require.True(t, bytes.Contains(logContents, []byte(tokenRenewalString)))
}
func startVaultServer(t *testing.T, masterTablet *cluster.Vttablet) *VaultServer {
vs := &VaultServer{
address: hostname,
port1: clusterInstance.GetAndReservePort(),
port2: clusterInstance.GetAndReservePort(),
}
err := vs.start()
require.NoError(t, err)
return vs
}
// Setup everything we need in the Vault server
func setupVaultServer(t *testing.T, vs *VaultServer) (string, string) {
// The setup script uses these environment variables
// We also reuse VAULT_ADDR and VAULT_CACERT later on
os.Setenv("VAULT", vs.execPath)
os.Setenv("VAULT_ADDR", fmt.Sprintf("https://%s:%d", vs.address, vs.port1))
os.Setenv("VAULT_CACERT", path.Join(os.Getenv("PWD"), vaultCAFileName))
setup := exec.Command(
"/bin/bash",
path.Join(os.Getenv("PWD"), vaultSetupScript),
)
logFilePath := path.Join(vs.logDir, "log_setup.txt")
logFile, _ := os.Create(logFilePath)
setup.Stderr = logFile
setup.Stdout = logFile
setup.Env = append(setup.Env, os.Environ()...)
log.Infof("Running Vault setup command: %v", strings.Join(setup.Args, " "))
err := setup.Start()
if err != nil {
log.Errorf("Error during Vault setup: %v", err)
}
setup.Wait()
var secretID, roleID string
file, err := os.Open(logFilePath)
if err != nil {
log.Error(err)
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
if strings.HasPrefix(scanner.Text(), "ROLE_ID=") {
roleID = strings.Split(scanner.Text(), "=")[1]
} else if strings.HasPrefix(scanner.Text(), "SECRET_ID=") {
secretID = strings.Split(scanner.Text(), "=")[1]
}
}
if err := scanner.Err(); err != nil {
log.Error(err)
}
return roleID, secretID
}
// Setup cluster object and start topo
// We need this before vault, because we re-use the port reservation code
func initializeClusterEarly(t *testing.T) {
clusterInstance = cluster.NewCluster(cell, hostname)
// Start topo server
err := clusterInstance.StartTopo()
require.NoError(t, err)
}
func initializeClusterLate(t *testing.T) {
// Start keyspace
keyspace := &cluster.Keyspace{
Name: keyspaceName,
}
clusterInstance.Keyspaces = append(clusterInstance.Keyspaces, *keyspace)
shard := &cluster.Shard{
Name: shardName,
}
master = clusterInstance.NewVttabletInstance("replica", 0, "")
// We don't really need the replica to test this feature
// but keeping it in to exercise the vt_repl user/password path
replica = clusterInstance.NewVttabletInstance("replica", 0, "")
shard.Vttablets = []*cluster.Vttablet{master, replica}
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, commonTabletArg...)
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, vaultTabletArg...)
clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, vaultVTGateArg...)
err := clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard})
require.NoError(t, err)
// Start MySQL
var mysqlCtlProcessList []*exec.Cmd
for _, shard := range clusterInstance.Keyspaces[0].Shards {
for _, tablet := range shard.Vttablets {
proc, err := tablet.MysqlctlProcess.StartProcess()
require.NoError(t, err)
mysqlCtlProcessList = append(mysqlCtlProcessList, proc)
}
}
// Wait for MySQL startup
for _, proc := range mysqlCtlProcessList {
err = proc.Wait()
require.NoError(t, err)
}
for _, tablet := range []*cluster.Vttablet{master, replica} {
for _, user := range mysqlUsers {
query := fmt.Sprintf("ALTER USER '%s'@'%s' IDENTIFIED BY '%s';", user, hostname, mysqlPassword)
_, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false)
// Reset after the first ALTER, or we lock ourselves out.
tablet.VttabletProcess.DbPassword = mysqlPassword
if err != nil {
query = fmt.Sprintf("ALTER USER '%s'@'%%' IDENTIFIED BY '%s';", user, mysqlPassword)
_, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false)
require.NoError(t, err)
}
}
query := fmt.Sprintf("create database %s;", dbName)
_, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false)
require.NoError(t, err)
tablet.VttabletProcess.EnableSemiSync = true
err = tablet.VttabletProcess.Setup()
require.NoError(t, err)
// Modify mysqlctl password too, or teardown will be locked out
tablet.MysqlctlProcess.ExtraArgs = append(tablet.MysqlctlProcess.ExtraArgs, mysqlctlArg...)
}
err = clusterInstance.VtctlclientProcess.InitShardMaster(keyspaceName, shard.Name, cell, master.TabletUID)
require.NoError(t, err)
// Start vtgate
err = clusterInstance.StartVtgate()
require.NoError(t, err)
}
func insertRow(t *testing.T, id int, productName string) {
ctx := context.Background()
vtParams := mysql.ConnParams{
Host: clusterInstance.Hostname,
Port: clusterInstance.VtgateMySQLPort,
Uname: vtgateUser,
Pass: vtgatePassword,
}
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
insertSmt := fmt.Sprintf(insertTable, id, productName)
_, err = conn.ExecuteFetch(insertSmt, 1000, true)
require.NoError(t, err)
}
|
[
"\"PWD\"",
"\"PWD\""
] |
[] |
[
"PWD"
] |
[]
|
["PWD"]
|
go
| 1 | 0 | |
Algorithms/2_Implementation/8.py
|
# https://www.hackerrank.com/challenges/counting-valleys/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'countingValleys' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER steps
# 2. STRING path
#
def countingValleys(steps, path):
alt = 0
valley = 0
for z in path:
if(z=='U'):
alt+=1
if(alt==0): valley+=1
else: alt-=1
return valley
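# Illustrative sanity check (not part of the original submission): on the
# well-known sample walk "UDDDUDUU" (8 steps) the hiker dips below sea level
# exactly once, so countingValleys should return 1.
assert countingValleys(8, "UDDDUDUU") == 1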
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
steps = int(input().strip())
path = input()
result = countingValleys(steps, path)
fptr.write(str(result) + '\n')
fptr.close()
|
[] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
python
| 1 | 0 | |
vendor/github.com/Azure/azure-storage-blob-go/2016-05-31/azblob/zt_examples_test.go
|
package azblob
import (
"bytes"
"context"
"encoding/base64"
"encoding/binary"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
)
// https://godoc.org/github.com/fluhus/godoc-tricks
func accountInfo() (string, string) {
return os.Getenv("ACCOUNT_NAME"), os.Getenv("ACCOUNT_KEY")
}
// This example shows how to get started using the Azure Storage Blob SDK for Go.
func Example() {
// From the Azure portal, get your Storage account's name and account key.
accountName, accountKey := accountInfo()
// Use your Storage account's name and key to create a credential object; this is used to access your account.
credential := NewSharedKeyCredential(accountName, accountKey)
// Create a request pipeline that is used to process HTTP(S) requests and responses. It requires
// your account credentials. In more advanced scenarios, you can configure telemetry, retry policies,
// logging, and other options. Also, you can configure multiple request pipelines for different scenarios.
p := NewPipeline(credential, PipelineOptions{})
// From the Azure portal, get your Storage account blob service URL endpoint.
// The URL typically looks like this:
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName))
// Create a ServiceURL object that wraps the service URL and a request pipeline.
serviceURL := NewServiceURL(*u, p)
// Now, you can use the serviceURL to perform various container and blob operations.
// All HTTP operations allow you to specify a Go context.Context object to control cancellation/timeout.
ctx := context.Background() // This example uses a never-expiring context.
// This example shows several common operations just to get you started.
// Create a URL that references a to-be-created container in your Azure Storage account.
// This returns a ContainerURL object that wraps the container's URL and a request pipeline (inherited from serviceURL)
containerURL := serviceURL.NewContainerURL("mycontainer") // Container names require lowercase
// Create the container on the service (with no metadata and no public access)
_, err := containerURL.Create(ctx, Metadata{}, PublicAccessNone)
if err != nil {
log.Fatal(err)
}
// Create a URL that references a to-be-created blob in your Azure Storage account's container.
// This returns a BlockBlobURL object that wraps the blob's URL and a request pipeline (inherited from containerURL)
blobURL := containerURL.NewBlockBlobURL("HelloWorld.txt") // Blob names can be mixed case
// Create the blob with string (plain text) content.
data := "Hello World!"
_, err = blobURL.PutBlob(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// Download the blob's contents and verify that it worked correctly
get, err := blobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false)
if err != nil {
log.Fatal(err)
}
downloadedData := &bytes.Buffer{}
downloadedData.ReadFrom(get.Body())
get.Body().Close() // The client must close the response body when finished with it
if data != downloadedData.String() {
log.Fatal("downloaded data doesn't match uploaded data")
}
// List the blob(s) in our container; since a container may hold millions of blobs, this is done 1 segment at a time.
for marker := (Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid compiler error.
// Get a result segment starting with the blob indicated by the current Marker.
listBlob, err := containerURL.ListBlobs(ctx, marker, ListBlobsOptions{})
if err != nil {
log.Fatal(err)
}
// IMPORTANT: ListBlobs returns the start of the next segment; you MUST use this to get
// the next segment (after processing the current result segment).
marker = listBlob.NextMarker
// Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute)
for _, blobInfo := range listBlob.Blobs.Blob {
fmt.Print("Blob name: " + blobInfo.Name + "\n")
}
}
// Delete the blob we created earlier.
_, err = blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// Delete the container we created earlier.
_, err = containerURL.Delete(ctx, ContainerAccessConditions{})
if err != nil {
log.Fatal(err)
}
}
// This example shows how you can configure a pipeline for making HTTP requests to the Azure Storage Blob Service.
func ExampleNewPipeline() {
// This example shows how to wire in your own logging mechanism (this example uses
// Go's standard logger to write log information to standard error)
logger := log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds)
// Create/configure a request pipeline options object.
// All PipelineOptions' fields are optional; reasonable defaults are set for anything you do not specify
po := PipelineOptions{
// Set RetryOptions to control how HTTP request are retried when retryable failures occur
Retry: RetryOptions{
Policy: RetryPolicyExponential, // Use exponential backoff as opposed to linear
MaxTries: 3, // Try at most 3 times to perform the operation (set to 1 to disable retries)
TryTimeout: time.Second * 3, // Maximum time allowed for any single try
RetryDelay: time.Second * 1, // Backoff amount for each retry (exponential or linear)
MaxRetryDelay: time.Second * 3, // Max delay between retries
},
// Set RequestLogOptions to control how each HTTP request & its response is logged
RequestLog: RequestLogOptions{
LogWarningIfTryOverThreshold: time.Millisecond * 200, // A successful response taking more than this time to arrive is logged as a warning
},
// Set LogOptions to control what & where all pipeline log events go
Log: pipeline.LogOptions{
Log: func(s pipeline.LogLevel, m string) { // This func is called to log each event
// This method is not called for filtered-out severities.
logger.Output(2, m) // This example uses Go's standard logger
},
MinimumLevelToLog: func() pipeline.LogLevel { return pipeline.LogInfo }, // Log all events from informational to more severe
},
}
// Create a request pipeline object configured with credentials and with pipeline options. Once created,
// a pipeline object is goroutine-safe and can be safely used with many XxxURL objects simultaneously.
p := NewPipeline(NewAnonymousCredential(), po) // A pipeline always requires some credential object
// Once you've created a pipeline object, associate it with an XxxURL object so that you can perform HTTP requests with it.
u, _ := url.Parse("https://myaccount.blob.core.windows.net")
serviceURL := NewServiceURL(*u, p)
// Use the serviceURL as desired...
// NOTE: When you use an XxxURL object to create another XxxURL object, the new XxxURL object inherits the
// same pipeline object as its parent. For example, the containerURL and blobURL objects (created below)
// all share the same pipeline. Any HTTP operations you perform with these objects share the behavior (retry, logging, etc.)
containerURL := serviceURL.NewContainerURL("mycontainer")
blobURL := containerURL.NewBlockBlobURL("ReadMe.txt")
// If you'd like to perform some operations with different behavior, create a new pipeline object and
// associate it with a new XxxURL object by passing the new pipeline to the XxxURL object's WithPipeline method.
// In this example, I reconfigure the retry policies, create a new pipeline, and then create a new
// ContainerURL object that has the same URL as its parent.
po.Retry = RetryOptions{
Policy: RetryPolicyFixed, // Use linear backoff
MaxTries: 4, // Try at most 4 times to perform the operation (set to 1 to disable retries)
TryTimeout: time.Minute * 1, // Maximum time allowed for any single try
RetryDelay: time.Second * 5, // Backoff amount for each retry (exponential or linear)
MaxRetryDelay: time.Second * 10, // Max delay between retries
}
newContainerURL := containerURL.WithPipeline(NewPipeline(NewAnonymousCredential(), po))
// Now, any XxxBlobURL object created using newContainerURL inherits the pipeline with the new retry policy.
newBlobURL := newContainerURL.NewBlockBlobURL("ReadMe.txt")
_, _ = blobURL, newBlobURL // Avoid compiler's "declared and not used" error
}
func ExampleStorageError() {
// This example shows how to handle errors returned from various XxxURL methods. All these methods return an
// object implementing the pipeline.Response interface and an object implementing Go's error interface.
// The error result is nil if the request was successful; your code can safely use the Response interface object.
// If error is non-nil, the error could be due to:
// 1. An invalid argument passed to the method. You should not write code to handle these errors;
// instead, fix these errors as they appear during development/testing.
// 2. A network request didn't reach an Azure Storage Service. This usually happens due to a bad URL or
// faulty networking infrastructure (like a router issue). In this case, an object implementing the
// net.Error interface will be returned. The net.Error interface offers Timeout and Temporary methods
// which return true if the network error is determined to be a timeout or temporary condition. If
// your pipeline uses the retry policy factory, then this policy looks for Timeout/Temporary and
// automatically retries based on the retry options you've configured. Because of the retry policy,
// your code will usually not call the Timeout/Temporary methods explicitly other than possibly logging
// the network failure.
// 3. A network request did reach the Azure Storage Service but the service failed to perform the
// requested operation. In this case, an object implementing the StorageError interface is returned.
// The StorageError interface also implements the net.Error interface and, if you use the retry policy,
// you would most likely ignore the Timeout/Temporary methods. However, the StorageError interface exposes
// richer information such as a service error code, an error description, details data, and the
// service-returned http.Response. And, from the http.Response, you can get the initiating http.Request.
u, _ := url.Parse("http://myaccount.blob.core.windows.net/mycontainer")
containerURL := NewContainerURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{}))
create, err := containerURL.Create(context.Background(), Metadata{}, PublicAccessNone)
if err != nil { // An error occurred
if serr, ok := err.(StorageError); ok { // This error is a Service-specific error
// StorageError also implements net.Error so you could call its Timeout/Temporary methods if you want.
switch serr.ServiceCode() { // Compare serviceCode to various ServiceCodeXxx constants
case ServiceCodeContainerAlreadyExists:
// You can also look at the http.Response object that failed.
if failedResponse := serr.Response(); failedResponse != nil {
// From the response object, you can get the initiating http.Request object
failedRequest := failedResponse.Request
_ = failedRequest // Avoid compiler's "declared and not used" error
}
case ServiceCodeContainerBeingDeleted:
// Handle this error ...
default:
// Handle other errors ...
}
}
log.Fatal(err) // Error is not due to Azure Storage service; networking infrastructure failure
}
// If err is nil, then the method was successful; use the response to access the result
_ = create // Avoid compiler's "declared and not used" error
}
// This example shows how to break a URL into its parts so you can
// examine and/or change some of its values and then construct a new URL.
func ExampleBlobURLParts() {
// Let's start with a URL that identifies a snapshot of a blob in a container.
// The URL also contains a Shared Access Signature (SAS):
u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainter/ReadMe.txt?" +
"snapshot=2011-03-09T01:42:34.9360000Z" +
"sv=2015-02-21&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&" +
"spr=https,http&si=myIdentifier&ss=bf&srt=s&sig=92836758923659283652983562==")
// You can parse this URL into its constituent parts:
parts := NewBlobURLParts(*u)
// Now, we access the parts (this example prints them).
fmt.Println(parts.Host, parts.ContainerName, parts.BlobName, parts.Snapshot)
sas := parts.SAS
fmt.Println(sas.Version(), sas.Resource(), sas.StartTime(), sas.ExpiryTime(), sas.Permissions(),
sas.IPRange(), sas.Protocol(), sas.Identifier(), sas.Services(), sas.Signature())
// You can then change some of the fields and construct a new URL:
parts.SAS = SASQueryParameters{} // Remove the SAS query parameters
parts.Snapshot = time.Time{} // Remove the snapshot timestamp
parts.ContainerName = "othercontainer" // Change the container name
// In this example, we'll keep the blob name as is.
// Construct a new URL from the parts:
newURL := parts.URL()
fmt.Print(newURL.String())
// NOTE: You can pass the new URL to NewBlockBlobURL (or similar methods) to manipulate the blob.
}
// This example shows how to create and use an Azure Storage account Shared Access Signature (SAS).
func ExampleAccountSASSignatureValues() {
// From the Azure portal, get your Storage account's name and account key.
accountName, accountKey := accountInfo()
// Use your Storage account's name and key to create a credential object; this is required to sign a SAS.
credential := NewSharedKeyCredential(accountName, accountKey)
// Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query parameters.
sasQueryParams := AccountSASSignatureValues{
Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP)
ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration
Permissions: AccountSASPermissions{Read: true, List: true}.String(),
Services: AccountSASServices{Blob: true}.String(),
ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(),
}.NewSASQueryParameters(credential)
qp := sasQueryParams.Encode()
urlToSendToSomeone := fmt.Sprintf("https://%s.blob.core.windows.net?%s", accountName, qp)
// At this point, you can send the urlToSendToSomeone to someone via email or any other mechanism you choose.
// ************************************************************************************************
// When someone receives the URL, they access the SAS-protected resource with code like this:
u, _ := url.Parse(urlToSendToSomeone)
// Create a ServiceURL object that wraps the service URL (and its SAS) and a pipeline.
// When using SAS URLs, anonymous credentials are required.
serviceURL := NewServiceURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{}))
// Now, you can use this serviceURL just like any other to make requests of the resource.
// You can parse a URL into its constituent parts:
blobURLParts := NewBlobURLParts(serviceURL.URL())
fmt.Printf("SAS expiry time=%v", blobURLParts.SAS.ExpiryTime())
_ = serviceURL // Avoid compiler's "declared and not used" error
}
// This example shows how to create and use a Blob Service Shared Access Signature (SAS).
func ExampleBlobSASSignatureValues() {
// From the Azure portal, get your Storage account's name and account key.
accountName, accountKey := accountInfo()
// Use your Storage account's name and key to create a credential object; this is required to sign a SAS.
credential := NewSharedKeyCredential(accountName, accountKey)
// This is the name of the container and blob that we're creating a SAS to.
containerName := "mycontainer" // Container names require lowercase
blobName := "HelloWorld.txt" // Blob names can be mixed case
// Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query parameters.
sasQueryParams := BlobSASSignatureValues{
Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP)
ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration
ContainerName: containerName,
BlobName: blobName,
// To produce a container SAS (as opposed to a blob SAS), assign to Permissions using
// ContainerSASPermissions and make sure the BlobName field is "" (the default).
Permissions: BlobSASPermissions{Add: true, Read: true, Write: true}.String(),
}.NewSASQueryParameters(credential)
// Create the URL of the resource you wish to access and append the SAS query parameters.
// Since this is a blob SAS, the URL is to the Azure storage blob.
qp := sasQueryParams.Encode()
urlToSendToSomeone := fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s?%s",
accountName, containerName, blobName, qp)
// At this point, you can send the urlToSendToSomeone to someone via email or any other mechanism you choose.
// ************************************************************************************************
// When someone receives the URL, they access the SAS-protected resource with code like this:
u, _ := url.Parse(urlToSendToSomeone)
// Create a BlobURL object that wraps the blob URL (and its SAS) and a pipeline.
// When using SAS URLs, anonymous credentials are required.
blobURL := NewBlobURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{}))
// Now, you can use this blobURL just like any other to make requests of the resource.
// If you have a SAS query parameter string, you can parse it into its parts:
blobURLParts := NewBlobURLParts(blobURL.URL())
fmt.Printf("SAS expiry time=%v", blobURLParts.SAS.ExpiryTime())
_ = blobURL // Avoid compiler's "declared and not used" error
}
// This example shows how to manipulate a container's permissions.
func ExampleContainerURL_SetPermissions() {
// From the Azure portal, get your Storage account's name and account key.
accountName, accountKey := accountInfo()
// Use your Storage account's name and key to create a credential object; this is used to access your account.
credential := NewSharedKeyCredential(accountName, accountKey)
// Create a ContainerURL object that wraps the container's URL and a default pipeline.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
containerURL := NewContainerURL(*u, NewPipeline(credential, PipelineOptions{}))
// All operations allow you to specify a timeout via a Go context.Context object.
ctx := context.Background() // This example uses a never-expiring context
// Create the container (with no metadata and no public access)
_, err := containerURL.Create(ctx, Metadata{}, PublicAccessNone)
if err != nil {
log.Fatal(err)
}
// Create a URL that references a to-be-created blob in your Azure Storage account's container.
// This returns a BlockBlobURL object that wraps the blob's URL and a request pipeline (inherited from containerURL)
blobURL := containerURL.NewBlockBlobURL("HelloWorld.txt") // Blob names can be mixed case
// Create the blob and put some text in it
_, err = blobURL.PutBlob(ctx, strings.NewReader("Hello World!"), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// Attempt to read the blob via a simple HTTP GET operation
rawBlobURL := blobURL.URL()
get, err := http.Get(rawBlobURL.String())
if err != nil {
log.Fatal(err)
}
if get.StatusCode == http.StatusNotFound {
// We expected this error because the service returns an HTTP 404 status code when a blob
// exists but the requester does not have permission to access it.
// This is how we change the container's permissions to allow public/anonymous access:
_, err := containerURL.SetPermissions(ctx, PublicAccessBlob, []SignedIdentifier{}, ContainerAccessConditions{})
if err != nil {
log.Fatal(err)
}
// Now, this works:
get, err = http.Get(rawBlobURL.String())
if err != nil {
log.Fatal(err)
}
defer get.Body.Close()
var text bytes.Buffer
text.ReadFrom(get.Body)
fmt.Print(text.String())
}
}
// This example shows how to perform operations on blob conditionally.
func ExampleBlobAccessConditions() {
// From the Azure portal, get your Storage account's name and account key.
accountName, accountKey := accountInfo()
// Create a BlockBlobURL object that wraps a blob's URL and a default pipeline.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/Data.txt", accountName))
blobURL := NewBlockBlobURL(*u, NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
// This helper function displays the results of an operation; it is called frequently below.
showResult := func(response pipeline.Response, err error) {
if err != nil {
if serr, ok := err.(StorageError); !ok {
log.Fatal(err) // Network failure
} else {
fmt.Print("Failure: " + serr.Response().Status + "\n")
}
} else {
if get, ok := response.(*GetResponse); ok {
get.Body().Close() // The client must close the response body when finished with it
}
fmt.Print("Success: " + response.Response().Status + "\n")
}
}
// Create the blob (unconditionally; succeeds)
put, err := blobURL.PutBlob(ctx, strings.NewReader("Text-1"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
showResult(put, err)
// Download blob content if the blob has been modified since we uploaded it (fails):
showResult(blobURL.GetBlob(ctx, BlobRange{},
BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfModifiedSince: put.LastModified()}}, false))
// Download blob content if the blob hasn't been modified in the last 24 hours (fails):
showResult(blobURL.GetBlob(ctx, BlobRange{},
BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfUnmodifiedSince: time.Now().UTC().Add(time.Hour * -24)}}, false))
// Upload new content if the blob hasn't changed since the version identified by ETag (succeeds):
put, err = blobURL.PutBlob(ctx, strings.NewReader("Text-2"), BlobHTTPHeaders{}, Metadata{},
BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfMatch: put.ETag()}})
showResult(put, err)
// Download content if it has changed since the version identified by ETag (fails):
showResult(blobURL.GetBlob(ctx, BlobRange{},
BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfNoneMatch: put.ETag()}}, false))
// Upload content if the blob doesn't already exist (fails):
showResult(blobURL.PutBlob(ctx, strings.NewReader("Text-3"), BlobHTTPHeaders{}, Metadata{},
BlobAccessConditions{HTTPAccessConditions: HTTPAccessConditions{IfNoneMatch: ETagAny}}))
}
// This example shows how to create a container with metadata and then how to read & update the metadata.
func ExampleMetadata_containers() {
// From the Azure portal, get your Storage account blob service URL endpoint.
accountName, accountKey := accountInfo()
// Create a ContainerURL object that wraps a soon-to-be-created container's URL and a default pipeline.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
containerURL := NewContainerURL(*u,
NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
// Create a container with some metadata (string key/value pairs)
// NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service.
// Therefore, you should always use lowercase letters; especially when querying a map for a metadata key.
creatingApp, _ := os.Executable()
_, err := containerURL.Create(ctx, Metadata{"createdby": "Jeffrey", "app": creatingApp}, PublicAccessNone)
if err != nil {
log.Fatal(err)
}
// Query the container's metadata
get, err := containerURL.GetPropertiesAndMetadata(ctx, LeaseAccessConditions{})
if err != nil {
log.Fatal(err)
}
// Show the container's metadata
metadata := get.NewMetadata()
for k, v := range metadata {
fmt.Print(k + "=" + v + "\n")
}
// Update the metadata and write it back to the container
metadata["createdby"] = "Aidan" // NOTE: The keyname is in all lowercase letters
_, err = containerURL.SetMetadata(ctx, metadata, ContainerAccessConditions{})
if err != nil {
log.Fatal(err)
}
// NOTE: The SetMetadata & SetProperties methods update the container's ETag & LastModified properties
}
// This example shows how to create a blob with metadata and then how to read & update
// the blob's read-only properties and metadata.
func ExampleMetadata_blobs() {
// From the Azure portal, get your Storage account blob service URL endpoint.
accountName, accountKey := accountInfo()
// Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/ReadMe.txt", accountName))
blobURL := NewBlockBlobURL(*u,
NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
// Create a blob with metadata (string key/value pairs)
// NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service.
// Therefore, you should always use lowercase letters; especially when querying a map for a metadata key.
creatingApp, _ := os.Executable()
_, err := blobURL.PutBlob(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{},
Metadata{"createdby": "Jeffrey", "app": creatingApp}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// Query the blob's properties and metadata
get, err := blobURL.GetPropertiesAndMetadata(ctx, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// Show some of the blob's read-only properties
fmt.Println(get.BlobType(), get.ETag(), get.LastModified())
// Show the blob's metadata
metadata := get.NewMetadata()
for k, v := range metadata {
fmt.Print(k + "=" + v + "\n")
}
// Update the blob's metadata and write it back to the blob
metadata["updatedby"] = "Grant" // Add a new key/value; NOTE: The keyname is in all lowercase letters
_, err = blobURL.SetMetadata(ctx, metadata, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// NOTE: The SetMetadata method updates the blob's ETag & LastModified properties
}
// This example shows how to create a blob with HTTP headers and then how to read & update
// the blob's HTTP headers.
func ExampleBlobHTTPHeaders() {
// From the Azure portal, get your Storage account blob service URL endpoint.
accountName, accountKey := accountInfo()
// Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/ReadMe.txt", accountName))
blobURL := NewBlockBlobURL(*u,
NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
// Create a blob with HTTP headers
_, err := blobURL.PutBlob(ctx, strings.NewReader("Some text"),
BlobHTTPHeaders{
ContentType: "text/html; charset=utf-8",
ContentDisposition: "attachment",
}, Metadata{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// GetPropertiesAndMetadata returns the blob's properties, HTTP headers, and metadata
get, err := blobURL.GetPropertiesAndMetadata(ctx, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// Show some of the blob's read-only properties
fmt.Println(get.BlobType(), get.ETag(), get.LastModified())
// Shows some of the blob's HTTP Headers
httpHeaders := get.NewHTTPHeaders()
fmt.Println(httpHeaders.ContentType, httpHeaders.ContentDisposition)
// Update the blob's HTTP Headers and write them back to the blob
httpHeaders.ContentType = "text/plain"
_, err = blobURL.SetProperties(ctx, httpHeaders, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// NOTE: The SetProperties method updates the blob's ETag & LastModified properties
}
// ExampleBlockBlobURL shows how to upload a lot of data (in blocks) to a blob.
// A block blob can have a maximum of 50,000 blocks; each block can have a maximum of 100MB.
// Therefore, the maximum size of a block blob is slightly more than 4.75 TB (100 MB X 50,000 blocks).
func ExampleBlockBlobURL() {
// From the Azure portal, get your Storage account blob service URL endpoint.
accountName, accountKey := accountInfo()
// Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/MyBlockBlob.txt", accountName))
blobURL := NewBlockBlobURL(*u,
NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
// These helper functions convert a binary block ID to a base-64 string and vice versa
// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the blob must be the same length
blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
blockIDBase64ToBinary := func(blockID string) []byte { binary, _ := base64.StdEncoding.DecodeString(blockID); return binary }
// These helper functions convert an int block ID to a base-64 string and vice versa
blockIDIntToBase64 := func(blockID int) string {
binaryBlockID := (&[4]byte{})[:] // All block IDs are 4 bytes long
binary.LittleEndian.PutUint32(binaryBlockID, uint32(blockID))
return blockIDBinaryToBase64(binaryBlockID)
}
blockIDBase64ToInt := func(blockID string) int {
return int(binary.LittleEndian.Uint32(blockIDBase64ToBinary(blockID)))
}
// Upload 4 blocks to the blob (these blocks are tiny; they can be up to 100MB each)
words := []string{"Azure ", "Storage ", "Block ", "Blob."}
base64BlockIDs := make([]string, len(words)) // The collection of block IDs (base 64 strings)
// Upload each block sequentially (one after the other); for better performance, you want to upload multiple blocks in parallel.
for index, word := range words {
// This example uses the index as the block ID; convert the index/ID into a base-64 encoded string as required by the service.
// NOTE: Over the lifetime of a blob, all block IDs (before base 64 encoding) must be the same length (this example uses 4 byte block IDs).
base64BlockIDs[index] = blockIDIntToBase64(index) // Some people use UUIDs for block IDs
// Upload a block to this blob specifying the Block ID and its content (up to 100MB); this block is uncommitted.
_, err := blobURL.PutBlock(ctx, base64BlockIDs[index], strings.NewReader(word), LeaseAccessConditions{})
if err != nil {
log.Fatal(err)
}
}
// After all the blocks are uploaded, atomically commit them to the blob.
_, err := blobURL.PutBlockList(ctx, base64BlockIDs, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// For the blob, show each block (ID and size) that is a committed part of it.
getBlock, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{})
if err != nil {
log.Fatal(err)
}
for _, block := range getBlock.CommittedBlocks {
fmt.Printf("Block ID=%d, Size=%d\n", blockIDBase64ToInt(block.Name), block.Size)
}
// Download the blob in its entirety; download operations do not take blocks into account.
// NOTE: For really large blobs, downloading them like this allocates a lot of memory.
get, err := blobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false)
if err != nil {
log.Fatal(err)
}
blobData := &bytes.Buffer{}
blobData.ReadFrom(get.Body())
get.Body().Close() // The client must close the response body when finished with it
fmt.Println(blobData)
}
// ExampleAppendBlobURL shows how to append data (in blocks) to an append blob.
// An append blob can have a maximum of 50,000 blocks; each appended block can have a maximum of 4MB.
// Therefore, the maximum size of an append blob is slightly more than 195 GB (4 MB X 50,000 blocks).
func ExampleAppendBlobURL() {
// From the Azure portal, get your Storage account blob service URL endpoint.
accountName, accountKey := accountInfo()
// Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/MyAppendBlob.txt", accountName))
appendBlobURL := NewAppendBlobURL(*u, NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
_, err := appendBlobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
for i := 0; i < 5; i++ { // Append 5 blocks to the append blob
_, err := appendBlobURL.AppendBlock(ctx, strings.NewReader(fmt.Sprintf("Appending block #%d\n", i)), BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
}
// Download the entire append blob's contents and show it.
get, err := appendBlobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false)
if err != nil {
log.Fatal(err)
}
b := bytes.Buffer{}
b.ReadFrom(get.Body())
get.Body().Close() // The client must close the response body when finished with it
fmt.Println(b.String())
}
// ExamplePageBlobURL shows how to create a page blob, write and clear pages, and enumerate its page ranges.
func ExamplePageBlobURL() {
// From the Azure portal, get your Storage account blob service URL endpoint.
accountName, accountKey := accountInfo()
// Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/MyPageBlob.txt", accountName))
blobURL := NewPageBlobURL(*u,
NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
_, err := blobURL.Create(ctx, PageBlobPageBytes*4, 0, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
page := [PageBlobPageBytes]byte{}
copy(page[:], "Page 0")
_, err = blobURL.PutPages(ctx, PageRange{Start: 0 * PageBlobPageBytes, End: 1*PageBlobPageBytes - 1},
bytes.NewReader(page[:]), BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
copy(page[:], "Page 1")
_, err = blobURL.PutPages(ctx, PageRange{Start: 2 * PageBlobPageBytes, End: 3*PageBlobPageBytes - 1},
bytes.NewReader(page[:]), BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
getPages, err := blobURL.GetPageRanges(ctx, BlobRange{Offset: 0 * PageBlobPageBytes, Count: 10*PageBlobPageBytes - 1}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
for _, pr := range getPages.PageRange {
fmt.Printf("Start=%d, End=%d\n", pr.Start, pr.End)
}
_, err = blobURL.ClearPages(ctx, PageRange{Start: 0 * PageBlobPageBytes, End: 1*PageBlobPageBytes - 1}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
getPages, err = blobURL.GetPageRanges(ctx, BlobRange{Offset: 0 * PageBlobPageBytes, Count: 10*PageBlobPageBytes - 1}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
for _, pr := range getPages.PageRange {
fmt.Printf("Start=%d, End=%d\n", pr.Start, pr.End)
}
get, err := blobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false)
if err != nil {
log.Fatal(err)
}
blobData := &bytes.Buffer{}
blobData.ReadFrom(get.Body())
get.Body().Close() // The client must close the response body when finished with it
fmt.Printf("%#v", blobData.Bytes())
}
// This example shows how to create a blob, take a snapshot of it, update the base blob,
// read from the blob snapshot, list blobs with their snapshots, and how to delete blob snapshots.
func Example_blobSnapshots() {
// From the Azure portal, get your Storage account blob service URL endpoint.
accountName, accountKey := accountInfo()
// Create a ContainerURL object to a container where we'll create a blob and its snapshot.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
containerURL := NewContainerURL(*u,
NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
// Create a BlockBlobURL object to a blob in the container.
baseBlobURL := containerURL.NewBlockBlobURL("Original.txt")
ctx := context.Background() // This example uses a never-expiring context
// Create the original blob:
_, err := baseBlobURL.PutBlob(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// Create a snapshot of the original blob & save its timestamp:
createSnapshot, err := baseBlobURL.CreateSnapshot(ctx, Metadata{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
snapshot := createSnapshot.Snapshot()
// Modify the original blob & show it:
_, err = baseBlobURL.PutBlob(ctx, strings.NewReader("New text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
get, err := baseBlobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false)
b := bytes.Buffer{}
b.ReadFrom(get.Body())
get.Body().Close() // The client must close the response body when finished with it
fmt.Println(b.String())
// Show snapshot blob via original blob URI & snapshot time:
snapshotBlobURL := baseBlobURL.WithSnapshot(snapshot)
get, err = snapshotBlobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false)
b.Reset()
b.ReadFrom(get.Body())
get.Body().Close() // The client must close the response body when finished with it
fmt.Println(b.String())
// FYI: You can get the base blob URL from one of its snapshot by passing time.Time{} to WithSnapshot:
baseBlobURL = snapshotBlobURL.WithSnapshot(time.Time{})
// Show all blobs in the container with their snapshots:
// List the blob(s) in our container; since a container may hold millions of blobs, this is done 1 segment at a time.
for marker := (Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid compiler error.
// Get a result segment starting with the blob indicated by the current Marker.
listBlobs, err := containerURL.ListBlobs(ctx, marker, ListBlobsOptions{
Details: BlobListingDetails{Snapshots: true}})
if err != nil {
log.Fatal(err)
}
// IMPORTANT: ListBlobs returns the start of the next segment; you MUST use this to get
// the next segment (after processing the current result segment).
marker = listBlobs.NextMarker
// Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute)
for _, blobInfo := range listBlobs.Blobs.Blob {
snaptime := "N/A"
if !blobInfo.Snapshot.IsZero() {
snaptime = blobInfo.Snapshot.String()
}
fmt.Printf("Blob name: %s, Snapshot: %s\n", blobInfo.Name, snaptime)
}
}
// Promote read-only snapshot to writable base blob:
_, err = baseBlobURL.StartCopy(ctx, snapshotBlobURL.URL(), Metadata{}, BlobAccessConditions{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// When calling Delete on a base blob:
// DeleteSnapshotsOptionOnly deletes all the base blob's snapshots but not the base blob itself
// DeleteSnapshotsOptionInclude deletes the base blob & all its snapshots.
// DeleteSnapshotsOptionNone produces an error if the base blob has any snapshots.
_, err = baseBlobURL.Delete(ctx, DeleteSnapshotsOptionInclude, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
}
func Example_progressUploadDownload() {
// Create a request pipeline using your Storage account's name and account key.
accountName, accountKey := accountInfo()
credential := NewSharedKeyCredential(accountName, accountKey)
p := NewPipeline(credential, PipelineOptions{})
// From the Azure portal, get your Storage account blob service URL endpoint.
cURL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
// Create a ContainerURL object that wraps the container URL and a request pipeline for making requests.
containerURL := NewContainerURL(*cURL, p)
ctx := context.Background() // This example uses a never-expiring context
// Here's how to create a blob with HTTP headers while reporting upload progress:
blobURL := containerURL.NewBlockBlobURL("Data.bin")
// requestBody is the stream of data to write
requestBody := strings.NewReader("Some text to write")
// Wrap the request body in a RequestBodyProgress and pass a callback function for progress reporting.
_, err := blobURL.PutBlob(ctx,
pipeline.NewRequestBodyProgress(requestBody, func(bytesTransferred int64) {
fmt.Printf("Wrote %d of %d bytes.", bytesTransferred, requestBody.Len())
}),
BlobHTTPHeaders{
ContentType: "text/html; charset=utf-8",
ContentDisposition: "attachment",
}, Metadata{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
// Here's how to read the blob's data with progress reporting:
get, err := blobURL.GetBlob(ctx, BlobRange{}, BlobAccessConditions{}, false)
if err != nil {
log.Fatal(err)
}
// Wrap the response body in a ResponseBodyProgress and pass a callback function for progress reporting.
responseBody := pipeline.NewResponseBodyProgress(get.Body(), func(bytesTransferred int64) {
fmt.Printf("Read %d of %d bytes.", bytesTransferred, get.ContentLength())
})
downloadedData := &bytes.Buffer{}
downloadedData.ReadFrom(responseBody)
responseBody.Close() // The client must close the response body when finished with it
// The downloaded blob data is in downloadedData's buffer
}
// This example shows how to copy a source document on the Internet to a blob.
func ExampleBlobURL_startCopy() {
// From the Azure portal, get your Storage account blob service URL endpoint.
accountName, accountKey := accountInfo()
// Create a BlobURL object that references the destination blob for the copy.
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/CopiedBlob.bin", accountName))
blobURL := NewBlobURL(*u,
NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
src, _ := url.Parse("https://cdn2.auth0.com/docs/media/addons/azure_blob.svg")
startCopy, err := blobURL.StartCopy(ctx, *src, nil, BlobAccessConditions{}, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
//abortCopy, err := blobURL.AbortCopy(ct, copyID, LeaseAccessConditions{})
copyID := startCopy.CopyID()
copyStatus := startCopy.CopyStatus()
for copyStatus == CopyStatusPending {
time.Sleep(time.Second * 2)
getMetadata, err := blobURL.GetPropertiesAndMetadata(ctx, BlobAccessConditions{})
if err != nil {
log.Fatal(err)
}
copyStatus = getMetadata.CopyStatus()
}
fmt.Printf("Copy from %s to %s: ID=%s, Status=%s\n", src.String(), blobURL, copyID, copyStatus)
}
// This example shows how to copy a large stream in blocks (chunks) to a block blob.
func ExampleUploadStreamToBlockBlob() {
file, err := os.Open("BigFile.bin") // Open the file we want to upload
if err != nil {
log.Fatal(err)
}
defer file.Close()
fileSize, err := file.Stat() // Get the size of the file (stream)
if err != nil {
log.Fatal(err)
}
// From the Azure portal, get your Storage account blob service URL endpoint.
accountName, accountKey := accountInfo()
// Create a BlockBlobURL object to a blob in the container (we assume the container already exists).
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/BigBlockBlob.bin", accountName))
blockBlobURL := NewBlockBlobURL(*u, NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
ctx := context.Background() // This example uses a never-expiring context
// Pass the Context, file, block blob URL, and options to UploadFileToBlockBlob
response, err := UploadFileToBlockBlob(ctx, file, blockBlobURL,
UploadToBlockBlobOptions{
// If Progress is non-nil, this function is called periodically as bytes are uploaded.
Progress: func(bytesTransferred int64) {
fmt.Printf("Uploaded %d of %d bytes.\n", bytesTransferred, fileSize.Size())
},
})
if err != nil {
log.Fatal(err)
}
_ = response // Avoid compiler's "declared and not used" error
}
// This example shows how to download a large stream with intelligent retries. Specifically, if
// the connection fails while reading, continuing to read from this stream initiates a new
// GetBlob call passing a range that starts from the last byte successfully read before the failure.
func ExampleNewDownloadStream() {
// From the Azure portal, get your Storage account blob service URL endpoint.
accountName, accountKey := accountInfo()
// Create a BlobURL object to a blob in the container (we assume the container & blob already exist).
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/BigBlob.bin", accountName))
blobURL := NewBlobURL(*u, NewPipeline(NewSharedKeyCredential(accountName, accountKey), PipelineOptions{}))
contentLength := int64(0) // Used for progress reporting to report the total number of bytes being downloaded.
// NewDownloadStream creates an intelligent retryable stream around a blob; it returns an io.ReadCloser.
rs := NewDownloadStream(context.Background(),
// We pass more than "blobURL.GetBlob" here so we can capture the blob's full
// content length on the very first internal call to Read.
func(ctx context.Context, blobRange BlobRange, ac BlobAccessConditions, rangeGetContentMD5 bool) (*GetResponse, error) {
get, err := blobURL.GetBlob(ctx, blobRange, ac, rangeGetContentMD5)
if err == nil && contentLength == 0 {
// If 1st successful Get, record blob's full size for progress reporting
contentLength = get.ContentLength()
}
return get, err
},
DownloadStreamOptions{})
// NewResponseBodyProgress wraps the download stream with progress reporting; it returns an io.ReadCloser.
stream := pipeline.NewResponseBodyProgress(rs,
func(bytesTransferred int64) {
fmt.Printf("Downloaded %d of %d bytes.\n", bytesTransferred, contentLength)
})
defer stream.Close() // The client must close the response body when finished with it
file, err := os.Create("BigFile.bin") // Create the file to hold the downloaded blob contents.
if err != nil {
log.Fatal(err)
}
defer file.Close()
written, err := io.Copy(file, stream) // Write to the file by reading from the blob (with intelligent retries).
if err != nil {
log.Fatal(err)
}
_ = written // Avoid compiler's "declared and not used" error
}
// Lease example?
// Root container?
// List containers/blobs with metadata & HTTP headers? Other?
|
[
"\"ACCOUNT_NAME\"",
"\"ACCOUNT_KEY\""
] |
[] |
[
"ACCOUNT_NAME",
"ACCOUNT_KEY"
] |
[]
|
["ACCOUNT_NAME", "ACCOUNT_KEY"]
|
go
| 2 | 0 | |
src/net/http/transport.go
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// HTTP client implementation. See RFC 7230 through 7235.
//
// This is the low-level Transport implementation of RoundTripper.
// The high-level interface is in client.go.
package http
import (
"bufio"
"compress/gzip"
"container/list"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"net"
"net/http/httptrace"
"net/url"
"os"
"strings"
"sync"
"sync/atomic"
"time"
"golang_org/x/net/http/httpguts"
"golang_org/x/net/http/httpproxy"
)
// DefaultTransport is the default implementation of Transport and is
// used by DefaultClient. It establishes network connections as needed
// and caches them for reuse by subsequent calls. It uses HTTP proxies
// as directed by the $HTTP_PROXY and $NO_PROXY (or $http_proxy and
// $no_proxy) environment variables.
var DefaultTransport RoundTripper = &Transport{
Proxy: ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
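// A brief usage sketch (not part of the original source): programs that need
// slightly different defaults typically build their own Transport rather than
// mutating DefaultTransport; the field values below simply mirror
// DefaultTransport and are only illustrative.
//
//	tr := &Transport{
//		Proxy:                 ProxyFromEnvironment,
//		MaxIdleConns:          100,
//		IdleConnTimeout:       90 * time.Second,
//		TLSHandshakeTimeout:   10 * time.Second,
//		ExpectContinueTimeout: 1 * time.Second,
//	}
//	client := &Client{Transport: tr}
//	_ = client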
// DefaultMaxIdleConnsPerHost is the default value of Transport's
// MaxIdleConnsPerHost.
const DefaultMaxIdleConnsPerHost = 2
// Transport is an implementation of RoundTripper that supports HTTP,
// HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT).
//
// By default, Transport caches connections for future re-use.
// This may leave many open connections when accessing many hosts.
// This behavior can be managed using Transport's CloseIdleConnections method
// and the MaxIdleConnsPerHost and DisableKeepAlives fields.
//
// Transports should be reused instead of created as needed.
// Transports are safe for concurrent use by multiple goroutines.
//
// A Transport is a low-level primitive for making HTTP and HTTPS requests.
// For high-level functionality, such as cookies and redirects, see Client.
//
// Transport uses HTTP/1.1 for HTTP URLs and either HTTP/1.1 or HTTP/2
// for HTTPS URLs, depending on whether the server supports HTTP/2,
// and how the Transport is configured. The DefaultTransport supports HTTP/2.
// To explicitly enable HTTP/2 on a transport, use golang.org/x/net/http2
// and call ConfigureTransport. See the package docs for more about HTTP/2.
//
// The Transport will send CONNECT requests to a proxy for its own use
// when processing HTTPS requests, but Transport should generally not
// be used to send a CONNECT request. That is, the Request passed to
// the RoundTrip method should not have a Method of "CONNECT", as Go's
// HTTP/1.x implementation does not support full-duplex request bodies
// being written while the response body is streamed. Go's HTTP/2
// implementation does support full duplex, but many CONNECT proxies speak
// HTTP/1.x.
type Transport struct {
idleMu sync.Mutex
wantIdle bool // user has requested to close all idle conns
idleConn map[connectMethodKey][]*persistConn // most recently used at end
idleConnCh map[connectMethodKey]chan *persistConn
idleLRU connLRU
reqMu sync.Mutex
reqCanceler map[*Request]func(error)
altMu sync.Mutex // guards changing altProto only
altProto atomic.Value // of nil or map[string]RoundTripper, key is URI scheme
// Proxy specifies a function to return a proxy for a given
// Request. If the function returns a non-nil error, the
// request is aborted with the provided error.
//
// The proxy type is determined by the URL scheme. "http"
// and "socks5" are supported. If the scheme is empty,
// "http" is assumed.
//
// If Proxy is nil or returns a nil *URL, no proxy is used.
Proxy func(*Request) (*url.URL, error)
// DialContext specifies the dial function for creating unencrypted TCP connections.
// If DialContext is nil (and the deprecated Dial below is also nil),
// then the transport dials using package net.
//
// DialContext runs concurrently with calls to RoundTrip.
// A RoundTrip call that initiates a dial may end up using
// a connection dialed previously when the earlier connection
// becomes idle before the later DialContext completes.
DialContext func(ctx context.Context, network, addr string) (net.Conn, error)
// Dial specifies the dial function for creating unencrypted TCP connections.
//
// Dial runs concurrently with calls to RoundTrip.
// A RoundTrip call that initiates a dial may end up using
// a connection dialed previously when the earlier connection
// becomes idle before the later Dial completes.
//
// Deprecated: Use DialContext instead, which allows the transport
// to cancel dials as soon as they are no longer needed.
// If both are set, DialContext takes priority.
Dial func(network, addr string) (net.Conn, error)
// DialTLS specifies an optional dial function for creating
// TLS connections for non-proxied HTTPS requests.
//
// If DialTLS is nil, Dial and TLSClientConfig are used.
//
// If DialTLS is set, the Dial hook is not used for HTTPS
// requests and the TLSClientConfig and TLSHandshakeTimeout
// are ignored. The returned net.Conn is assumed to already be
// past the TLS handshake.
DialTLS func(network, addr string) (net.Conn, error)
// TLSClientConfig specifies the TLS configuration to use with
// tls.Client.
// If nil, the default configuration is used.
// If non-nil, HTTP/2 support may not be enabled by default.
TLSClientConfig *tls.Config
// TLSHandshakeTimeout specifies the maximum amount of time to
// wait for a TLS handshake. Zero means no timeout.
TLSHandshakeTimeout time.Duration
// DisableKeepAlives, if true, prevents re-use of TCP connections
// between different HTTP requests.
DisableKeepAlives bool
// DisableCompression, if true, prevents the Transport from
// requesting compression with an "Accept-Encoding: gzip"
// request header when the Request contains no existing
// Accept-Encoding value. If the Transport requests gzip on
// its own and gets a gzipped response, it's transparently
// decoded in the Response.Body. However, if the user
// explicitly requested gzip it is not automatically
// uncompressed.
DisableCompression bool
// MaxIdleConns controls the maximum number of idle (keep-alive)
// connections across all hosts. Zero means no limit.
MaxIdleConns int
// MaxIdleConnsPerHost, if non-zero, controls the maximum idle
// (keep-alive) connections to keep per-host. If zero,
// DefaultMaxIdleConnsPerHost is used.
MaxIdleConnsPerHost int
// IdleConnTimeout is the maximum amount of time an idle
// (keep-alive) connection will remain idle before closing
// itself.
// Zero means no limit.
IdleConnTimeout time.Duration
// ResponseHeaderTimeout, if non-zero, specifies the amount of
// time to wait for a server's response headers after fully
// writing the request (including its body, if any). This
// time does not include the time to read the response body.
ResponseHeaderTimeout time.Duration
// ExpectContinueTimeout, if non-zero, specifies the amount of
// time to wait for a server's first response headers after fully
// writing the request headers if the request has an
// "Expect: 100-continue" header. Zero means no timeout and
// causes the body to be sent immediately, without
// waiting for the server to approve.
// This time does not include the time to send the request header.
ExpectContinueTimeout time.Duration
// TLSNextProto specifies how the Transport switches to an
// alternate protocol (such as HTTP/2) after a TLS NPN/ALPN
// protocol negotiation. If Transport dials a TLS connection
// with a non-empty protocol name and TLSNextProto contains a
// map entry for that key (such as "h2"), then the func is
// called with the request's authority (such as "example.com"
// or "example.com:1234") and the TLS connection. The function
// must return a RoundTripper that then handles the request.
// If TLSNextProto is not nil, HTTP/2 support is not enabled
// automatically.
TLSNextProto map[string]func(authority string, c *tls.Conn) RoundTripper
// ProxyConnectHeader optionally specifies headers to send to
// proxies during CONNECT requests.
ProxyConnectHeader Header
// MaxResponseHeaderBytes specifies a limit on how many
// response bytes are allowed in the server's response
// header.
//
// Zero means to use a default limit.
MaxResponseHeaderBytes int64
// nextProtoOnce guards initialization of TLSNextProto and
// h2transport (via onceSetNextProtoDefaults)
nextProtoOnce sync.Once
h2transport *http2Transport // non-nil if http2 wired up
// TODO: tunable on max per-host TCP dials in flight (Issue 13957)
}
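// Illustrative sketch (an assumption about typical use, not part of the
// original source): a Transport is normally created once, shared across
// goroutines, and driven through a Client rather than by calling RoundTrip
// directly.
//
//	t := &Transport{
//		MaxIdleConnsPerHost:   16,
//		ResponseHeaderTimeout: 5 * time.Second,
//	}
//	c := &Client{Transport: t, Timeout: 30 * time.Second}
//	resp, err := c.Get("https://example.com/") // example.com is a placeholder
//	if err != nil {
//		// handle error
//	}
//	defer resp.Body.Close()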
// onceSetNextProtoDefaults initializes TLSNextProto.
// It must be called via t.nextProtoOnce.Do.
func (t *Transport) onceSetNextProtoDefaults() {
if strings.Contains(os.Getenv("GODEBUG"), "http2client=0") {
return
}
if t.TLSNextProto != nil {
// This is the documented way to disable http2 on a
// Transport.
return
}
if t.TLSClientConfig != nil || t.Dial != nil || t.DialTLS != nil {
// Be conservative and don't automatically enable
// http2 if they've specified a custom TLS config or
// custom dialers. Let them opt-in themselves via
// http2.ConfigureTransport so we don't surprise them
// by modifying their tls.Config. Issue 14275.
return
}
t2, err := http2configureTransport(t)
if err != nil {
log.Printf("Error enabling Transport HTTP/2 support: %v", err)
return
}
t.h2transport = t2
// Auto-configure the http2.Transport's MaxHeaderListSize from
// the http.Transport's MaxResponseHeaderBytes. They don't
// exactly mean the same thing, but they're close.
//
// TODO: also add this to x/net/http2.ConfigureTransport, behind
// a +build go1.7 build tag:
if limit1 := t.MaxResponseHeaderBytes; limit1 != 0 && t2.MaxHeaderListSize == 0 {
const h2max = 1<<32 - 1
if limit1 >= h2max {
t2.MaxHeaderListSize = h2max
} else {
t2.MaxHeaderListSize = uint32(limit1)
}
}
}
// ProxyFromEnvironment returns the URL of the proxy to use for a
// given request, as indicated by the environment variables
// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions
// thereof). HTTPS_PROXY takes precedence over HTTP_PROXY for https
// requests.
//
// The environment values may be either a complete URL or a
// "host[:port]", in which case the "http" scheme is assumed.
// An error is returned if the value is a different form.
//
// A nil URL and nil error are returned if no proxy is defined in the
// environment, or a proxy should not be used for the given request,
// as defined by NO_PROXY.
//
// As a special case, if req.URL.Host is "localhost" (with or without
// a port number), then a nil URL and nil error will be returned.
func ProxyFromEnvironment(req *Request) (*url.URL, error) {
return envProxyFunc()(req.URL)
}
// ProxyURL returns a proxy function (for use in a Transport)
// that always returns the same URL.
func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
return func(*Request) (*url.URL, error) {
return fixedURL, nil
}
}
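// A hedged sketch of wiring the two proxy helpers above into a Transport
// (illustrative only; the proxy address is hypothetical):
//
//	// Resolve proxies from HTTP_PROXY/HTTPS_PROXY/NO_PROXY:
//	t1 := &Transport{Proxy: ProxyFromEnvironment}
//
//	// Or force every request through a single fixed proxy:
//	fixed, _ := url.Parse("http://proxy.internal:3128")
//	t2 := &Transport{Proxy: ProxyURL(fixed)}
//	_, _ = t1, t2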
// transportRequest is a wrapper around a *Request that adds
// optional extra headers to write and stores any error to return
// from roundTrip.
type transportRequest struct {
*Request // original request, not to be mutated
extra Header // extra headers to write, or nil
trace *httptrace.ClientTrace // optional
mu sync.Mutex // guards err
err error // first setError value for mapRoundTripError to consider
}
func (tr *transportRequest) extraHeaders() Header {
if tr.extra == nil {
tr.extra = make(Header)
}
return tr.extra
}
func (tr *transportRequest) setError(err error) {
tr.mu.Lock()
if tr.err == nil {
tr.err = err
}
tr.mu.Unlock()
}
// roundTrip implements a RoundTripper over HTTP.
func (t *Transport) roundTrip(req *Request) (*Response, error) {
t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
ctx := req.Context()
trace := httptrace.ContextClientTrace(ctx)
if req.URL == nil {
req.closeBody()
return nil, errors.New("http: nil Request.URL")
}
if req.Header == nil {
req.closeBody()
return nil, errors.New("http: nil Request.Header")
}
scheme := req.URL.Scheme
isHTTP := scheme == "http" || scheme == "https"
if isHTTP {
for k, vv := range req.Header {
if !httpguts.ValidHeaderFieldName(k) {
return nil, fmt.Errorf("net/http: invalid header field name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
return nil, fmt.Errorf("net/http: invalid header field value %q for key %v", v, k)
}
}
}
}
altProto, _ := t.altProto.Load().(map[string]RoundTripper)
if altRT := altProto[scheme]; altRT != nil {
if resp, err := altRT.RoundTrip(req); err != ErrSkipAltProtocol {
return resp, err
}
}
if !isHTTP {
req.closeBody()
return nil, &badStringError{"unsupported protocol scheme", scheme}
}
if req.Method != "" && !validMethod(req.Method) {
return nil, fmt.Errorf("net/http: invalid method %q", req.Method)
}
if req.URL.Host == "" {
req.closeBody()
return nil, errors.New("http: no Host in request URL")
}
for {
// treq gets modified by roundTrip, so we need to recreate for each retry.
treq := &transportRequest{Request: req, trace: trace}
cm, err := t.connectMethodForRequest(treq)
if err != nil {
req.closeBody()
return nil, err
}
// Get the cached or newly-created connection to either the
// host (for http or https), the http proxy, or the http proxy
// pre-CONNECTed to https server. In any case, we'll be ready
// to send it requests.
pconn, err := t.getConn(treq, cm)
if err != nil {
t.setReqCanceler(req, nil)
req.closeBody()
return nil, err
}
var resp *Response
if pconn.alt != nil {
// HTTP/2 path.
t.setReqCanceler(req, nil) // not cancelable with CancelRequest
resp, err = pconn.alt.RoundTrip(req)
} else {
resp, err = pconn.roundTrip(treq)
}
if err == nil {
return resp, nil
}
if !pconn.shouldRetryRequest(req, err) {
// Issue 16465: return underlying net.Conn.Read error from peek,
// as we've historically done.
if e, ok := err.(transportReadFromServerError); ok {
err = e.err
}
return nil, err
}
testHookRoundTripRetried()
// Rewind the body if we're able to. (HTTP/2 does this itself so we only
// need to do it for HTTP/1.1 connections.)
if req.GetBody != nil && pconn.alt == nil {
newReq := *req
var err error
newReq.Body, err = req.GetBody()
if err != nil {
return nil, err
}
req = &newReq
}
}
}
// shouldRetryRequest reports whether we should retry sending a failed
// HTTP request on a new connection. The non-nil input error is the
// error from roundTrip.
func (pc *persistConn) shouldRetryRequest(req *Request, err error) bool {
if http2isNoCachedConnError(err) {
// Issue 16582: if the user started a bunch of
// requests at once, they can all pick the same conn
// and violate the server's max concurrent streams.
// Instead, match the HTTP/1 behavior for now and dial
// again to get a new TCP connection, rather than failing
// this request.
return true
}
if err == errMissingHost {
// User error.
return false
}
if !pc.isReused() {
// This was a fresh connection. There's no reason the server
// should've hung up on us.
//
// Also, if we retried now, we could loop forever
// creating new connections and retrying if the server
// is just hanging up on us because it doesn't like
// our request (as opposed to sending an error).
return false
}
if _, ok := err.(nothingWrittenError); ok {
// We never wrote anything, so it's safe to retry, if there's no body or we
// can "rewind" the body with GetBody.
return req.outgoingLength() == 0 || req.GetBody != nil
}
if !req.isReplayable() {
// Don't retry non-idempotent requests.
return false
}
if _, ok := err.(transportReadFromServerError); ok {
// We got some non-EOF net.Conn.Read failure reading
// the 1st response byte from the server.
return true
}
if err == errServerClosedIdle {
// The server replied with io.EOF while we were trying to
// read the response. Probably an unfortunate keep-alive
// timeout, just as the client was writing a request.
return true
}
return false // conservatively
}
// ErrSkipAltProtocol is a sentinel error value defined by Transport.RegisterProtocol.
var ErrSkipAltProtocol = errors.New("net/http: skip alternate protocol")
// RegisterProtocol registers a new protocol with scheme.
// The Transport will pass requests using the given scheme to rt.
// It is rt's responsibility to simulate HTTP request semantics.
//
// RegisterProtocol can be used by other packages to provide
// implementations of protocol schemes like "ftp" or "file".
//
// If rt.RoundTrip returns ErrSkipAltProtocol, the Transport will
// handle the RoundTrip itself for that one request, as if the
// protocol were not registered.
func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) {
t.altMu.Lock()
defer t.altMu.Unlock()
oldMap, _ := t.altProto.Load().(map[string]RoundTripper)
if _, exists := oldMap[scheme]; exists {
panic("protocol " + scheme + " already registered")
}
newMap := make(map[string]RoundTripper)
for k, v := range oldMap {
newMap[k] = v
}
newMap[scheme] = rt
t.altProto.Store(newMap)
}
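// Sketch of how a package might use RegisterProtocol (myRoundTripper is a
// hypothetical RoundTripper implementation, named here only for illustration):
//
//	t := &Transport{}
//	t.RegisterProtocol("file", myRoundTripper{})
//	// Requests with a "file://" URL sent through t are now handled by
//	// myRoundTripper.RoundTrip; returning ErrSkipAltProtocol from it would
//	// hand the request back to the Transport's normal path.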
// CloseIdleConnections closes any connections which were connected by
// previous requests but are now sitting idle in
// a "keep-alive" state. It does not interrupt any connections currently
// in use.
func (t *Transport) CloseIdleConnections() {
t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
t.idleMu.Lock()
m := t.idleConn
t.idleConn = nil
t.idleConnCh = nil
t.wantIdle = true
t.idleLRU = connLRU{}
t.idleMu.Unlock()
for _, conns := range m {
for _, pconn := range conns {
pconn.close(errCloseIdleConns)
}
}
if t2 := t.h2transport; t2 != nil {
t2.CloseIdleConnections()
}
}
// CancelRequest cancels an in-flight request by closing its connection.
// CancelRequest should only be called after RoundTrip has returned.
//
// Deprecated: Use Request.WithContext to create a request with a
// cancelable context instead. CancelRequest cannot cancel HTTP/2
// requests.
func (t *Transport) CancelRequest(req *Request) {
t.cancelRequest(req, errRequestCanceled)
}
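// Since CancelRequest is deprecated, here is a sketch of the recommended
// context-based cancellation (illustrative only, not part of the original
// source; example.com is a placeholder):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	req, _ := NewRequest("GET", "https://example.com/", nil)
//	req = req.WithContext(ctx)
//	resp, err := DefaultClient.Do(req) // canceled automatically when ctx expires
//	_, _ = resp, err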
// Cancel an in-flight request, recording the error value.
func (t *Transport) cancelRequest(req *Request, err error) {
t.reqMu.Lock()
cancel := t.reqCanceler[req]
delete(t.reqCanceler, req)
t.reqMu.Unlock()
if cancel != nil {
cancel(err)
}
}
//
// Private implementation past this point.
//
var (
// envProxyOnce guards envProxyFuncValue
envProxyOnce sync.Once
envProxyFuncValue func(*url.URL) (*url.URL, error)
)
// envProxyFunc returns a function that resolves the proxy to use for a
// URL from the environment, computing the configuration only once. Caching
// the result mitigates expensive lookups on some platforms (e.g. Windows).
func envProxyFunc() func(*url.URL) (*url.URL, error) {
envProxyOnce.Do(func() {
envProxyFuncValue = httpproxy.FromEnvironment().ProxyFunc()
})
return envProxyFuncValue
}
// resetProxyConfig is used by tests.
func resetProxyConfig() {
envProxyOnce = sync.Once{}
envProxyFuncValue = nil
}
func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) {
if port := treq.URL.Port(); !validPort(port) {
return cm, fmt.Errorf("invalid URL port %q", port)
}
cm.targetScheme = treq.URL.Scheme
cm.targetAddr = canonicalAddr(treq.URL)
if t.Proxy != nil {
cm.proxyURL, err = t.Proxy(treq.Request)
if err == nil && cm.proxyURL != nil {
if port := cm.proxyURL.Port(); !validPort(port) {
return cm, fmt.Errorf("invalid proxy URL port %q", port)
}
}
}
return cm, err
}
// proxyAuth returns the Proxy-Authorization header to set
// on requests, if applicable.
func (cm *connectMethod) proxyAuth() string {
if cm.proxyURL == nil {
return ""
}
if u := cm.proxyURL.User; u != nil {
username := u.Username()
password, _ := u.Password()
return "Basic " + basicAuth(username, password)
}
return ""
}
// error values for debugging and testing, not seen by users.
var (
errKeepAlivesDisabled = errors.New("http: putIdleConn: keep alives disabled")
errConnBroken = errors.New("http: putIdleConn: connection is in bad state")
errWantIdle = errors.New("http: putIdleConn: CloseIdleConnections was called")
errTooManyIdle = errors.New("http: putIdleConn: too many idle connections")
errTooManyIdleHost = errors.New("http: putIdleConn: too many idle connections for host")
errCloseIdleConns = errors.New("http: CloseIdleConnections called")
errReadLoopExiting = errors.New("http: persistConn.readLoop exiting")
errIdleConnTimeout = errors.New("http: idle connection timeout")
errNotCachingH2Conn = errors.New("http: not caching alternate protocol's connections")
// errServerClosedIdle is not seen by users for idempotent requests, but may be
// seen by a user if the server shuts down an idle connection and sends its FIN
// in flight with already-written POST body bytes from the client.
// See https://github.com/golang/go/issues/19943#issuecomment-355607646
errServerClosedIdle = errors.New("http: server closed idle connection")
)
// transportReadFromServerError is used by Transport.readLoop when the
// 1 byte peek read fails and we're actually anticipating a response.
// Usually this is just due to the inherent keep-alive shut down race,
// where the server closed the connection at the same time the client
// wrote. The underlying err field is usually io.EOF or some
// ECONNRESET sort of thing which varies by platform. But it might be
// the user's custom net.Conn.Read error too, so we carry it along for
// them to return from Transport.RoundTrip.
type transportReadFromServerError struct {
err error
}
func (e transportReadFromServerError) Error() string {
return fmt.Sprintf("net/http: Transport failed to read from server: %v", e.err)
}
func (t *Transport) putOrCloseIdleConn(pconn *persistConn) {
if err := t.tryPutIdleConn(pconn); err != nil {
pconn.close(err)
}
}
func (t *Transport) maxIdleConnsPerHost() int {
if v := t.MaxIdleConnsPerHost; v != 0 {
return v
}
return DefaultMaxIdleConnsPerHost
}
// tryPutIdleConn adds pconn to the list of idle persistent connections awaiting
// a new request.
// If pconn is no longer needed or not in a good state, tryPutIdleConn returns
// an error explaining why it wasn't registered.
// tryPutIdleConn does not close pconn. Use putOrCloseIdleConn instead for that.
func (t *Transport) tryPutIdleConn(pconn *persistConn) error {
if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 {
return errKeepAlivesDisabled
}
if pconn.isBroken() {
return errConnBroken
}
if pconn.alt != nil {
return errNotCachingH2Conn
}
pconn.markReused()
key := pconn.cacheKey
t.idleMu.Lock()
defer t.idleMu.Unlock()
waitingDialer := t.idleConnCh[key]
select {
case waitingDialer <- pconn:
// We're done with this pconn and somebody else is
// currently waiting for a conn of this type (they're
// actively dialing, but this conn is ready
// first). Chrome calls this socket late binding. See
// https://insouciant.org/tech/connection-management-in-chromium/
return nil
default:
if waitingDialer != nil {
// They had populated this, but their dial won
// first, so we can clean up this map entry.
delete(t.idleConnCh, key)
}
}
if t.wantIdle {
return errWantIdle
}
if t.idleConn == nil {
t.idleConn = make(map[connectMethodKey][]*persistConn)
}
idles := t.idleConn[key]
if len(idles) >= t.maxIdleConnsPerHost() {
return errTooManyIdleHost
}
for _, exist := range idles {
if exist == pconn {
log.Fatalf("dup idle pconn %p in freelist", pconn)
}
}
t.idleConn[key] = append(idles, pconn)
t.idleLRU.add(pconn)
if t.MaxIdleConns != 0 && t.idleLRU.len() > t.MaxIdleConns {
oldest := t.idleLRU.removeOldest()
oldest.close(errTooManyIdle)
t.removeIdleConnLocked(oldest)
}
if t.IdleConnTimeout > 0 {
if pconn.idleTimer != nil {
pconn.idleTimer.Reset(t.IdleConnTimeout)
} else {
pconn.idleTimer = time.AfterFunc(t.IdleConnTimeout, pconn.closeConnIfStillIdle)
}
}
pconn.idleAt = time.Now()
return nil
}
// getIdleConnCh returns a channel to receive and return idle
// persistent connections for the given connectMethod.
// It may return nil, if persistent connections are not being used.
func (t *Transport) getIdleConnCh(cm connectMethod) chan *persistConn {
if t.DisableKeepAlives {
return nil
}
key := cm.key()
t.idleMu.Lock()
defer t.idleMu.Unlock()
t.wantIdle = false
if t.idleConnCh == nil {
t.idleConnCh = make(map[connectMethodKey]chan *persistConn)
}
ch, ok := t.idleConnCh[key]
if !ok {
ch = make(chan *persistConn)
t.idleConnCh[key] = ch
}
return ch
}
func (t *Transport) getIdleConn(cm connectMethod) (pconn *persistConn, idleSince time.Time) {
key := cm.key()
t.idleMu.Lock()
defer t.idleMu.Unlock()
for {
pconns, ok := t.idleConn[key]
if !ok {
return nil, time.Time{}
}
if len(pconns) == 1 {
pconn = pconns[0]
delete(t.idleConn, key)
} else {
// 2 or more cached connections; use the most
// recently used one at the end.
pconn = pconns[len(pconns)-1]
t.idleConn[key] = pconns[:len(pconns)-1]
}
t.idleLRU.remove(pconn)
if pconn.isBroken() {
// There is a tiny window where this is
// possible, between the connection dying and
// the persistConn readLoop calling
// Transport.removeIdleConn. Just skip it and
// carry on.
continue
}
if pconn.idleTimer != nil && !pconn.idleTimer.Stop() {
// We picked this conn at the ~same time it
// was expiring and it's trying to close
// itself in another goroutine. Don't use it.
continue
}
return pconn, pconn.idleAt
}
}
// removeIdleConn marks pconn as dead.
func (t *Transport) removeIdleConn(pconn *persistConn) {
t.idleMu.Lock()
defer t.idleMu.Unlock()
t.removeIdleConnLocked(pconn)
}
// t.idleMu must be held.
func (t *Transport) removeIdleConnLocked(pconn *persistConn) {
if pconn.idleTimer != nil {
pconn.idleTimer.Stop()
}
t.idleLRU.remove(pconn)
key := pconn.cacheKey
pconns := t.idleConn[key]
switch len(pconns) {
case 0:
// Nothing
case 1:
if pconns[0] == pconn {
delete(t.idleConn, key)
}
default:
for i, v := range pconns {
if v != pconn {
continue
}
// Slide down, keeping most recently-used
// conns at the end.
copy(pconns[i:], pconns[i+1:])
t.idleConn[key] = pconns[:len(pconns)-1]
break
}
}
}
func (t *Transport) setReqCanceler(r *Request, fn func(error)) {
t.reqMu.Lock()
defer t.reqMu.Unlock()
if t.reqCanceler == nil {
t.reqCanceler = make(map[*Request]func(error))
}
if fn != nil {
t.reqCanceler[r] = fn
} else {
delete(t.reqCanceler, r)
}
}
// replaceReqCanceler replaces an existing cancel function. If there is no cancel function
// for the request, we don't set the function and return false.
// Since CancelRequest will clear the canceler, we can use the return value to detect if
// the request was canceled since the last setReqCanceler call.
func (t *Transport) replaceReqCanceler(r *Request, fn func(error)) bool {
t.reqMu.Lock()
defer t.reqMu.Unlock()
_, ok := t.reqCanceler[r]
if !ok {
return false
}
if fn != nil {
t.reqCanceler[r] = fn
} else {
delete(t.reqCanceler, r)
}
return true
}
var zeroDialer net.Dialer
func (t *Transport) dial(ctx context.Context, network, addr string) (net.Conn, error) {
if t.DialContext != nil {
return t.DialContext(ctx, network, addr)
}
if t.Dial != nil {
c, err := t.Dial(network, addr)
if c == nil && err == nil {
err = errors.New("net/http: Transport.Dial hook returned (nil, nil)")
}
return c, err
}
return zeroDialer.DialContext(ctx, network, addr)
}
// getConn dials and creates a new persistConn to the target as
// specified in the connectMethod. This includes doing a proxy CONNECT
// and/or setting up TLS. If this doesn't return an error, the persistConn
// is ready to write requests to.
func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (*persistConn, error) {
req := treq.Request
trace := treq.trace
ctx := req.Context()
if trace != nil && trace.GetConn != nil {
trace.GetConn(cm.addr())
}
if pc, idleSince := t.getIdleConn(cm); pc != nil {
if trace != nil && trace.GotConn != nil {
trace.GotConn(pc.gotIdleConnTrace(idleSince))
}
// set request canceler to some non-nil function so we
// can detect whether it was cleared between now and when
// we enter roundTrip
t.setReqCanceler(req, func(error) {})
return pc, nil
}
type dialRes struct {
pc *persistConn
err error
}
dialc := make(chan dialRes)
// Copy these hooks so we don't race on the postPendingDial in
// the goroutine we launch. Issue 11136.
testHookPrePendingDial := testHookPrePendingDial
testHookPostPendingDial := testHookPostPendingDial
handlePendingDial := func() {
testHookPrePendingDial()
go func() {
if v := <-dialc; v.err == nil {
t.putOrCloseIdleConn(v.pc)
}
testHookPostPendingDial()
}()
}
cancelc := make(chan error, 1)
t.setReqCanceler(req, func(err error) { cancelc <- err })
go func() {
pc, err := t.dialConn(ctx, cm)
dialc <- dialRes{pc, err}
}()
idleConnCh := t.getIdleConnCh(cm)
select {
case v := <-dialc:
// Our dial finished.
if v.pc != nil {
if trace != nil && trace.GotConn != nil && v.pc.alt == nil {
trace.GotConn(httptrace.GotConnInfo{Conn: v.pc.conn})
}
return v.pc, nil
}
// Our dial failed. See why to return a nicer error
// value.
select {
case <-req.Cancel:
// It was an error due to cancelation, so prioritize that
// error value. (Issue 16049)
return nil, errRequestCanceledConn
case <-req.Context().Done():
return nil, req.Context().Err()
case err := <-cancelc:
if err == errRequestCanceled {
err = errRequestCanceledConn
}
return nil, err
default:
// It wasn't an error due to cancelation, so
// return the original error message:
return nil, v.err
}
case pc := <-idleConnCh:
// Another request finished first and its net.Conn
// became available before our dial. Or somebody
// else's dial that they didn't use.
// But our dial is still going, so give it away
// when it finishes:
handlePendingDial()
if trace != nil && trace.GotConn != nil {
trace.GotConn(httptrace.GotConnInfo{Conn: pc.conn, Reused: pc.isReused()})
}
return pc, nil
case <-req.Cancel:
handlePendingDial()
return nil, errRequestCanceledConn
case <-req.Context().Done():
handlePendingDial()
return nil, req.Context().Err()
case err := <-cancelc:
handlePendingDial()
if err == errRequestCanceled {
err = errRequestCanceledConn
}
return nil, err
}
}
// The connect method and the transport can both specify a TLS
// Host name. The transport's name takes precedence if present.
func chooseTLSHost(cm connectMethod, t *Transport) string {
tlsHost := ""
if t.TLSClientConfig != nil {
tlsHost = t.TLSClientConfig.ServerName
}
if tlsHost == "" {
tlsHost = cm.tlsHost()
}
return tlsHost
}
// Add TLS to a persistent connection, i.e. negotiate a TLS session. If pconn is already a TLS
// tunnel, this function establishes a nested TLS session inside the encrypted channel.
// The remote endpoint's name may be overridden by TLSClientConfig.ServerName.
func (pconn *persistConn) addTLS(name string, trace *httptrace.ClientTrace) error {
// Initiate TLS and check remote host name against certificate.
cfg := cloneTLSConfig(pconn.t.TLSClientConfig)
if cfg.ServerName == "" {
cfg.ServerName = name
}
plainConn := pconn.conn
tlsConn := tls.Client(plainConn, cfg)
errc := make(chan error, 2)
var timer *time.Timer // for canceling TLS handshake
if d := pconn.t.TLSHandshakeTimeout; d != 0 {
timer = time.AfterFunc(d, func() {
errc <- tlsHandshakeTimeoutError{}
})
}
go func() {
if trace != nil && trace.TLSHandshakeStart != nil {
trace.TLSHandshakeStart()
}
err := tlsConn.Handshake()
if timer != nil {
timer.Stop()
}
errc <- err
}()
if err := <-errc; err != nil {
plainConn.Close()
if trace != nil && trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(tls.ConnectionState{}, err)
}
return err
}
cs := tlsConn.ConnectionState()
if trace != nil && trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(cs, nil)
}
pconn.tlsState = &cs
pconn.conn = tlsConn
return nil
}
func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (*persistConn, error) {
pconn := &persistConn{
t: t,
cacheKey: cm.key(),
reqch: make(chan requestAndChan, 1),
writech: make(chan writeRequest, 1),
closech: make(chan struct{}),
writeErrCh: make(chan error, 1),
writeLoopDone: make(chan struct{}),
}
trace := httptrace.ContextClientTrace(ctx)
wrapErr := func(err error) error {
if cm.proxyURL != nil {
// Return a typed error, per Issue 16997
return &net.OpError{Op: "proxyconnect", Net: "tcp", Err: err}
}
return err
}
if cm.scheme() == "https" && t.DialTLS != nil {
var err error
pconn.conn, err = t.DialTLS("tcp", cm.addr())
if err != nil {
return nil, wrapErr(err)
}
if pconn.conn == nil {
return nil, wrapErr(errors.New("net/http: Transport.DialTLS returned (nil, nil)"))
}
if tc, ok := pconn.conn.(*tls.Conn); ok {
// Handshake here, in case DialTLS didn't. TLSNextProto below
// depends on it for knowing the connection state.
if trace != nil && trace.TLSHandshakeStart != nil {
trace.TLSHandshakeStart()
}
if err := tc.Handshake(); err != nil {
go pconn.conn.Close()
if trace != nil && trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(tls.ConnectionState{}, err)
}
return nil, err
}
cs := tc.ConnectionState()
if trace != nil && trace.TLSHandshakeDone != nil {
trace.TLSHandshakeDone(cs, nil)
}
pconn.tlsState = &cs
}
} else {
conn, err := t.dial(ctx, "tcp", cm.addr())
if err != nil {
return nil, wrapErr(err)
}
pconn.conn = conn
if cm.scheme() == "https" {
var firstTLSHost string
if firstTLSHost, _, err = net.SplitHostPort(cm.addr()); err != nil {
return nil, wrapErr(err)
}
if err = pconn.addTLS(firstTLSHost, trace); err != nil {
return nil, wrapErr(err)
}
}
}
// Proxy setup.
switch {
case cm.proxyURL == nil:
// Do nothing. Not using a proxy.
case cm.proxyURL.Scheme == "socks5":
conn := pconn.conn
d := socksNewDialer("tcp", conn.RemoteAddr().String())
if u := cm.proxyURL.User; u != nil {
auth := &socksUsernamePassword{
Username: u.Username(),
}
auth.Password, _ = u.Password()
d.AuthMethods = []socksAuthMethod{
socksAuthMethodNotRequired,
socksAuthMethodUsernamePassword,
}
d.Authenticate = auth.Authenticate
}
if _, err := d.DialWithConn(ctx, conn, "tcp", cm.targetAddr); err != nil {
conn.Close()
return nil, err
}
case cm.targetScheme == "http":
pconn.isProxy = true
if pa := cm.proxyAuth(); pa != "" {
pconn.mutateHeaderFunc = func(h Header) {
h.Set("Proxy-Authorization", pa)
}
}
case cm.targetScheme == "https":
conn := pconn.conn
hdr := t.ProxyConnectHeader
if hdr == nil {
hdr = make(Header)
}
connectReq := &Request{
Method: "CONNECT",
URL: &url.URL{Opaque: cm.targetAddr},
Host: cm.targetAddr,
Header: hdr,
}
if pa := cm.proxyAuth(); pa != "" {
connectReq.Header.Set("Proxy-Authorization", pa)
}
connectReq.Write(conn)
// Read response.
// Okay to use and discard buffered reader here, because
// TLS server will not speak until spoken to.
br := bufio.NewReader(conn)
resp, err := ReadResponse(br, connectReq)
if err != nil {
conn.Close()
return nil, err
}
if resp.StatusCode != 200 {
f := strings.SplitN(resp.Status, " ", 2)
conn.Close()
if len(f) < 2 {
return nil, errors.New("unknown status code")
}
return nil, errors.New(f[1])
}
}
if cm.proxyURL != nil && cm.targetScheme == "https" {
if err := pconn.addTLS(cm.tlsHost(), trace); err != nil {
return nil, err
}
}
if s := pconn.tlsState; s != nil && s.NegotiatedProtocolIsMutual && s.NegotiatedProtocol != "" {
if next, ok := t.TLSNextProto[s.NegotiatedProtocol]; ok {
return &persistConn{alt: next(cm.targetAddr, pconn.conn.(*tls.Conn))}, nil
}
}
pconn.br = bufio.NewReader(pconn)
pconn.bw = bufio.NewWriter(persistConnWriter{pconn})
go pconn.readLoop()
go pconn.writeLoop()
return pconn, nil
}
// persistConnWriter is the io.Writer written to by pc.bw.
// It accumulates the number of bytes written to the underlying conn,
// so the retry logic can determine whether any bytes made it across
// the wire.
// This is exactly 1 pointer field wide so it can go into an interface
// without allocation.
type persistConnWriter struct {
pc *persistConn
}
func (w persistConnWriter) Write(p []byte) (n int, err error) {
n, err = w.pc.conn.Write(p)
w.pc.nwrite += int64(n)
return
}
// connectMethod is the map key (in its String form) for keeping persistent
// TCP connections alive for subsequent HTTP requests.
//
// A connect method may be of the following types:
//
// Cache key form Description
// ----------------- -------------------------
// |http|foo.com http directly to server, no proxy
// |https|foo.com https directly to server, no proxy
// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com
// http://proxy.com|http http to proxy, http to anywhere after that
// socks5://proxy.com|http|foo.com socks5 to proxy, then http to foo.com
// socks5://proxy.com|https|foo.com socks5 to proxy, then https to foo.com
// https://proxy.com|https|foo.com https to proxy, then CONNECT to foo.com
// https://proxy.com|http https to proxy, http to anywhere after that
//
type connectMethod struct {
proxyURL *url.URL // nil for no proxy, else full proxy URL
targetScheme string // "http" or "https"
// If proxyURL specifies an http or https proxy, and targetScheme is http (not https),
// then targetAddr is not included in the connect method key, because the socket can
// be reused for different targetAddr values.
targetAddr string
}
func (cm *connectMethod) key() connectMethodKey {
proxyStr := ""
targetAddr := cm.targetAddr
if cm.proxyURL != nil {
proxyStr = cm.proxyURL.String()
if (cm.proxyURL.Scheme == "http" || cm.proxyURL.Scheme == "https") && cm.targetScheme == "http" {
targetAddr = ""
}
}
return connectMethodKey{
proxy: proxyStr,
scheme: cm.targetScheme,
addr: targetAddr,
}
}
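// exampleConnectMethodKey is a hedged sketch, not part of the original
// transport.go: it illustrates how key() collapses the cache-key table
// documented above. The proxy URL and target addresses are made-up values.
func exampleConnectMethodKey() {
	direct := connectMethod{targetScheme: "https", targetAddr: "foo.com:443"}
	fmt.Println(direct.key()) // |https|foo.com:443

	proxyURL, _ := url.Parse("http://proxy.com")
	viaProxy := connectMethod{proxyURL: proxyURL, targetScheme: "http", targetAddr: "foo.com:80"}
	// addr is dropped from the key: the proxied socket can be reused for any
	// plain-http target behind the same proxy.
	fmt.Println(viaProxy.key()) // http://proxy.com|http|
}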
// scheme returns the first hop scheme: http, https, or socks5
func (cm *connectMethod) scheme() string {
if cm.proxyURL != nil {
return cm.proxyURL.Scheme
}
return cm.targetScheme
}
// addr returns the first hop "host:port" to which we need to TCP connect.
func (cm *connectMethod) addr() string {
if cm.proxyURL != nil {
return canonicalAddr(cm.proxyURL)
}
return cm.targetAddr
}
// tlsHost returns the host name to match against the peer's
// TLS certificate.
func (cm *connectMethod) tlsHost() string {
h := cm.targetAddr
if hasPort(h) {
h = h[:strings.LastIndex(h, ":")]
}
return h
}
// connectMethodKey is the map key version of connectMethod, with a
// stringified proxy URL (or the empty string) instead of a pointer to
// a URL.
type connectMethodKey struct {
proxy, scheme, addr string
}
func (k connectMethodKey) String() string {
// Only used by tests.
return fmt.Sprintf("%s|%s|%s", k.proxy, k.scheme, k.addr)
}
// persistConn wraps a connection, usually a persistent one
// (but may be used for non-keep-alive requests as well)
type persistConn struct {
// alt optionally specifies the TLS NextProto RoundTripper.
// This is used for HTTP/2 today and future protocols later.
// If it's non-nil, the rest of the fields are unused.
alt RoundTripper
t *Transport
cacheKey connectMethodKey
conn net.Conn
tlsState *tls.ConnectionState
br *bufio.Reader // from conn
bw *bufio.Writer // to conn
nwrite int64 // bytes written
reqch chan requestAndChan // written by roundTrip; read by readLoop
writech chan writeRequest // written by roundTrip; read by writeLoop
closech chan struct{} // closed when conn closed
isProxy bool
sawEOF bool // whether we've seen EOF from conn; owned by readLoop
readLimit int64 // bytes allowed to be read; owned by readLoop
// writeErrCh passes the request write error (usually nil)
// from the writeLoop goroutine to the readLoop which passes
// it off to the res.Body reader, which then uses it to decide
// whether or not a connection can be reused. Issue 7569.
writeErrCh chan error
writeLoopDone chan struct{} // closed when write loop ends
// Both guarded by Transport.idleMu:
idleAt    time.Time   // time it last became idle
idleTimer *time.Timer // holding an AfterFunc to close it
mu sync.Mutex // guards following fields
numExpectedResponses int
closed error // set non-nil when conn is closed, before closech is closed
canceledErr error // set non-nil if conn is canceled
broken bool // an error has happened on this connection; marked broken so it's not reused.
reused bool // whether conn has had successful request/response and is being reused.
// mutateHeaderFunc is an optional func to modify extra
// headers on each outbound request before it's written. (the
// original Request given to RoundTrip is not modified)
mutateHeaderFunc func(Header)
}
func (pc *persistConn) maxHeaderResponseSize() int64 {
if v := pc.t.MaxResponseHeaderBytes; v != 0 {
return v
}
return 10 << 20 // conservative default; same as http2
}
func (pc *persistConn) Read(p []byte) (n int, err error) {
if pc.readLimit <= 0 {
return 0, fmt.Errorf("read limit of %d bytes exhausted", pc.maxHeaderResponseSize())
}
if int64(len(p)) > pc.readLimit {
p = p[:pc.readLimit]
}
n, err = pc.conn.Read(p)
if err == io.EOF {
pc.sawEOF = true
}
pc.readLimit -= int64(n)
return
}
// isBroken reports whether this connection is in a known broken state.
func (pc *persistConn) isBroken() bool {
pc.mu.Lock()
b := pc.closed != nil
pc.mu.Unlock()
return b
}
// canceled returns non-nil if the connection was closed due to
// CancelRequest or due to context cancelation.
func (pc *persistConn) canceled() error {
pc.mu.Lock()
defer pc.mu.Unlock()
return pc.canceledErr
}
// isReused reports whether this connection has already served a request/response and is being reused.
func (pc *persistConn) isReused() bool {
pc.mu.Lock()
r := pc.reused
pc.mu.Unlock()
return r
}
func (pc *persistConn) gotIdleConnTrace(idleAt time.Time) (t httptrace.GotConnInfo) {
pc.mu.Lock()
defer pc.mu.Unlock()
t.Reused = pc.reused
t.Conn = pc.conn
t.WasIdle = true
if !idleAt.IsZero() {
t.IdleTime = time.Since(idleAt)
}
return
}
func (pc *persistConn) cancelRequest(err error) {
pc.mu.Lock()
defer pc.mu.Unlock()
pc.canceledErr = err
pc.closeLocked(errRequestCanceled)
}
// closeConnIfStillIdle closes the connection if it's still sitting idle.
// This is what's called by the persistConn's idleTimer, and is run in its
// own goroutine.
func (pc *persistConn) closeConnIfStillIdle() {
t := pc.t
t.idleMu.Lock()
defer t.idleMu.Unlock()
if _, ok := t.idleLRU.m[pc]; !ok {
// Not idle.
return
}
t.removeIdleConnLocked(pc)
pc.close(errIdleConnTimeout)
}
// mapRoundTripError returns the appropriate error value for
// persistConn.roundTrip.
//
// The provided err is the first error that (*persistConn).roundTrip
// happened to receive from its select statement.
//
// The startBytesWritten value should be the value of pc.nwrite before the roundTrip
// started writing the request.
func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritten int64, err error) error {
if err == nil {
return nil
}
// If the request was canceled, that's better than network
// failures that were likely the result of tearing down the
// connection.
if cerr := pc.canceled(); cerr != nil {
return cerr
}
// See if an error was set explicitly.
req.mu.Lock()
reqErr := req.err
req.mu.Unlock()
if reqErr != nil {
return reqErr
}
if err == errServerClosedIdle {
// Don't decorate
return err
}
if _, ok := err.(transportReadFromServerError); ok {
// Don't decorate
return err
}
if pc.isBroken() {
<-pc.writeLoopDone
if pc.nwrite == startBytesWritten {
return nothingWrittenError{err}
}
return fmt.Errorf("net/http: HTTP/1.x transport connection broken: %v", err)
}
return err
}
func (pc *persistConn) readLoop() {
closeErr := errReadLoopExiting // default value, if not changed below
defer func() {
pc.close(closeErr)
pc.t.removeIdleConn(pc)
}()
tryPutIdleConn := func(trace *httptrace.ClientTrace) bool {
if err := pc.t.tryPutIdleConn(pc); err != nil {
closeErr = err
if trace != nil && trace.PutIdleConn != nil && err != errKeepAlivesDisabled {
trace.PutIdleConn(err)
}
return false
}
if trace != nil && trace.PutIdleConn != nil {
trace.PutIdleConn(nil)
}
return true
}
// eofc is used to block caller goroutines reading from Response.Body
// at EOF until this goroutine has (potentially) added the connection
// back to the idle pool.
eofc := make(chan struct{})
defer close(eofc) // unblock reader on errors
// Read this once, before loop starts. (to avoid races in tests)
testHookMu.Lock()
testHookReadLoopBeforeNextRead := testHookReadLoopBeforeNextRead
testHookMu.Unlock()
alive := true
for alive {
pc.readLimit = pc.maxHeaderResponseSize()
_, err := pc.br.Peek(1)
pc.mu.Lock()
if pc.numExpectedResponses == 0 {
pc.readLoopPeekFailLocked(err)
pc.mu.Unlock()
return
}
pc.mu.Unlock()
rc := <-pc.reqch
trace := httptrace.ContextClientTrace(rc.req.Context())
var resp *Response
if err == nil {
resp, err = pc.readResponse(rc, trace)
} else {
err = transportReadFromServerError{err}
closeErr = err
}
if err != nil {
if pc.readLimit <= 0 {
err = fmt.Errorf("net/http: server response headers exceeded %d bytes; aborted", pc.maxHeaderResponseSize())
}
select {
case rc.ch <- responseAndError{err: err}:
case <-rc.callerGone:
return
}
return
}
pc.readLimit = maxInt64 // effectively no limit for response bodies
pc.mu.Lock()
pc.numExpectedResponses--
pc.mu.Unlock()
hasBody := rc.req.Method != "HEAD" && resp.ContentLength != 0
if resp.Close || rc.req.Close || resp.StatusCode <= 199 {
// Don't do keep-alive on error if either party requested a close
// or we get an unexpected informational (1xx) response.
// StatusCode 100 is already handled above.
alive = false
}
if !hasBody {
pc.t.setReqCanceler(rc.req, nil)
			// Put the idle conn back into the pool before we send the response,
			// so if the caller processes it quickly and makes another request,
			// they'll get this same conn. The send on the unbuffered channel
			// 'rc' below then guarantees that persistConn.roundTrip has left
			// its select (which may be waiting for this persistConn to close)
			// by the time the caller sees the response.
alive = alive &&
!pc.sawEOF &&
pc.wroteRequest() &&
tryPutIdleConn(trace)
select {
case rc.ch <- responseAndError{res: resp}:
case <-rc.callerGone:
return
}
// Now that they've read from the unbuffered channel, they're safely
// out of the select that also waits on this goroutine to die, so
// we're allowed to exit now if needed (if alive is false)
testHookReadLoopBeforeNextRead()
continue
}
waitForBodyRead := make(chan bool, 2)
body := &bodyEOFSignal{
body: resp.Body,
earlyCloseFn: func() error {
waitForBodyRead <- false
<-eofc // will be closed by deferred call at the end of the function
return nil
},
fn: func(err error) error {
isEOF := err == io.EOF
waitForBodyRead <- isEOF
if isEOF {
<-eofc // see comment above eofc declaration
} else if err != nil {
if cerr := pc.canceled(); cerr != nil {
return cerr
}
}
return err
},
}
resp.Body = body
if rc.addedGzip && strings.EqualFold(resp.Header.Get("Content-Encoding"), "gzip") {
resp.Body = &gzipReader{body: body}
resp.Header.Del("Content-Encoding")
resp.Header.Del("Content-Length")
resp.ContentLength = -1
resp.Uncompressed = true
}
select {
case rc.ch <- responseAndError{res: resp}:
case <-rc.callerGone:
return
}
// Before looping back to the top of this function and peeking on
// the bufio.Reader, wait for the caller goroutine to finish
// reading the response body. (or for cancelation or death)
select {
case bodyEOF := <-waitForBodyRead:
pc.t.setReqCanceler(rc.req, nil) // before pc might return to idle pool
alive = alive &&
bodyEOF &&
!pc.sawEOF &&
pc.wroteRequest() &&
tryPutIdleConn(trace)
if bodyEOF {
eofc <- struct{}{}
}
case <-rc.req.Cancel:
alive = false
pc.t.CancelRequest(rc.req)
case <-rc.req.Context().Done():
alive = false
pc.t.cancelRequest(rc.req, rc.req.Context().Err())
case <-pc.closech:
alive = false
}
testHookReadLoopBeforeNextRead()
}
}
func (pc *persistConn) readLoopPeekFailLocked(peekErr error) {
if pc.closed != nil {
return
}
if n := pc.br.Buffered(); n > 0 {
buf, _ := pc.br.Peek(n)
log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", buf, peekErr)
}
if peekErr == io.EOF {
// common case.
pc.closeLocked(errServerClosedIdle)
} else {
pc.closeLocked(fmt.Errorf("readLoopPeekFailLocked: %v", peekErr))
}
}
// readResponse reads an HTTP response (or two, in the case of "Expect:
// 100-continue") from the server. It returns the final non-100 one.
// trace is optional.
func (pc *persistConn) readResponse(rc requestAndChan, trace *httptrace.ClientTrace) (resp *Response, err error) {
if trace != nil && trace.GotFirstResponseByte != nil {
if peek, err := pc.br.Peek(1); err == nil && len(peek) == 1 {
trace.GotFirstResponseByte()
}
}
resp, err = ReadResponse(pc.br, rc.req)
if err != nil {
return
}
if rc.continueCh != nil {
if resp.StatusCode == 100 {
if trace != nil && trace.Got100Continue != nil {
trace.Got100Continue()
}
rc.continueCh <- struct{}{}
} else {
close(rc.continueCh)
}
}
if resp.StatusCode == 100 {
pc.readLimit = pc.maxHeaderResponseSize() // reset the limit
resp, err = ReadResponse(pc.br, rc.req)
if err != nil {
return
}
}
resp.TLS = pc.tlsState
return
}
// waitForContinue returns the function to block until
// any response, timeout or connection close. After any of them,
// the function returns a bool which indicates if the body should be sent.
func (pc *persistConn) waitForContinue(continueCh <-chan struct{}) func() bool {
if continueCh == nil {
return nil
}
return func() bool {
timer := time.NewTimer(pc.t.ExpectContinueTimeout)
defer timer.Stop()
select {
case _, ok := <-continueCh:
return ok
case <-timer.C:
return true
case <-pc.closech:
return false
}
}
}
// nothingWrittenError wraps a write error that ended up writing zero bytes.
type nothingWrittenError struct {
error
}
func (pc *persistConn) writeLoop() {
defer close(pc.writeLoopDone)
for {
select {
case wr := <-pc.writech:
startBytesWritten := pc.nwrite
err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra, pc.waitForContinue(wr.continueCh))
if bre, ok := err.(requestBodyReadError); ok {
err = bre.error
// Errors reading from the user's
// Request.Body are high priority.
// Set it here before sending on the
// channels below or calling
// pc.close() which tears down
// connections and causes other
// errors.
wr.req.setError(err)
}
if err == nil {
err = pc.bw.Flush()
}
if err != nil {
wr.req.Request.closeBody()
if pc.nwrite == startBytesWritten {
err = nothingWrittenError{err}
}
}
pc.writeErrCh <- err // to the body reader, which might recycle us
wr.ch <- err // to the roundTrip function
if err != nil {
pc.close(err)
return
}
case <-pc.closech:
return
}
}
}
// wroteRequest is a check before recycling a connection that the previous write
// (from writeLoop above) happened and was successful.
func (pc *persistConn) wroteRequest() bool {
select {
case err := <-pc.writeErrCh:
// Common case: the write happened well before the response, so
// avoid creating a timer.
return err == nil
default:
// Rare case: the request was written in writeLoop above but
// before it could send to pc.writeErrCh, the reader read it
// all, processed it, and called us here. In this case, give the
// write goroutine a bit of time to finish its send.
//
// Less rare case: We also get here in the legitimate case of
// Issue 7569, where the writer is still writing (or stalled),
// but the server has already replied. In this case, we don't
// want to wait too long, and we want to return false so this
// connection isn't re-used.
select {
case err := <-pc.writeErrCh:
return err == nil
case <-time.After(50 * time.Millisecond):
return false
}
}
}
// responseAndError is how the goroutine reading from an HTTP/1 server
// communicates with the goroutine doing the RoundTrip.
type responseAndError struct {
res *Response // else use this response (see res method)
err error
}
type requestAndChan struct {
req *Request
ch chan responseAndError // unbuffered; always send in select on callerGone
// whether the Transport (as opposed to the user client code)
// added the Accept-Encoding gzip header. If the Transport
// set it, only then do we transparently decode the gzip.
addedGzip bool
// Optional blocking chan for Expect: 100-continue (for send).
// If the request has an "Expect: 100-continue" header and
// the server responds 100 Continue, readLoop sends a value
// to writeLoop via this chan.
continueCh chan<- struct{}
callerGone <-chan struct{} // closed when roundTrip caller has returned
}
// A writeRequest is sent by the roundTrip goroutine to the
// writeLoop's goroutine to write a request while the read loop
// concurrently waits on both the write response and the server's
// reply.
type writeRequest struct {
req *transportRequest
ch chan<- error
// Optional blocking chan for Expect: 100-continue (for receive).
// If not nil, writeLoop blocks sending request body until
// it receives from this chan.
continueCh <-chan struct{}
}
type httpError struct {
err string
timeout bool
}
func (e *httpError) Error() string { return e.err }
func (e *httpError) Timeout() bool { return e.timeout }
func (e *httpError) Temporary() bool { return true }
var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true}
var errRequestCanceled = errors.New("net/http: request canceled")
var errRequestCanceledConn = errors.New("net/http: request canceled while waiting for connection") // TODO: unify?
func nop() {}
// testHooks. Always non-nil.
var (
testHookEnterRoundTrip = nop
testHookWaitResLoop = nop
testHookRoundTripRetried = nop
testHookPrePendingDial = nop
testHookPostPendingDial = nop
testHookMu sync.Locker = fakeLocker{} // guards following
testHookReadLoopBeforeNextRead = nop
)
func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
testHookEnterRoundTrip()
if !pc.t.replaceReqCanceler(req.Request, pc.cancelRequest) {
pc.t.putOrCloseIdleConn(pc)
return nil, errRequestCanceled
}
pc.mu.Lock()
pc.numExpectedResponses++
headerFn := pc.mutateHeaderFunc
pc.mu.Unlock()
if headerFn != nil {
headerFn(req.extraHeaders())
}
// Ask for a compressed version if the caller didn't set their
// own value for Accept-Encoding. We only attempt to
// uncompress the gzip stream if we were the layer that
// requested it.
requestedGzip := false
if !pc.t.DisableCompression &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
req.Method != "HEAD" {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// https://trac.nginx.org/nginx/ticket/358
// https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
requestedGzip = true
req.extraHeaders().Set("Accept-Encoding", "gzip")
}
var continueCh chan struct{}
if req.ProtoAtLeast(1, 1) && req.Body != nil && req.expectsContinue() {
continueCh = make(chan struct{}, 1)
}
if pc.t.DisableKeepAlives {
req.extraHeaders().Set("Connection", "close")
}
gone := make(chan struct{})
defer close(gone)
defer func() {
if err != nil {
pc.t.setReqCanceler(req.Request, nil)
}
}()
const debugRoundTrip = false
// Write the request concurrently with waiting for a response,
// in case the server decides to reply before reading our full
// request body.
startBytesWritten := pc.nwrite
writeErrCh := make(chan error, 1)
pc.writech <- writeRequest{req, writeErrCh, continueCh}
resc := make(chan responseAndError)
pc.reqch <- requestAndChan{
req: req.Request,
ch: resc,
addedGzip: requestedGzip,
continueCh: continueCh,
callerGone: gone,
}
var respHeaderTimer <-chan time.Time
cancelChan := req.Request.Cancel
ctxDoneChan := req.Context().Done()
for {
testHookWaitResLoop()
select {
case err := <-writeErrCh:
if debugRoundTrip {
req.logf("writeErrCh resv: %T/%#v", err, err)
}
if err != nil {
pc.close(fmt.Errorf("write error: %v", err))
return nil, pc.mapRoundTripError(req, startBytesWritten, err)
}
if d := pc.t.ResponseHeaderTimeout; d > 0 {
if debugRoundTrip {
req.logf("starting timer for %v", d)
}
timer := time.NewTimer(d)
defer timer.Stop() // prevent leaks
respHeaderTimer = timer.C
}
case <-pc.closech:
if debugRoundTrip {
req.logf("closech recv: %T %#v", pc.closed, pc.closed)
}
return nil, pc.mapRoundTripError(req, startBytesWritten, pc.closed)
case <-respHeaderTimer:
if debugRoundTrip {
req.logf("timeout waiting for response headers.")
}
pc.close(errTimeout)
return nil, errTimeout
case re := <-resc:
if (re.res == nil) == (re.err == nil) {
panic(fmt.Sprintf("internal error: exactly one of res or err should be set; nil=%v", re.res == nil))
}
if debugRoundTrip {
req.logf("resc recv: %p, %T/%#v", re.res, re.err, re.err)
}
if re.err != nil {
return nil, pc.mapRoundTripError(req, startBytesWritten, re.err)
}
return re.res, nil
case <-cancelChan:
pc.t.CancelRequest(req.Request)
cancelChan = nil
case <-ctxDoneChan:
pc.t.cancelRequest(req.Request, req.Context().Err())
cancelChan = nil
ctxDoneChan = nil
}
}
}
// tLogKey is a context WithValue key for test debugging contexts containing
// a t.Logf func. See export_test.go's Request.WithT method.
type tLogKey struct{}
func (tr *transportRequest) logf(format string, args ...interface{}) {
if logf, ok := tr.Request.Context().Value(tLogKey{}).(func(string, ...interface{})); ok {
logf(time.Now().Format(time.RFC3339Nano)+": "+format, args...)
}
}
// markReused marks this connection as having been successfully used for a
// request and response.
func (pc *persistConn) markReused() {
pc.mu.Lock()
pc.reused = true
pc.mu.Unlock()
}
// close closes the underlying TCP connection and closes
// the pc.closech channel.
//
// The provided err is only for testing and debugging; in normal
// circumstances it should never be seen by users.
func (pc *persistConn) close(err error) {
pc.mu.Lock()
defer pc.mu.Unlock()
pc.closeLocked(err)
}
func (pc *persistConn) closeLocked(err error) {
if err == nil {
panic("nil error")
}
pc.broken = true
if pc.closed == nil {
pc.closed = err
if pc.alt != nil {
// Do nothing; can only get here via getConn's
// handlePendingDial's putOrCloseIdleConn when
// it turns out the abandoned connection in
// flight ended up negotiating an alternate
// protocol. We don't use the connection
// freelist for http2. That's done by the
// alternate protocol's RoundTripper.
} else {
pc.conn.Close()
close(pc.closech)
}
}
pc.mutateHeaderFunc = nil
}
var portMap = map[string]string{
"http": "80",
"https": "443",
"socks5": "1080",
}
// canonicalAddr returns url.Host but always with a ":port" suffix
func canonicalAddr(url *url.URL) string {
addr := url.Hostname()
if v, err := idnaASCII(addr); err == nil {
addr = v
}
port := url.Port()
if port == "" {
port = portMap[url.Scheme]
}
return net.JoinHostPort(addr, port)
}
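// exampleCanonicalAddr is a hedged sketch, not part of the original file:
// it shows canonicalAddr filling in default ports from portMap for made-up URLs.
func exampleCanonicalAddr() {
	u1, _ := url.Parse("https://example.com/path")
	fmt.Println(canonicalAddr(u1)) // example.com:443 (default https port from portMap)

	u2, _ := url.Parse("http://example.com:8080")
	fmt.Println(canonicalAddr(u2)) // example.com:8080 (explicit port is kept)
}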
// bodyEOFSignal is used by the HTTP/1 transport when reading response
// bodies to make sure we see the end of a response body before
// proceeding and reading on the connection again.
//
// It wraps a ReadCloser but runs fn (if non-nil) at most
// once, right before its final (error-producing) Read or Close call
// returns. fn should return the new error to return from Read or Close.
//
// If earlyCloseFn is non-nil and Close is called before io.EOF is
// seen, earlyCloseFn is called instead of fn, and its return value is
// the return value from Close.
type bodyEOFSignal struct {
body io.ReadCloser
mu sync.Mutex // guards following 4 fields
closed bool // whether Close has been called
rerr error // sticky Read error
fn func(error) error // err will be nil on Read io.EOF
earlyCloseFn func() error // optional alt Close func used if io.EOF not seen
}
var errReadOnClosedResBody = errors.New("http: read on closed response body")
func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
es.mu.Lock()
closed, rerr := es.closed, es.rerr
es.mu.Unlock()
if closed {
return 0, errReadOnClosedResBody
}
if rerr != nil {
return 0, rerr
}
n, err = es.body.Read(p)
if err != nil {
es.mu.Lock()
defer es.mu.Unlock()
if es.rerr == nil {
es.rerr = err
}
err = es.condfn(err)
}
return
}
func (es *bodyEOFSignal) Close() error {
es.mu.Lock()
defer es.mu.Unlock()
if es.closed {
return nil
}
es.closed = true
if es.earlyCloseFn != nil && es.rerr != io.EOF {
return es.earlyCloseFn()
}
err := es.body.Close()
return es.condfn(err)
}
// caller must hold es.mu.
func (es *bodyEOFSignal) condfn(err error) error {
if es.fn == nil {
return err
}
err = es.fn(err)
es.fn = nil
return err
}
// gzipReader wraps a response body so it can lazily
// call gzip.NewReader on the first call to Read
type gzipReader struct {
body *bodyEOFSignal // underlying HTTP/1 response body framing
zr *gzip.Reader // lazily-initialized gzip reader
zerr error // any error from gzip.NewReader; sticky
}
func (gz *gzipReader) Read(p []byte) (n int, err error) {
if gz.zr == nil {
if gz.zerr == nil {
gz.zr, gz.zerr = gzip.NewReader(gz.body)
}
if gz.zerr != nil {
return 0, gz.zerr
}
}
gz.body.mu.Lock()
if gz.body.closed {
err = errReadOnClosedResBody
}
gz.body.mu.Unlock()
if err != nil {
return 0, err
}
return gz.zr.Read(p)
}
func (gz *gzipReader) Close() error {
return gz.body.Close()
}
type readerAndCloser struct {
io.Reader
io.Closer
}
type tlsHandshakeTimeoutError struct{}
func (tlsHandshakeTimeoutError) Timeout() bool { return true }
func (tlsHandshakeTimeoutError) Temporary() bool { return true }
func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" }
// fakeLocker is a sync.Locker which does nothing. It's used to guard
// test-only fields when not under test, to avoid runtime atomic
// overhead.
type fakeLocker struct{}
func (fakeLocker) Lock() {}
func (fakeLocker) Unlock() {}
// cloneTLSConfig returns a shallow clone of cfg, or a new zero tls.Config if
// cfg is nil. This is safe to call even if cfg is in active use by a TLS
// client or server.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}
type connLRU struct {
ll *list.List // list.Element.Value type is of *persistConn
m map[*persistConn]*list.Element
}
// add adds pc to the head of the linked list.
func (cl *connLRU) add(pc *persistConn) {
if cl.ll == nil {
cl.ll = list.New()
cl.m = make(map[*persistConn]*list.Element)
}
ele := cl.ll.PushFront(pc)
if _, ok := cl.m[pc]; ok {
panic("persistConn was already in LRU")
}
cl.m[pc] = ele
}
func (cl *connLRU) removeOldest() *persistConn {
ele := cl.ll.Back()
pc := ele.Value.(*persistConn)
cl.ll.Remove(ele)
delete(cl.m, pc)
return pc
}
// remove removes pc from cl.
func (cl *connLRU) remove(pc *persistConn) {
if ele, ok := cl.m[pc]; ok {
cl.ll.Remove(ele)
delete(cl.m, pc)
}
}
// len returns the number of items in the cache.
func (cl *connLRU) len() int {
return len(cl.m)
}
// validPort reports whether p (without the colon) is a valid port in
// a URL, per RFC 3986 Section 3.2.3, which says the port may be
// empty, or only contain digits.
func validPort(p string) bool {
for _, r := range []byte(p) {
if r < '0' || r > '9' {
return false
}
}
return true
}
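// exampleValidPort is a hedged sketch, not part of the original file:
// it exercises validPort on a few made-up inputs.
func exampleValidPort() {
	fmt.Println(validPort("8080")) // true
	fmt.Println(validPort(""))     // true: RFC 3986 allows an empty port
	fmt.Println(validPort("80a"))  // false: non-digit character
}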
|
[
"\"GODEBUG\""
] |
[] |
[
"GODEBUG"
] |
[]
|
["GODEBUG"]
|
go
| 1 | 0 | |
rasa/cli/arguments/data.py
|
import argparse
from typing import Text
from rasa.cli.arguments.default_arguments import (
add_nlu_data_param,
add_out_param,
add_data_param,
add_domain_param,
)
from rasa.shared.constants import DEFAULT_CONVERTED_DATA_PATH, DEFAULT_DOMAIN_PATH
def set_convert_arguments(parser: argparse.ArgumentParser, data_type: Text) -> None:
"""Sets convert command arguments."""
parser.add_argument(
"-f",
"--format",
default="yaml",
choices=["json", "yaml"],
help="Output format the training data should be converted into.",
)
add_data_param(parser, required=True, data_type=data_type)
add_out_param(
parser,
default=DEFAULT_CONVERTED_DATA_PATH,
help_text="File (for `json`) or existing path (for `yaml`) "
"where to save training data in Rasa format.",
)
parser.add_argument("-l", "--language", default="en", help="Language of data.")
def set_split_arguments(parser: argparse.ArgumentParser) -> None:
add_nlu_data_param(parser, help_text="File or folder containing your NLU data.")
parser.add_argument(
"--training-fraction",
type=float,
default=0.8,
help="Fraction of the data which should be used for training.",
)
parser.add_argument(
"--random-seed",
type=int,
default=None,
help="Seed to generate the same train/test split.",
)
add_out_param(
parser,
default="train_test_split",
help_text="Directory where the split files should be stored.",
)
def set_validator_arguments(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--fail-on-warnings",
default=False,
action="store_true",
help="Fail validation on warnings and errors. "
"If omitted only errors will result in a non zero exit code.",
)
add_domain_param(parser)
add_data_param(parser)
def set_migrate_arguments(parser: argparse.ArgumentParser) -> None:
"""Sets migrate command arguments."""
add_domain_param(parser)
add_out_param(
parser,
default=DEFAULT_DOMAIN_PATH,
help_text="Path (for `yaml`) where to save migrated domain in Rasa 3.0 format.",
)
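# Hedged usage sketch, not part of this module: wiring the setters above onto
# argparse subparsers, roughly how the `rasa data split/convert/validate`
# commands expose them. The sub-command names and the "nlu" data_type value
# are illustrative assumptions.
def _example_build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="rasa-data-example")
    subparsers = parser.add_subparsers(dest="subcommand")
    set_split_arguments(subparsers.add_parser("split"))
    set_convert_arguments(subparsers.add_parser("convert"), data_type="nlu")
    set_validator_arguments(subparsers.add_parser("validate"))
    return parser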
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
code/go/0chain.net/miner/miner/main.go
|
package main
import (
"bufio"
"context"
"errors"
"flag"
"fmt"
"log"
"net/http"
_ "net/http/pprof"
"os"
"runtime"
"strconv"
"time"
"go.uber.org/zap"
"0chain.net/chaincore/block"
"0chain.net/chaincore/chain"
"0chain.net/chaincore/client"
"0chain.net/chaincore/config"
"0chain.net/chaincore/diagnostics"
"0chain.net/chaincore/node"
"0chain.net/chaincore/round"
"0chain.net/chaincore/state"
"0chain.net/chaincore/threshold/bls"
"0chain.net/chaincore/transaction"
"0chain.net/core/build"
"0chain.net/core/common"
"0chain.net/core/ememorystore"
"0chain.net/core/logging"
"0chain.net/core/memorystore"
"0chain.net/core/viper"
"0chain.net/miner"
"0chain.net/smartcontract/setupsc"
)
func main() {
deploymentMode := flag.Int("deployment_mode", 2, "deployment_mode")
keysFile := flag.String("keys_file", "", "keys_file")
dkgFile := flag.String("dkg_file", "", "dkg_file")
delayFile := flag.String("delay_file", "", "delay_file")
magicBlockFile := flag.String("magic_block_file", "", "magic_block_file")
initialStatesFile := flag.String("initial_states", "", "initial_states")
flag.Parse()
config.Configuration.DeploymentMode = byte(*deploymentMode)
config.SetupDefaultConfig()
config.SetupConfig()
config.SetupSmartContractConfig()
if config.Development() {
logging.InitLogging("development")
} else {
logging.InitLogging("production")
}
config.Configuration.ChainID = viper.GetString("server_chain.id")
transaction.SetTxnTimeout(int64(viper.GetInt("server_chain.transaction.timeout")))
transaction.SetTxnFee(viper.GetInt64("server_chain.transaction.min_fee"))
config.SetServerChainID(config.Configuration.ChainID)
common.SetupRootContext(node.GetNodeContext())
ctx := common.GetRootContext()
initEntities()
serverChain := chain.NewChainFromConfig()
signatureScheme := serverChain.GetSignatureScheme()
logging.Logger.Info("Owner keys file", zap.String("filename", *keysFile))
reader, err := os.Open(*keysFile)
if err != nil {
panic(err)
}
err = signatureScheme.ReadKeys(reader)
if err != nil {
logging.Logger.Panic("Error reading keys file")
}
reader.Close()
node.Self.SetSignatureScheme(signatureScheme)
miner.SetupMinerChain(serverChain)
mc := miner.GetMinerChain()
mc.SetDiscoverClients(viper.GetBool("server_chain.client.discover"))
mc.SetGenerationTimeout(viper.GetInt("server_chain.block.generation.timeout"))
mc.SetSyncStateTimeout(viper.GetDuration("server_chain.state.sync.timeout") * time.Second)
mc.SetBCStuckCheckInterval(viper.GetDuration("server_chain.stuck.check_interval") * time.Second)
mc.SetBCStuckTimeThreshold(viper.GetDuration("server_chain.stuck.time_threshold") * time.Second)
mc.SetRetryWaitTime(viper.GetInt("server_chain.block.generation.retry_wait_time"))
mc.SetupConfigInfoDB()
chain.SetServerChain(serverChain)
miner.SetNetworkRelayTime(viper.GetDuration("network.relay_time") * time.Millisecond)
node.ReadConfig()
if *initialStatesFile == "" {
*initialStatesFile = viper.GetString("network.initial_states")
}
initStates := state.NewInitStates()
initStateErr := initStates.Read(*initialStatesFile)
// if there's no magic_block_file command-line flag, fall back to the configured value
if *magicBlockFile == "" {
*magicBlockFile = viper.GetString("network.magic_block_file")
}
var magicBlock *block.MagicBlock
dnsURL := viper.GetString("network.dns_url")
if dnsURL == "" {
magicBlock, err = chain.ReadMagicBlockFile(*magicBlockFile)
if err != nil {
logging.Logger.Panic("can't read magic block file", zap.Error(err))
return
}
} else {
magicBlock, err = chain.GetMagicBlockFrom0DNS(dnsURL)
if err != nil {
logging.Logger.Panic("can't read magic block from DNS", zap.Error(err))
return
}
}
if state.Debug() {
block.SetupStateLogger("/tmp/state.txt")
}
gb := mc.SetupGenesisBlock(viper.GetString("server_chain.genesis_block.id"),
magicBlock, initStates)
mb := mc.GetLatestMagicBlock()
logging.Logger.Info("Miners in main", zap.Int("size", mb.Miners.Size()))
if !mb.IsActiveNode(node.Self.Underlying().GetKey(), 0) {
hostName, n2nHostName, portNum, path, description, err := readNonGenesisHostAndPort(keysFile)
if err != nil {
logging.Logger.Panic("Error reading keys file. Non-genesis miner has no host or port number",
zap.Error(err))
}
logging.Logger.Info("Inside nonGenesis", zap.String("host_name", hostName),
zap.Any("n2n_host_name", n2nHostName), zap.Int("port_num", portNum), zap.String("path", path), zap.String("description", description))
node.Self.Underlying().Host = hostName
node.Self.Underlying().N2NHost = n2nHostName
node.Self.Underlying().Port = portNum
node.Self.Underlying().Path = path
node.Self.Underlying().Description = description
} else {
if initStateErr != nil {
logging.Logger.Panic("Failed to read initialStates", zap.Any("Error", initStateErr))
}
}
if node.Self.Underlying().GetKey() == "" {
logging.Logger.Panic("node definition for self node doesn't exist")
}
if node.Self.Underlying().Type != node.NodeTypeMiner {
logging.Logger.Panic("node not configured as miner")
}
err = common.NewError("saving self as client", "client save")
for err != nil {
_, err = client.PutClient(ctx, &node.Self.Underlying().Client)
}
if config.Development() {
if *delayFile != "" {
node.ReadNetworkDelays(*delayFile)
}
}
mode := "main net"
if config.Development() {
mode = "development"
} else if config.TestNet() {
mode = "test net"
}
var address = fmt.Sprintf(":%v", node.Self.Underlying().Port)
logging.Logger.Info("Starting miner", zap.String("build_tag", build.BuildTag), zap.String("go_version", runtime.Version()), zap.Int("available_cpus", runtime.NumCPU()), zap.String("port", address))
logging.Logger.Info("Chain info", zap.String("chain_id", config.GetServerChainID()), zap.String("mode", mode))
logging.Logger.Info("Self identity", zap.Any("set_index", node.Self.Underlying().SetIndex), zap.Any("id", node.Self.Underlying().GetKey()))
initIntegrationsTests(node.Self.Underlying().GetKey())
defer shutdownIntegrationTests()
var server *http.Server
if config.Development() {
// No WriteTimeout setup to enable pprof
server = &http.Server{
Addr: address,
ReadTimeout: 30 * time.Second,
MaxHeaderBytes: 1 << 20,
}
} else {
server = &http.Server{
Addr: address,
ReadTimeout: 30 * time.Second,
WriteTimeout: 30 * time.Second,
MaxHeaderBytes: 1 << 20,
}
}
common.HandleShutdown(server)
memorystore.GetInfo()
common.ConfigRateLimits()
initN2NHandlers()
initWorkers(ctx)
// Load previous MB and related DKG if any. Don't load the latest, since
// it can be promoted (not finalized).
mc.LoadMagicBlocksAndDKG(ctx)
if err = mc.WaitForActiveSharders(ctx); err != nil {
logging.Logger.Error("failed to wait sharders", zap.Error(err))
}
if err = mc.UpdateLatesMagicBlockFromSharders(ctx); err != nil {
logging.Logger.Panic(fmt.Sprintf("can't update LFMB from sharders, err: %v", err))
}
	// Ignore errors and don't retry; a restart round will resolve it
	// if errors occur.
mc.SetupLatestAndPreviousMagicBlocks(ctx)
mb = mc.GetLatestMagicBlock()
if mb.StartingRound == 0 && mb.IsActiveNode(node.Self.Underlying().GetKey(), mb.StartingRound) {
genesisDKG := viper.GetInt64("network.genesis_dkg")
dkgShare, oldDKGShare := &bls.DKGSummary{
SecretShares: make(map[string]string),
}, &bls.DKGSummary{}
dkgShare.ID = strconv.FormatInt(mb.MagicBlockNumber, 10)
if genesisDKG == 0 {
oldDKGShare, err = miner.ReadDKGSummaryFile(*dkgFile)
if err != nil {
logging.Logger.Panic(fmt.Sprintf("Error reading DKG file. ERROR: %v", err.Error()))
}
} else {
oldDKGShare, err = miner.LoadDKGSummary(ctx, strconv.FormatInt(genesisDKG, 10))
if err != nil {
if config.DevConfiguration.ViewChange {
logging.Logger.Error(fmt.Sprintf("Can't load genesis dkg: ERROR: %v", err.Error()))
} else {
logging.Logger.Panic(fmt.Sprintf("Can't load genesis dkg: ERROR: %v", err.Error()))
}
}
}
dkgShare.SecretShares = oldDKGShare.SecretShares
if err = dkgShare.Verify(bls.ComputeIDdkg(node.Self.Underlying().GetKey()), magicBlock.Mpks.GetMpkMap()); err != nil {
if config.DevConfiguration.ViewChange {
logging.Logger.Error("Failed to verify genesis dkg", zap.Any("error", err))
} else {
logging.Logger.Panic(fmt.Sprintf("Failed to verify genesis dkg: ERROR: %v", err.Error()))
}
}
if err = miner.StoreDKGSummary(ctx, dkgShare); err != nil {
logging.Logger.Panic(fmt.Sprintf("Failed to store genesis dkg: ERROR: %v", err.Error()))
}
}
initHandlers()
go func() {
logging.Logger.Info("Ready to listen to the requests")
log.Fatal(server.ListenAndServe())
}()
go mc.RegisterClient()
chain.StartTime = time.Now().UTC()
// start restart round event worker before the StartProtocol to be able
// to subscribe to its events
go mc.RestartRoundEventWorker(ctx)
var activeMiner = mb.Miners.HasNode(node.Self.Underlying().GetKey())
if activeMiner {
mb = mc.GetLatestMagicBlock()
if err := miner.SetDKGFromMagicBlocksChainPrev(ctx, mb); err != nil {
logging.Logger.Error("failed to set DKG", zap.Error(err))
} else {
miner.StartProtocol(ctx, gb)
}
}
mc.SetStarted()
miner.SetupWorkers(ctx)
if config.Development() {
go TransactionGenerator(mc.Chain)
}
if config.DevConfiguration.IsFeeEnabled {
go mc.SetupSC(ctx)
if config.DevConfiguration.ViewChange {
go mc.DKGProcess(ctx)
}
}
defer done(ctx)
<-ctx.Done()
time.Sleep(time.Second * 5)
}
func done(ctx context.Context) {
mc := miner.GetMinerChain()
mc.Stop()
}
func readNonGenesisHostAndPort(keysFile *string) (string, string, int, string, string, error) {
reader, err := os.Open(*keysFile)
if err != nil {
panic(err)
}
defer reader.Close()
scanner := bufio.NewScanner(reader)
scanner.Scan() // throw away the publickey
scanner.Scan() // throw away the secretkey
result := scanner.Scan()
if !result {
return "", "", 0, "", "", errors.New("error reading Host")
}
h := scanner.Text()
logging.Logger.Info("Host inside", zap.String("host", h))
result = scanner.Scan()
if !result {
return "", "", 0, "", "", errors.New("error reading n2n host")
}
n2nh := scanner.Text()
logging.Logger.Info("N2NHost inside", zap.String("n2n_host", n2nh))
scanner.Scan()
po, err := strconv.ParseInt(scanner.Text(), 10, 32)
p := int(po)
if err != nil {
return "", "", 0, "", "", err
}
result = scanner.Scan()
if !result {
return h, n2nh, p, "", "", nil
}
path := scanner.Text()
logging.Logger.Info("Path inside", zap.String("path", path))
result = scanner.Scan()
if !result {
return h, n2nh, p, path, "", nil
}
description := scanner.Text()
logging.Logger.Info("Description inside", zap.String("description", description))
return h, n2nh, p, path, description, nil
}
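// Hedged illustration, not part of the original file: the line-oriented keys
// file layout readNonGenesisHostAndPort expects, derived from the scanner
// calls above. The concrete values are made up; path and description are
// optional trailing lines.
//
//	<public key>
//	<secret key>
//	localhost      (host)
//	localhost      (n2n host)
//	7071           (port)
//	/miner01       (path, optional)
//	local miner    (description, optional)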
func initEntities() {
memorystore.InitDefaultPool(os.Getenv("REDIS_HOST"), 6379)
memoryStorage := memorystore.GetStorageProvider()
chain.SetupEntity(memoryStorage)
round.SetupEntity(memoryStorage)
round.SetupVRFShareEntity(memoryStorage)
block.SetupEntity(memoryStorage)
block.SetupBlockSummaryEntity(memoryStorage)
block.SetupStateChange(memoryStorage)
state.SetupPartialState(memoryStorage)
state.SetupStateNodes(memoryStorage)
client.SetupEntity(memoryStorage)
transaction.SetupTransactionDB()
transaction.SetupEntity(memoryStorage)
miner.SetupNotarizationEntity()
miner.SetupStartChainEntity()
ememoryStorage := ememorystore.GetStorageProvider()
bls.SetupDKGEntity()
bls.SetupDKGSummary(ememoryStorage)
bls.SetupDKGDB()
setupsc.SetupSmartContracts()
block.SetupMagicBlockData(ememoryStorage)
block.SetupMagicBlockDataDB()
}
func initHandlers() {
SetupHandlers()
config.SetupHandlers()
node.SetupHandlers()
chain.SetupHandlers()
client.SetupHandlers()
transaction.SetupHandlers()
block.SetupHandlers()
miner.SetupHandlers()
diagnostics.SetupHandlers()
chain.SetupStateHandlers()
serverChain := chain.GetServerChain()
serverChain.SetupNodeHandlers()
}
func initN2NHandlers() {
node.SetupN2NHandlers()
miner.SetupM2MReceivers()
miner.SetupM2MSenders()
miner.SetupM2SSenders()
miner.SetupM2SRequestors()
miner.SetupM2MRequestors()
miner.SetupX2MResponders()
chain.SetupX2XResponders()
chain.SetupX2MRequestors()
chain.SetupX2SRequestors()
}
func initWorkers(ctx context.Context) {
serverChain := chain.GetServerChain()
serverChain.SetupWorkers(ctx)
//miner.SetupWorkers(ctx)
transaction.SetupWorkers(ctx)
}
|
[
"\"REDIS_HOST\""
] |
[] |
[
"REDIS_HOST"
] |
[]
|
["REDIS_HOST"]
|
go
| 1 | 0 | |
src/test/java/com/baomidou/plugin/idea/mybatisx/jpa/file/CreateFileTest.java
|
package com.baomidou.plugin.idea.mybatisx.jpa.file;
import com.baomidou.plugin.idea.mybatisx.util.ClassCreator;
import com.baomidou.plugin.idea.mybatisx.util.StringUtils;
import com.intellij.codeInsight.lookup.LookupElementBuilder;
import com.intellij.openapi.application.WriteAction;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.JavaPsiFacade;
import com.intellij.psi.PsiClass;
import com.intellij.psi.PsiDirectory;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiElementFactory;
import com.intellij.psi.PsiField;
import com.intellij.psi.PsiFile;
import com.intellij.psi.PsiJavaFile;
import com.intellij.psi.PsiMethod;
import com.intellij.psi.search.GlobalSearchScope;
import com.intellij.testFramework.IdeaTestUtil;
import com.intellij.testFramework.builders.JavaModuleFixtureBuilder;
import com.intellij.testFramework.fixtures.JavaCodeInsightFixtureTestCase;
import java.io.OutputStream;
import java.util.HashSet;
import java.util.Set;
public class CreateFileTest extends JavaCodeInsightFixtureTestCase {
@Override
protected String getTestDataPath() {
return "src/test/resources/module";
}
public void testCreateFile() {
myFixture.configureByFiles("domain/EntityClass.java", "domain/EntityParentClass.java");
Project project = getProject();
Set<String> allowFields = new HashSet<>();
allowFields.add("name");
allowFields.add("amount");
JavaPsiFacade javaPsiFacade = JavaPsiFacade.getInstance(project);
GlobalSearchScope globalSearchScope = GlobalSearchScope.allScope(project);
PsiClass entityClass = javaPsiFacade.findClass("domain.EntityClass", globalSearchScope);
String entityClassIdAgeDTO = "EntityClassIdAgeDTO";
ClassCreator classCreator = new ClassCreator();
classCreator.createFromAllowedFields(allowFields, entityClass, entityClassIdAgeDTO);
}
@Override
protected void tuneFixture(JavaModuleFixtureBuilder moduleBuilder) throws Exception {
super.tuneFixture(moduleBuilder);
String javaHome = System.getenv("JAVA_HOME");
if (javaHome == null) {
javaHome = IdeaTestUtil.getMockJdk18Path().getPath();
}
moduleBuilder.addJdk(javaHome);
moduleBuilder.addLibrary("mp3-lib", "src/test/testData/lib");
}
}
|
[
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
java
| 1 | 0 | |
vote/app.py
|
from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
option_a = os.getenv('OPTION_A', "Lebron")
option_b = os.getenv('OPTION_B', "Jordan")
hostname = socket.gethostname()
app = Flask(__name__)
def get_redis():
if not hasattr(g, 'redis'):
g.redis = Redis(host="redis", db=0, socket_timeout=5)
return g.redis
@app.route("/", methods=['POST','GET'])
def hello():
voter_id = request.cookies.get('voter_id')
if not voter_id:
        voter_id = hex(random.getrandbits(64))[2:]  # strip only the '0x' prefix; the previous [2:-1] also dropped the last hex digit on Python 3
vote = None
if request.method == 'POST':
redis = get_redis()
vote = request.form['vote']
data = json.dumps({'voter_id': voter_id, 'vote': vote})
redis.rpush('votes', data)
resp = make_response(render_template(
'index.html',
option_a=option_a,
option_b=option_b,
hostname=hostname,
vote=vote,
))
resp.set_cookie('voter_id', voter_id)
return resp
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
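# Hedged usage sketch, not part of the original app: casting a vote from the
# command line. The "vote" form field comes from the handler above; the value
# "a" and the host/port are assumptions about index.html and the deployment.
#
#   curl -c cookies.txt -b cookies.txt -d "vote=a" http://localhost:80/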
|
[] |
[] |
[
"OPTION_A",
"OPTION_B"
] |
[]
|
["OPTION_A", "OPTION_B"]
|
python
| 2 | 0 | |
cmd/syncthing/locations.go
|
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/syncthing/syncthing/lib/fs"
)
type locationEnum string
// Use strings as keys to make printout and serialization of the locations map
// more meaningful.
const (
locConfigFile locationEnum = "config"
locCertFile = "certFile"
locKeyFile = "keyFile"
locHTTPSCertFile = "httpsCertFile"
locHTTPSKeyFile = "httpsKeyFile"
locDatabase = "database"
locLogFile = "logFile"
locCsrfTokens = "csrfTokens"
locPanicLog = "panicLog"
locAuditLog = "auditLog"
locGUIAssets = "GUIAssets"
locDefFolder = "defFolder"
)
// Platform dependent directories
var baseDirs = map[string]string{
"config": defaultConfigDir(), // Overridden by -home flag
"home": homeDir(), // User's home directory, *not* -home flag
}
// Use the variables from baseDirs here
var locations = map[locationEnum]string{
locConfigFile: "${config}/config.xml",
locCertFile: "${config}/cert.pem",
locKeyFile: "${config}/key.pem",
locHTTPSCertFile: "${config}/https-cert.pem",
locHTTPSKeyFile: "${config}/https-key.pem",
locDatabase: "${config}/index-v0.14.0.db",
locLogFile: "${config}/syncthing.log", // -logfile on Windows
locCsrfTokens: "${config}/csrftokens.txt",
locPanicLog: "${config}/panic-${timestamp}.log",
locAuditLog: "${config}/audit-${timestamp}.log",
locGUIAssets: "${config}/gui",
locDefFolder: "${home}/Sync",
}
// expandLocations replaces the variables in the location map with actual
// directory locations.
func expandLocations() error {
for key, dir := range locations {
for varName, value := range baseDirs {
dir = strings.Replace(dir, "${"+varName+"}", value, -1)
}
var err error
dir, err = fs.ExpandTilde(dir)
if err != nil {
return err
}
locations[key] = dir
}
return nil
}
// defaultConfigDir returns the default configuration directory, as figured
// out by the various environment variables present on each platform, or dies
// trying.
func defaultConfigDir() string {
switch runtime.GOOS {
case "windows":
if p := os.Getenv("LocalAppData"); p != "" {
return filepath.Join(p, "Syncthing")
}
return filepath.Join(os.Getenv("AppData"), "Syncthing")
case "darwin":
dir, err := fs.ExpandTilde("~/Library/Application Support/Syncthing")
if err != nil {
l.Fatalln(err)
}
return dir
default:
if xdgCfg := os.Getenv("XDG_CONFIG_HOME"); xdgCfg != "" {
return filepath.Join(xdgCfg, "syncthing")
}
dir, err := fs.ExpandTilde("~/.config/syncthing")
if err != nil {
l.Fatalln(err)
}
return dir
}
}
// homeDir returns the user's home directory, or dies trying.
func homeDir() string {
home, err := fs.ExpandTilde("~")
if err != nil {
l.Fatalln(err)
}
return home
}
func timestampedLoc(key locationEnum) string {
// We take the roundtrip via "${timestamp}" instead of passing the path
// directly through time.Format() to avoid issues when the path we are
// expanding contains numbers; otherwise for example
// /home/user2006/.../panic-20060102-150405.log would get both instances of
// 2006 replaced by 2015...
tpl := locations[key]
now := time.Now().Format("20060102-150405")
return strings.Replace(tpl, "${timestamp}", now, -1)
}
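// Hedged illustration, not part of the original file: for a made-up config
// dir of /home/user2006/.config/syncthing and a clock reading of
// 2015-03-17 10:30:45, timestampedLoc(locPanicLog) yields
//
//	/home/user2006/.config/syncthing/panic-20150317-103045.log
//
// Only the "${timestamp}" placeholder is replaced; the "2006" embedded in the
// user name is left untouched, which is the point of the indirection above.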
|
[
"\"LocalAppData\"",
"\"AppData\"",
"\"XDG_CONFIG_HOME\""
] |
[] |
[
"LocalAppData",
"XDG_CONFIG_HOME",
"AppData"
] |
[]
|
["LocalAppData", "XDG_CONFIG_HOME", "AppData"]
|
go
| 3 | 0 | |
providers/ibm/database_mongo.go
|
// Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibm
import (
"os"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
bluemix "github.com/IBM-Cloud/bluemix-go"
"github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog"
"github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2"
"github.com/IBM-Cloud/bluemix-go/session"
)
// DatabaseMongoGenerator ...
type DatabaseMongoGenerator struct {
IBMService
}
// loadMongoDB ...
func (g DatabaseMongoGenerator) loadMongoDB(dbID string, dbName string) terraformutils.Resource {
resources := terraformutils.NewSimpleResource(
dbID,
dbName,
"ibm_database",
"ibm",
[]string{})
return resources
}
// InitResources ...
func (g *DatabaseMongoGenerator) InitResources() error {
region := os.Getenv("IC_REGION")
bmxConfig := &bluemix.Config{
BluemixAPIKey: os.Getenv("IC_API_KEY"),
Region: region,
}
sess, err := session.New(bmxConfig)
if err != nil {
return err
}
catalogClient, err := catalog.New(sess)
if err != nil {
return err
}
controllerClient, err := controllerv2.New(sess)
if err != nil {
return err
}
serviceID, err := catalogClient.ResourceCatalog().FindByName("databases-for-mongodb", true)
if err != nil {
return err
}
query := controllerv2.ServiceInstanceQuery{
ServiceID: serviceID[0].ID,
}
mongoInstances, err := controllerClient.ResourceServiceInstanceV2().ListInstances(query)
if err != nil {
return err
}
for _, db := range mongoInstances {
if db.RegionID == region {
g.Resources = append(g.Resources, g.loadMongoDB(db.ID, db.Name))
}
}
return nil
}
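// Usage sketch (assumption, not part of the original file): the generator reads
// its credentials and region from the environment before an import run, e.g.
//
//	export IC_API_KEY=<your API key>
//	export IC_REGION=us-south
//
// The exact Terraformer CLI invocation that ends up calling InitResources
// depends on how the IBM provider is registered in your build.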
|
[
"\"IC_REGION\"",
"\"IC_API_KEY\""
] |
[] |
[
"IC_API_KEY",
"IC_REGION"
] |
[]
|
["IC_API_KEY", "IC_REGION"]
|
go
| 2 | 0 | |
app/job/main/growup/dao/email/dao_test.go
|
package email
import (
"flag"
"go-common/app/job/main/growup/conf"
"os"
"testing"
)
var (
d *Dao
)
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "mobile.studio.growup-job")
flag.Set("conf_token", "8781e02680f40996bc01eb1248ac2ac9")
flag.Set("tree_id", "14716")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../../cmd/growup-job.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
os.Exit(m.Run())
}
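// Illustrative note (not part of the original test): with DEPLOY_ENV unset the
// test loads the local ../../cmd/growup-job.toml, while running it as, e.g.,
//
//	DEPLOY_ENV=uat go test
//
// from this package directory switches it to the remote configuration center
// using the flags set above.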
|
[
"\"DEPLOY_ENV\""
] |
[] |
[
"DEPLOY_ENV"
] |
[]
|
["DEPLOY_ENV"]
|
go
| 1 | 0 | |
extensions/cdc-mysql/src/test/java/com/hazelcast/jet/cdc/mysql/MySqlCdcNetworkIntegrationTest.java
|
/*
* Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.jet.cdc.mysql;
import com.github.dockerjava.api.command.CreateContainerCmd;
import com.github.dockerjava.api.model.ExposedPort;
import com.github.dockerjava.api.model.PortBinding;
import com.github.dockerjava.api.model.Ports;
import com.hazelcast.jet.JetException;
import com.hazelcast.jet.JetInstance;
import com.hazelcast.jet.Job;
import com.hazelcast.jet.cdc.AbstractCdcIntegrationTest;
import com.hazelcast.jet.cdc.ChangeRecord;
import com.hazelcast.jet.pipeline.Pipeline;
import com.hazelcast.jet.pipeline.Sinks;
import com.hazelcast.jet.pipeline.StreamSource;
import com.hazelcast.jet.retry.RetryStrategies;
import com.hazelcast.jet.retry.RetryStrategy;
import com.hazelcast.jet.test.SerialTest;
import com.hazelcast.test.HazelcastSerialParametersRunnerFactory;
import com.hazelcast.test.annotation.NightlyTest;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.EnvironmentVariables;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.testcontainers.containers.MySQLContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.containers.ToxiproxyContainer;
import java.io.IOException;
import java.net.ServerSocket;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static com.hazelcast.jet.Util.entry;
import static com.hazelcast.jet.core.JobStatus.RUNNING;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.runners.Parameterized.Parameter;
import static org.testcontainers.containers.MySQLContainer.MYSQL_PORT;
@RunWith(Parameterized.class)
@Parameterized.UseParametersRunnerFactory(HazelcastSerialParametersRunnerFactory.class)
@Category({SerialTest.class, NightlyTest.class})
public class MySqlCdcNetworkIntegrationTest extends AbstractCdcIntegrationTest {
private static final long RECONNECT_INTERVAL_MS = SECONDS.toMillis(1);
@Rule
public final EnvironmentVariables environmentVariables = new EnvironmentVariables();
@Parameter(value = 0)
public RetryStrategy reconnectBehavior;
@Parameter(value = 1)
public boolean resetStateOnReconnect;
@Parameter(value = 2)
public String testName;
private MySQLContainer<?> mysql;
@Parameters(name = "{2}")
public static Collection<Object[]> data() {
return Arrays.asList(new Object[][]{
{RetryStrategies.never(), false, "fail"},
{RetryStrategies.indefinitely(RECONNECT_INTERVAL_MS), false, "reconnect"},
{RetryStrategies.indefinitely(RECONNECT_INTERVAL_MS), true, "reconnect w/ state reset"}
});
}
@Before
public void before() {
//disable Testcontainer's automatic resource manager
//containers are cleaned up explicitly
//automatic resource manager is just an extra thing that can break
//(have had problems with it not being cleaned up properly itself)
environmentVariables.set("TESTCONTAINERS_RYUK_DISABLED", "true");
assertEquals("true", System.getenv("TESTCONTAINERS_RYUK_DISABLED"));
}
@After
public void after() {
if (mysql != null) {
stopContainer(mysql);
}
}
@Test
public void when_noDatabaseToConnectTo() throws Exception {
mysql = initMySql(null, 0);
String containerIpAddress = mysql.getContainerIpAddress();
stopContainer(mysql);
mysql = null;
int port = findRandomOpenPortInRange(MYSQL_PORT + 100, MYSQL_PORT + 1000);
Pipeline pipeline = initPipeline(containerIpAddress, port);
// when job starts
JetInstance jet = createJetMembers(2)[0];
Job job = jet.newJob(pipeline);
// then
boolean neverReconnect = reconnectBehavior.getMaxAttempts() == 0;
if (neverReconnect) {
// then job fails
assertJobFailsWithConnectException(job, false);
assertTrue(jet.getMap("results").isEmpty());
} else {
// and can't connect to DB
assertJobStatusEventually(job, RUNNING);
assertTrue(jet.getMap("results").isEmpty());
// and DB starts
mysql = initMySql(null, port);
try {
// then source connects successfully
assertEqualsEventually(() -> jet.getMap("results").size(), 4);
assertEquals(RUNNING, job.getStatus());
} finally {
abortJob(job);
}
}
}
@Test
public void when_networkDisconnectDuringSnapshotting_then_jetSourceIsStuckUntilReconnect() throws Exception {
try (
Network network = initNetwork();
ToxiproxyContainer toxiproxy = initToxiproxy(network);
) {
mysql = initMySql(network, null);
ToxiproxyContainer.ContainerProxy proxy = initProxy(toxiproxy, mysql);
Pipeline pipeline = initPipeline(proxy.getContainerIpAddress(), proxy.getProxyPort());
// when job starts
JetInstance jet = createJetMembers(2)[0];
Job job = jet.newJob(pipeline);
assertJobStatusEventually(job, RUNNING);
// and snapshotting is ongoing (we have no exact way of identifying
// the moment, but random sleep will catch it at least some of the time)
MILLISECONDS.sleep(ThreadLocalRandom.current().nextInt(0, 500));
// and connection is cut
proxy.setConnectionCut(true);
// and some time passes
MILLISECONDS.sleep(2 * RECONNECT_INTERVAL_MS);
// and connection recovers
proxy.setConnectionCut(false);
// then connector manages to reconnect and finish snapshot
try {
assertEqualsEventually(() -> jet.getMap("results").size(), 4);
} finally {
abortJob(job);
}
}
}
@Test
public void when_databaseShutdownDuringSnapshotting() throws Exception {
int port = findRandomOpenPort();
mysql = initMySql(null, port);
Pipeline pipeline = initPipeline(mysql.getContainerIpAddress(), port);
// when job starts
JetInstance jet = createJetMembers(2)[0];
Job job = jet.newJob(pipeline);
assertJobStatusEventually(job, RUNNING);
// and snapshotting is ongoing (we have no exact way of identifying
// the moment, but random sleep will catch it at least some of the time)
MILLISECONDS.sleep(ThreadLocalRandom.current().nextInt(100, 500));
// and DB is stopped
stopContainer(mysql);
mysql = null;
boolean neverReconnect = reconnectBehavior.getMaxAttempts() == 0;
if (neverReconnect) {
// then job fails
assertJobFailsWithConnectException(job, true);
} else {
// and DB is started anew
mysql = initMySql(null, port);
// then snapshotting finishes successfully
try {
assertEqualsEventually(() -> jet.getMap("results").size(), 4);
assertEquals(RUNNING, job.getStatus());
} finally {
abortJob(job);
}
}
}
@Test
public void when_networkDisconnectDuringBinlogRead_then_connectorReconnectsInternally() throws Exception {
try (
Network network = initNetwork();
ToxiproxyContainer toxiproxy = initToxiproxy(network);
) {
mysql = initMySql(network, null);
ToxiproxyContainer.ContainerProxy proxy = initProxy(toxiproxy, mysql);
Pipeline pipeline = initPipeline(proxy.getContainerIpAddress(), proxy.getProxyPort());
// when connector is up and transitions to binlog reading
JetInstance jet = createJetMembers(2)[0];
Job job = jet.newJob(pipeline);
assertEqualsEventually(() -> jet.getMap("results").size(), 4);
SECONDS.sleep(3);
insertRecords(mysql, 1005);
assertEqualsEventually(() -> jet.getMap("results").size(), 5);
// and the connection is cut
proxy.setConnectionCut(true);
// and some new events get generated in the DB
insertRecords(mysql, 1006, 1007);
// and some time passes
MILLISECONDS.sleep(2 * RECONNECT_INTERVAL_MS);
// and the connection is re-established
proxy.setConnectionCut(false);
// then the connector catches up
try {
assertEqualsEventually(() -> jet.getMap("results").size(), 7);
} finally {
abortJob(job);
}
}
}
@Test
public void when_databaseShutdownDuringBinlogReading() throws Exception {
int port = findRandomOpenPort();
mysql = initMySql(null, port);
Pipeline pipeline = initPipeline(mysql.getContainerIpAddress(), port);
// when connector is up and transitions to binlog reading
JetInstance jet = createJetMembers(2)[0];
Job job = jet.newJob(pipeline);
assertEqualsEventually(() -> jet.getMap("results").size(), 4);
SECONDS.sleep(3);
insertRecords(mysql, 1005);
assertEqualsEventually(() -> jet.getMap("results").size(), 5);
// and DB is stopped
stopContainer(mysql);
mysql = null;
boolean neverReconnect = reconnectBehavior.getMaxAttempts() == 0;
if (neverReconnect) {
// then job fails
assertJobFailsWithConnectException(job, true);
} else {
// and results are cleared
jet.getMap("results").clear();
assertEqualsEventually(() -> jet.getMap("results").size(), 0);
// and DB is started anew
mysql = initMySql(null, port);
insertRecords(mysql, 1005, 1006, 1007);
try {
if (resetStateOnReconnect) {
// then job keeps running, connector starts freshly, including snapshotting
assertEqualsEventually(() -> jet.getMap("results").size(), 7);
assertEquals(RUNNING, job.getStatus());
} else {
assertEqualsEventually(() -> jet.getMap("results").size(), 2);
assertEquals(RUNNING, job.getStatus());
}
} finally {
abortJob(job);
}
}
}
private StreamSource<ChangeRecord> source(String host, int port) {
return MySqlCdcSources.mysql("customers")
.setDatabaseAddress(host)
.setDatabasePort(port)
.setDatabaseUser("debezium")
.setDatabasePassword("dbz")
.setClusterName("dbserver1").setDatabaseWhitelist("inventory")
.setTableWhitelist("inventory." + "customers")
.setReconnectBehavior(reconnectBehavior)
.setShouldStateBeResetOnReconnect(resetStateOnReconnect)
.build();
}
private Pipeline initPipeline(String host, int port) {
Pipeline pipeline = Pipeline.create();
pipeline.readFrom(source(host, port))
.withNativeTimestamps(0)
.map(r -> entry(r.key().toMap().get("id"), r.value().toJson()))
.writeTo(Sinks.map("results"));
return pipeline;
}
private void abortJob(Job job) {
try {
job.cancel();
job.join();
} catch (Exception e) {
// ignore, cancellation exception expected
}
}
private MySQLContainer<?> initMySql(Network network, Integer fixedExposedPort) {
MySQLContainer<?> mysql = namedTestContainer(
new MySQLContainer<>("debezium/example-mysql:1.2")
.withUsername("mysqluser")
.withPassword("mysqlpw")
);
if (fixedExposedPort != null) {
Consumer<CreateContainerCmd> cmd = e -> e.withPortBindings(
new PortBinding(Ports.Binding.bindPort(fixedExposedPort), new ExposedPort(MYSQL_PORT)));
mysql = mysql.withCreateContainerCmdModifier(cmd);
}
if (network != null) {
mysql = mysql.withNetwork(network);
}
mysql.start();
return mysql;
}
private ToxiproxyContainer initToxiproxy(Network network) {
ToxiproxyContainer toxiproxy = namedTestContainer(new ToxiproxyContainer().withNetwork(network));
toxiproxy.start();
return toxiproxy;
}
private static Network initNetwork() {
return Network.newNetwork();
}
private static ToxiproxyContainer.ContainerProxy initProxy(ToxiproxyContainer toxiproxy, MySQLContainer<?> mysql) {
return toxiproxy.getProxy(mysql, MYSQL_PORT);
}
private static void insertRecords(MySQLContainer<?> mysql, int... ids) throws SQLException {
try (Connection connection = DriverManager.getConnection(mysql.withDatabaseName("inventory").getJdbcUrl(),
mysql.getUsername(), mysql.getPassword())) {
connection.setAutoCommit(false);
Statement statement = connection.createStatement();
for (int id : ids) {
statement.addBatch("INSERT INTO customers VALUES (" + id + ", 'Jason', 'Bourne', " +
"'jason" + id + "@bourne.org')");
}
statement.executeBatch();
connection.commit();
}
}
private static int findRandomOpenPort() throws Exception {
try (ServerSocket socket = new ServerSocket(0)) {
return socket.getLocalPort();
}
}
private static int findRandomOpenPortInRange(int fromInclusive, int toExclusive) throws IOException {
List<Integer> randomizedPortsInRange = IntStream.range(fromInclusive, toExclusive)
.boxed()
.collect(Collectors.toList());
Collections.shuffle(randomizedPortsInRange);
for (int port : randomizedPortsInRange) {
try {
ServerSocket serverSocket = new ServerSocket(port);
serverSocket.close();
return port;
} catch (Exception e) {
//swallow, expected
}
}
throw new IOException("No free port in range [" + fromInclusive + ", " + toExclusive + ")");
}
@SuppressWarnings("StatementWithEmptyBody")
private static void assertJobFailsWithConnectException(Job job, boolean lenient) throws InterruptedException {
try {
//wait for job to finish w/ timeout
job.getFuture().get(5, SECONDS);
} catch (TimeoutException te) {
//explicitly cancelling the job because it has not completed so far
job.cancel();
if (lenient) {
//ignore the timeout; not all tests are deterministic, sometimes we don't end up in the state
//we actually want to test
} else {
fail("Connection failure not thrown");
}
} catch (ExecutionException ee) {
//job completed exceptionally, as expected, we check the details of it
assertThat(ee)
.hasRootCauseInstanceOf(JetException.class)
.hasStackTraceContaining("Failed to connect to database");
}
}
}
|
[
"\"TESTCONTAINERS_RYUK_DISABLED\""
] |
[] |
[
"TESTCONTAINERS_RYUK_DISABLED"
] |
[]
|
["TESTCONTAINERS_RYUK_DISABLED"]
|
java
| 1 | 0 | |
cmd/frontend/internal/httpapi/auth_test.go
|
package httpapi
import (
"context"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/sourcegraph/sourcegraph/cmd/frontend/authz"
"github.com/sourcegraph/sourcegraph/cmd/frontend/db"
"github.com/sourcegraph/sourcegraph/cmd/frontend/types"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/errcode"
)
func TestAccessTokenAuthMiddleware(t *testing.T) {
handler := AccessTokenAuthMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
actor := actor.FromContext(r.Context())
if actor.IsAuthenticated() {
fmt.Fprintf(w, "user %v", actor.UID)
} else {
fmt.Fprint(w, "no user")
}
}))
checkHTTPResponse := func(t *testing.T, req *http.Request, wantStatusCode int, wantBody string) {
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if rr.Code != wantStatusCode {
t.Errorf("got response status %d, want %d", rr.Code, wantStatusCode)
}
if got := rr.Body.String(); got != wantBody {
t.Errorf("got response body %q, want %q", got, wantBody)
}
}
t.Run("no header", func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
checkHTTPResponse(t, req, http.StatusOK, "no user")
})
// Test that the absence of an Authorization header doesn't unset the actor provided by a prior
// auth middleware.
t.Run("no header, actor present", func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
req = req.WithContext(actor.WithActor(context.Background(), &actor.Actor{UID: 123}))
checkHTTPResponse(t, req, http.StatusOK, "user 123")
})
for _, unrecognizedHeaderValue := range []string{"x", "x y", "Basic abcd"} {
t.Run("unrecognized header "+unrecognizedHeaderValue, func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
req.Header.Set("Authorization", unrecognizedHeaderValue)
checkHTTPResponse(t, req, http.StatusOK, "no user")
})
}
for _, invalidHeaderValue := range []string{"token-sudo abc", `token-sudo token=""`, "token "} {
t.Run("invalid header "+invalidHeaderValue, func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
req.Header.Set("Authorization", invalidHeaderValue)
checkHTTPResponse(t, req, http.StatusUnauthorized, "Invalid Authorization header.\n")
})
}
t.Run("valid header with invalid token", func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
req.Header.Set("Authorization", "token badbad")
var calledAccessTokensLookup bool
db.Mocks.AccessTokens.Lookup = func(tokenHexEncoded, requiredScope string) (subjectUserID int32, err error) {
calledAccessTokensLookup = true
return 0, errors.New("x")
}
defer func() { db.Mocks = db.MockStores{} }()
checkHTTPResponse(t, req, http.StatusUnauthorized, "Invalid access token.\n")
if !calledAccessTokensLookup {
t.Error("!calledAccessTokensLookup")
}
})
for _, headerValue := range []string{"token abcdef", `token token="abcdef"`} {
t.Run("valid non-sudo token: "+headerValue, func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
req.Header.Set("Authorization", headerValue)
var calledAccessTokensLookup bool
db.Mocks.AccessTokens.Lookup = func(tokenHexEncoded, requiredScope string) (subjectUserID int32, err error) {
calledAccessTokensLookup = true
if want := "abcdef"; tokenHexEncoded != want {
t.Errorf("got %q, want %q", tokenHexEncoded, want)
}
if want := authz.ScopeUserAll; requiredScope != want {
t.Errorf("got %q, want %q", requiredScope, want)
}
return 123, nil
}
defer func() { db.Mocks = db.MockStores{} }()
checkHTTPResponse(t, req, http.StatusOK, "user 123")
if !calledAccessTokensLookup {
t.Error("!calledAccessTokensLookup")
}
})
}
// Test that an access token overwrites the actor set by a prior auth middleware.
t.Run("actor present, valid non-sudo token", func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
req.Header.Set("Authorization", "token abcdef")
req = req.WithContext(actor.WithActor(context.Background(), &actor.Actor{UID: 456}))
var calledAccessTokensLookup bool
db.Mocks.AccessTokens.Lookup = func(tokenHexEncoded, requiredScope string) (subjectUserID int32, err error) {
calledAccessTokensLookup = true
if want := "abcdef"; tokenHexEncoded != want {
t.Errorf("got %q, want %q", tokenHexEncoded, want)
}
if want := authz.ScopeUserAll; requiredScope != want {
t.Errorf("got %q, want %q", requiredScope, want)
}
return 123, nil
}
defer func() { db.Mocks = db.MockStores{} }()
checkHTTPResponse(t, req, http.StatusOK, "user 123")
if !calledAccessTokensLookup {
t.Error("!calledAccessTokensLookup")
}
})
// Test that an access token passed via a query parameter or basic auth also overwrites the actor
// set by a prior auth middleware.
const (
sourceQueryParam = "query-param"
sourceBasicAuth = "basic-auth"
)
for _, source := range []string{sourceQueryParam, sourceBasicAuth} {
t.Run("actor present, valid non-sudo token in "+source, func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
if source == sourceQueryParam {
q := url.Values{}
q.Add("token", "abcdef")
req.URL.RawQuery = q.Encode()
} else {
req.SetBasicAuth("abcdef", "")
}
req = req.WithContext(actor.WithActor(context.Background(), &actor.Actor{UID: 456}))
var calledAccessTokensLookup bool
db.Mocks.AccessTokens.Lookup = func(tokenHexEncoded, requiredScope string) (subjectUserID int32, err error) {
calledAccessTokensLookup = true
if want := "abcdef"; tokenHexEncoded != want {
t.Errorf("got %q, want %q", tokenHexEncoded, want)
}
if want := authz.ScopeUserAll; requiredScope != want {
t.Errorf("got %q, want %q", requiredScope, want)
}
return 123, nil
}
defer func() { db.Mocks = db.MockStores{} }()
checkHTTPResponse(t, req, http.StatusOK, "user 123")
if !calledAccessTokensLookup {
t.Error("!calledAccessTokensLookup")
}
})
}
t.Run("valid sudo token", func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
req.Header.Set("Authorization", `token-sudo token="abcdef",user="alice"`)
var calledAccessTokensLookup bool
db.Mocks.AccessTokens.Lookup = func(tokenHexEncoded, requiredScope string) (subjectUserID int32, err error) {
calledAccessTokensLookup = true
if want := "abcdef"; tokenHexEncoded != want {
t.Errorf("got %q, want %q", tokenHexEncoded, want)
}
if want := authz.ScopeSiteAdminSudo; requiredScope != want {
t.Errorf("got %q, want %q", requiredScope, want)
}
return 123, nil
}
var calledUsersGetByID bool
db.Mocks.Users.GetByID = func(ctx context.Context, userID int32) (*types.User, error) {
calledUsersGetByID = true
if want := int32(123); userID != want {
t.Errorf("got %d, want %d", userID, want)
}
return &types.User{ID: userID, SiteAdmin: true}, nil
}
var calledUsersGetByUsername bool
db.Mocks.Users.GetByUsername = func(ctx context.Context, username string) (*types.User, error) {
calledUsersGetByUsername = true
if want := "alice"; username != want {
t.Errorf("got %q, want %q", username, want)
}
return &types.User{ID: 456, SiteAdmin: true}, nil
}
defer func() { db.Mocks = db.MockStores{} }()
checkHTTPResponse(t, req, http.StatusOK, "user 456")
if !calledAccessTokensLookup {
t.Error("!calledAccessTokensLookup")
}
if !calledUsersGetByID {
t.Error("!calledUsersGetByID")
}
if !calledUsersGetByUsername {
t.Error("!calledUsersGetByUsername")
}
})
// Test that if a sudo token's subject user is not a site admin (which means they were demoted
// from site admin AFTER the token was created), then the sudo token is invalid.
t.Run("valid sudo token, subject is not site admin", func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
req.Header.Set("Authorization", `token-sudo token="abcdef",user="alice"`)
var calledAccessTokensLookup bool
db.Mocks.AccessTokens.Lookup = func(tokenHexEncoded, requiredScope string) (subjectUserID int32, err error) {
calledAccessTokensLookup = true
if want := "abcdef"; tokenHexEncoded != want {
t.Errorf("got %q, want %q", tokenHexEncoded, want)
}
if want := authz.ScopeSiteAdminSudo; requiredScope != want {
t.Errorf("got %q, want %q", requiredScope, want)
}
return 123, nil
}
var calledUsersGetByID bool
db.Mocks.Users.GetByID = func(ctx context.Context, userID int32) (*types.User, error) {
calledUsersGetByID = true
if want := int32(123); userID != want {
t.Errorf("got %d, want %d", userID, want)
}
return &types.User{ID: userID, SiteAdmin: false}, nil
}
defer func() { db.Mocks = db.MockStores{} }()
checkHTTPResponse(t, req, http.StatusForbidden, "The subject user of a sudo access token must be a site admin.\n")
if !calledAccessTokensLookup {
t.Error("!calledAccessTokensLookup")
}
if !calledUsersGetByID {
t.Error("!calledUsersGetByID")
}
})
t.Run("valid sudo token, invalid sudo user", func(t *testing.T) {
req, _ := http.NewRequest("GET", "/", nil)
req.Header.Set("Authorization", `token-sudo token="abcdef",user="doesntexist"`)
var calledAccessTokensLookup bool
db.Mocks.AccessTokens.Lookup = func(tokenHexEncoded, requiredScope string) (subjectUserID int32, err error) {
calledAccessTokensLookup = true
if want := "abcdef"; tokenHexEncoded != want {
t.Errorf("got %q, want %q", tokenHexEncoded, want)
}
if want := authz.ScopeSiteAdminSudo; requiredScope != want {
t.Errorf("got %q, want %q", requiredScope, want)
}
return 123, nil
}
var calledUsersGetByID bool
db.Mocks.Users.GetByID = func(ctx context.Context, userID int32) (*types.User, error) {
calledUsersGetByID = true
if want := int32(123); userID != want {
t.Errorf("got %d, want %d", userID, want)
}
return &types.User{ID: userID, SiteAdmin: true}, nil
}
var calledUsersGetByUsername bool
db.Mocks.Users.GetByUsername = func(ctx context.Context, username string) (*types.User, error) {
calledUsersGetByUsername = true
if want := "doesntexist"; username != want {
t.Errorf("got %q, want %q", username, want)
}
return nil, &errcode.Mock{IsNotFound: true}
}
defer func() { db.Mocks = db.MockStores{} }()
checkHTTPResponse(t, req, http.StatusForbidden, "Unable to sudo to nonexistent user.\n")
if !calledAccessTokensLookup {
t.Error("!calledAccessTokensLookup")
}
if !calledUsersGetByID {
t.Error("!calledUsersGetByID")
}
if !calledUsersGetByUsername {
t.Error("!calledUsersGetByUsername")
}
})
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
google_images_download/google_images_download.py
|
#!/usr/bin/env python
# coding: utf-8
###### Searching and Downloading Google Images to the local disk ######
# Import Libraries
import sys
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
import urllib.request
from urllib.request import Request, urlopen
from urllib.request import URLError, HTTPError
from urllib.parse import quote
import http.client
from http.client import IncompleteRead, BadStatusLine
http.client._MAXHEADERS = 1000
else: # If the Current Version of Python is 2.x
import urllib2
from urllib2 import Request, urlopen
from urllib2 import URLError, HTTPError
from urllib import quote
import httplib
from httplib import IncompleteRead, BadStatusLine
httplib._MAXHEADERS = 1000
import time # Importing the time library to check the time of code execution
import os
import argparse
import ssl
import datetime
import json
import re
import codecs
import socket
args_list = ["keywords", "keywords_from_file", "prefix_keywords", "suffix_keywords",
"limit", "format", "color", "color_type", "usage_rights", "size",
"exact_size", "aspect_ratio", "type", "time", "time_range", "delay", "url", "single_image",
"output_directory", "image_directory", "no_directory", "proxy", "similar_images", "specific_site",
"print_urls", "print_size", "print_paths", "metadata", "extract_metadata", "socket_timeout",
"thumbnail", "thumbnail_only", "language", "prefix", "chromedriver", "related_images", "safe_search",
"no_numbering",
"offset", "no_download", "save_source", "silent_mode", "ignore_urls"]
def user_input():
config = argparse.ArgumentParser()
config.add_argument('-cf', '--config_file', help='config file name', default='', type=str, required=False)
config_file_check = config.parse_known_args()
object_check = vars(config_file_check[0])
if object_check['config_file'] != '':
records = []
json_file = json.load(open(config_file_check[0].config_file))
for record in range(0, len(json_file['Records'])):
arguments = {}
for i in args_list:
arguments[i] = None
for key, value in json_file['Records'][record].items():
arguments[key] = value
records.append(arguments)
records_count = len(records)
else:
# Taking command line arguments from users
parser = argparse.ArgumentParser()
parser.add_argument('-k', '--keywords', help='delimited list input', type=str, required=False)
parser.add_argument('-kf', '--keywords_from_file', help='extract list of keywords from a text file', type=str,
required=False)
parser.add_argument('-sk', '--suffix_keywords',
help='comma separated additional words added after to main keyword', type=str,
required=False)
parser.add_argument('-pk', '--prefix_keywords',
help='comma separated additional words added before main keyword', type=str, required=False)
parser.add_argument('-l', '--limit', help='delimited list input', type=str, required=False)
parser.add_argument('-f', '--format', help='download images with specific format', type=str, required=False,
choices=['jpg', 'gif', 'png', 'bmp', 'svg', 'webp', 'ico'])
parser.add_argument('-u', '--url', help='search with google image URL', type=str, required=False)
parser.add_argument('-x', '--single_image', help='downloading a single image from URL', type=str,
required=False)
parser.add_argument('-o', '--output_directory', help='download images in a specific main directory', type=str,
required=False)
parser.add_argument('-i', '--image_directory', help='download images in a specific sub-directory', type=str,
required=False)
parser.add_argument('-n', '--no_directory', default=False,
help='download images in the main directory but no sub-directory', action="store_true")
parser.add_argument('-d', '--delay', help='delay in seconds to wait between downloading two images', type=int,
required=False)
parser.add_argument('-co', '--color', help='filter on color', type=str, required=False,
choices=['red', 'orange', 'yellow', 'green', 'teal', 'blue', 'purple', 'pink', 'white',
'gray', 'black', 'brown'])
parser.add_argument('-ct', '--color_type', help='filter on color', type=str, required=False,
choices=['full-color', 'black-and-white', 'transparent'])
parser.add_argument('-r', '--usage_rights', help='usage rights', type=str, required=False,
choices=['labeled-for-reuse-with-modifications', 'labeled-for-reuse',
'labeled-for-noncommercial-reuse-with-modification',
'labeled-for-nocommercial-reuse'])
parser.add_argument('-s', '--size', help='image size', type=str, required=False,
choices=['large', 'medium', 'icon', '>400*300', '>640*480', '>800*600', '>1024*768', '>2MP',
'>4MP', '>6MP', '>8MP', '>10MP', '>12MP', '>15MP', '>20MP', '>40MP', '>70MP'])
parser.add_argument('-es', '--exact_size', help='exact image resolution "WIDTH,HEIGHT"', type=str,
required=False)
parser.add_argument('-t', '--type', help='image type', type=str, required=False,
choices=['face', 'photo', 'clipart', 'line-drawing', 'animated'])
parser.add_argument('-w', '--time', help='image age', type=str, required=False,
choices=['past-24-hours', 'past-7-days', 'past-month', 'past-year'])
parser.add_argument('-wr', '--time_range',
help='time range for the age of the image. should be in the format {"time_min":"YYYY-MM-DD","time_max":"YYYY-MM-DD"}',
type=str, required=False)
parser.add_argument('-a', '--aspect_ratio', help='comma separated additional words added to keywords', type=str,
required=False,
choices=['tall', 'square', 'wide', 'panoramic'])
parser.add_argument('-si', '--similar_images',
help='downloads images very similar to the image URL you provide', type=str, required=False)
parser.add_argument('-ss', '--specific_site', help='downloads images that are indexed from a specific website',
type=str, required=False)
parser.add_argument('-p', '--print_urls', default=False, help="Print the URLs of the images",
action="store_true")
parser.add_argument('-ps', '--print_size', default=False, help="Print the size of the images on disk",
action="store_true")
parser.add_argument('-pp', '--print_paths', default=False,
help="Prints the list of absolute paths of the images", action="store_true")
parser.add_argument('-m', '--metadata', default=False, help="Print the metadata of the image",
action="store_true")
parser.add_argument('-e', '--extract_metadata', default=False, help="Dumps all the logs into a text file",
action="store_true")
parser.add_argument('-st', '--socket_timeout', default=False,
help="Connection timeout waiting for the image to download", type=float)
parser.add_argument('-th', '--thumbnail', default=False,
help="Downloads image thumbnail along with the actual image", action="store_true")
parser.add_argument('-tho', '--thumbnail_only', default=False,
help="Downloads only thumbnail without downloading actual images", action="store_true")
parser.add_argument('-la', '--language', default=False,
help="Defines the language filter. The search results are authomatically returned in that language",
type=str, required=False,
choices=['Arabic', 'Chinese (Simplified)', 'Chinese (Traditional)', 'Czech', 'Danish',
'Dutch', 'English', 'Estonian', 'Finnish', 'French', 'German', 'Greek', 'Hebrew',
'Hungarian', 'Icelandic', 'Italian', 'Japanese', 'Korean', 'Latvian', 'Lithuanian',
'Norwegian', 'Portuguese', 'Polish', 'Romanian', 'Russian', 'Spanish', 'Swedish',
'Turkish'])
parser.add_argument('-pr', '--prefix', default=False,
help="A word that you would want to prefix in front of each image name", type=str,
required=False)
parser.add_argument('-px', '--proxy', help='specify a proxy address and port', type=str, required=False)
parser.add_argument('-cd', '--chromedriver',
help='specify the path to chromedriver executable in your local machine', type=str,
required=False)
parser.add_argument('-ri', '--related_images', default=False,
help="Downloads images that are similar to the keyword provided", action="store_true")
parser.add_argument('-sa', '--safe_search', default=False,
help="Turns on the safe search filter while searching for images", action="store_true")
parser.add_argument('-nn', '--no_numbering', default=False,
help="Allows you to exclude the default numbering of images", action="store_true")
parser.add_argument('-of', '--offset', help="Where to start in the fetched links", type=str, required=False)
parser.add_argument('-nd', '--no_download', default=False,
help="Prints the URLs of the images and/or thumbnails without downloading them",
action="store_true")
parser.add_argument('-iu', '--ignore_urls', default=False,
help="delimited list input of image urls/keywords to ignore", type=str)
parser.add_argument('-sil', '--silent_mode', default=False,
help="Remains silent. Does not print notification messages on the terminal",
action="store_true")
parser.add_argument('-is', '--save_source',
help="creates a text file containing a list of downloaded images along with source page url",
type=str, required=False)
args = parser.parse_args()
arguments = vars(args)
records = []
records.append(arguments)
return records
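# Illustrative sketch (inferred from the parsing above, not part of the original
# source): when -cf/--config_file is used, the JSON file is expected to look like
#
#   {
#       "Records": [
#           {"keywords": "cats", "limit": 5, "print_urls": true},
#           {"keywords": "dogs", "limit": 10, "color": "black"}
#       ]
#   }
#
# where each record may use any of the names in args_list; keys that are not
# specified default to None.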
class googleimagesdownload:
def __init__(self):
pass
def _extract_data_pack(self, page):
start_line = page.find("AF_initDataCallback({key: \\'ds:1\\'") - 10
start_object = page.find('[', start_line + 1)
end_object = page.rfind(']',0,page.find('</script>', start_object + 1))+1
object_raw = str(page[start_object:end_object])
return bytes(object_raw, "utf-8").decode("unicode_escape")
def _extract_data_pack_extended(self, page):
start_line = page.find("AF_initDataCallback({key: 'ds:1'") - 10
start_object = page.find('[', start_line + 1)
end_object = page.rfind(']',0,page.find('</script>', start_object + 1)) + 1
return str(page[start_object:end_object])
def _extract_data_pack_ajax(self, data):
lines = data.split('\n')
return json.loads(lines[3])[0][2]
def _image_objects_from_pack(self, data):
image_objects = json.loads(data)[31][-1][12][2]
image_objects = [x for x in image_objects if x[0] == 1]
return image_objects
# Downloading entire Web Document (Raw Page Content)
def download_page(self, url):
version = (3, 0)
cur_version = sys.version_info
headers = {}
headers[
'User-Agent'] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"
if cur_version >= version: # If the Current Version of Python is 3.0 or above
try:
req = urllib.request.Request(url, headers=headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
except:
print("Could not open URL. Please check your internet connection and/or ssl settings \n"
"If you are using proxy, make sure your proxy settings is configured correctly")
sys.exit()
else: # If the Current Version of Python is 2.x
try:
req = urllib2.Request(url, headers=headers)
try:
response = urllib2.urlopen(req)
except URLError: # Handling SSL certificate failed
context = ssl._create_unverified_context()
response = urlopen(req, context=context)
respData = response.read()
except:
print("Could not open URL. Please check your internet connection and/or ssl settings \n"
"If you are using proxy, make sure your proxy settings is configured correctly")
sys.exit()
return "Page Not found"
try:
return self._image_objects_from_pack(self._extract_data_pack(respData)), self.get_all_tabs(respData)
except Exception as e:
print(e)
print('Image objects data unpacking failed. Please leave a comment with the above error at https://github.com/hardikvasa/google-images-download/pull/298')
sys.exit()
# Download Page for more than 100 images
def download_extended_page(self, url, chromedriver="/usr/lib/chromium-browser/chromedriver"):
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
chrome_service = Service(chromedriver)
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding('utf8')
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument("--headless")
options.add_argument("--disable-dev-shm-usage")
try:
browser = webdriver.Chrome(service=chrome_service, options=options)
except Exception as e:
print("Looks like we cannot locate the path the 'chromedriver' (use the '--chromedriver' "
"argument to specify the path to the executable.) or google chrome browser is not "
"installed on your machine (exception: %s)" % e)
sys.exit()
browser.set_window_size(1024, 768)
# Open the link
browser.get(url)
browser.execute_script("""
(function(XHR){
"use strict";
var open = XHR.prototype.open;
var send = XHR.prototype.send;
var data = [];
XHR.prototype.open = function(method, url, async, user, pass) {
this._url = url;
open.call(this, method, url, async, user, pass);
}
XHR.prototype.send = function(data) {
var self = this;
var url = this._url;
function stateChanged() {
if (self.readyState == 4) {
console.log("data available for: " + url)
XHR.prototype._data.push(self.response);
}
}
if (url.includes("/batchexecute?")) {
this.addEventListener("readystatechange", stateChanged, false);
}
send.call(this, data);
};
XHR.prototype._data = [];
})(XMLHttpRequest);
""")
time.sleep(1)
print("Getting you a lot of images. This may take a few moments...")
element = browser.find_element(By.TAG_NAME, "body")
# Scroll down
for i in range(50):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3)
try:
browser.find_element(By.XPATH, '//input[@value="Show more results"]').click()
for i in range(50):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3) # bot id protection
except:
for i in range(10):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3) # bot id protection
print("Reached end of Page.")
time.sleep(0.5)
source = browser.page_source # page source
images = self._image_objects_from_pack(self._extract_data_pack_extended(source))
ajax_data = browser.execute_script("return XMLHttpRequest.prototype._data")
for chunk in ajax_data:
images += self._image_objects_from_pack(self._extract_data_pack_ajax(chunk))
# close the browser
browser.close()
return images, self.get_all_tabs(source)
# Correcting the escape characters for python2
def replace_with_byte(self, match):
return chr(int(match.group(0)[1:], 8))
def repair(self, brokenjson):
invalid_escape = re.compile(r'\\[0-7]{1,3}') # up to 3 digits for byte values up to FF
return invalid_escape.sub(self.replace_with_byte, brokenjson)
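# Illustrative example (not part of the original source): repair() converts stray
# octal escapes left over from Python 2 style byte strings back into characters,
# e.g.
#
#   googleimagesdownload().repair('caf\\351')  ->  'café'
#
# since \351 is the octal escape for 0xE9.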
# Finding 'Next Image' from the given raw page
def get_next_tab(self, s):
start_line = s.find('class="dtviD"')
if start_line == -1: # If no links are found then give an error!
end_quote = 0
link = "no_tabs"
return link, '', end_quote
else:
start_line = s.find('class="dtviD"')
start_content = s.find('href="', start_line + 1)
end_content = s.find('">', start_content + 1)
url_item = "https://www.google.com" + str(s[start_content + 6:end_content])
url_item = url_item.replace('&', '&')
start_line_2 = s.find('class="dtviD"')
s = s.replace('&', '&')
start_content_2 = s.find(':', start_line_2 + 1)
end_content_2 = s.find('&usg=', start_content_2 + 1)
url_item_name = str(s[start_content_2 + 1:end_content_2])
chars = url_item_name.find(',g_1:')
chars_end = url_item_name.find(":", chars + 6)
if chars_end == -1:
updated_item_name = (url_item_name[chars + 5:]).replace("+", " ")
else:
updated_item_name = (url_item_name[chars + 5:chars_end]).replace("+", " ")
return url_item, updated_item_name, end_content
# Getting all links with the help of '_images_get_next_image'
def get_all_tabs(self, page):
tabs = {}
while True:
item, item_name, end_content = self.get_next_tab(page)
if item == "no_tabs":
break
else:
if len(item_name) > 100 or item_name == "background-color":
break
else:
tabs[item_name] = item # Append all the links in the list named 'Links'
time.sleep(0.1) # Timer could be used to slow down the request for image downloads
page = page[end_content:]
return tabs
# Format the object in readable format
def format_object(self, object):
data = object[1]
main = data[3]
info = data[9]
if info is None:
info = data[11]
formatted_object = {}
try:
formatted_object['image_height'] = main[2]
formatted_object['image_width'] = main[1]
formatted_object['image_link'] = main[0]
formatted_object['image_format'] = main[0][-1 * (len(main[0]) - main[0].rfind(".") - 1):]
formatted_object['image_description'] = info['2003'][3]
formatted_object['image_host'] = info['2003'][17]
formatted_object['image_source'] = info['2003'][2]
formatted_object['image_thumbnail_url'] = data[2][0]
except Exception as e:
print(e)
return None
return formatted_object
# function to download single image
def single_image(self, image_url):
main_directory = "downloads"
extensions = (".jpg", ".gif", ".png", ".bmp", ".svg", ".webp", ".ico")
url = image_url
try:
os.makedirs(main_directory)
except OSError as e:
if e.errno != 17:
raise
pass
req = Request(url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
response = urlopen(req, None, 10)
data = response.read()
response.close()
image_name = str(url[(url.rfind('/')) + 1:])
if '?' in image_name:
image_name = image_name[:image_name.find('?')]
# if ".jpg" in image_name or ".gif" in image_name or ".png" in image_name or ".bmp" in image_name or ".svg" in image_name or ".webp" in image_name or ".ico" in image_name:
if any(map(lambda extension: extension in image_name, extensions)):
file_name = main_directory + "/" + image_name
else:
file_name = main_directory + "/" + image_name + ".jpg"
image_name = image_name + ".jpg"
try:
output_file = open(file_name, 'wb')
output_file.write(data)
output_file.close()
except IOError as e:
raise e
except OSError as e:
raise e
print("completed ====> " + image_name.encode('raw_unicode_escape').decode('utf-8'))
return
def similar_images(self, similar_images):
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
try:
searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + similar_images
headers = {}
headers[
'User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req1 = urllib.request.Request(searchUrl, headers=headers)
resp1 = urllib.request.urlopen(req1)
content = str(resp1.read())
l1 = content.find('AMhZZ')
l2 = content.find('&', l1)
urll = content[l1:l2]
newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X"
req2 = urllib.request.Request(newurl, headers=headers)
resp2 = urllib.request.urlopen(req2)
l3 = content.find('/search?sa=X&q=')
l4 = content.find(';', l3 + 19)
urll2 = content[l3 + 19:l4]
return urll2
except:
return "Cloud not connect to Google Images endpoint"
else: # If the Current Version of Python is 2.x
try:
searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + similar_images
headers = {}
headers[
'User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req1 = urllib2.Request(searchUrl, headers=headers)
resp1 = urllib2.urlopen(req1)
content = str(resp1.read())
l1 = content.find('AMhZZ')
l2 = content.find('&', l1)
urll = content[l1:l2]
newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X"
req2 = urllib2.Request(newurl, headers=headers)
resp2 = urllib2.urlopen(req2)
l3 = content.find('/search?sa=X&q=')
l4 = content.find(';', l3 + 19)
urll2 = content[l3 + 19:l4]
return (urll2)
except:
return "Cloud not connect to Google Images endpoint"
# Building URL parameters
def build_url_parameters(self, arguments):
if arguments['language']:
lang = "&lr="
lang_param = {"Arabic": "lang_ar", "Chinese (Simplified)": "lang_zh-CN",
"Chinese (Traditional)": "lang_zh-TW", "Czech": "lang_cs", "Danish": "lang_da",
"Dutch": "lang_nl", "English": "lang_en", "Estonian": "lang_et", "Finnish": "lang_fi",
"French": "lang_fr", "German": "lang_de", "Greek": "lang_el", "Hebrew": "lang_iw ",
"Hungarian": "lang_hu", "Icelandic": "lang_is", "Italian": "lang_it", "Japanese": "lang_ja",
"Korean": "lang_ko", "Latvian": "lang_lv", "Lithuanian": "lang_lt", "Norwegian": "lang_no",
"Portuguese": "lang_pt", "Polish": "lang_pl", "Romanian": "lang_ro", "Russian": "lang_ru",
"Spanish": "lang_es", "Swedish": "lang_sv", "Turkish": "lang_tr"}
lang_url = lang + lang_param[arguments['language']]
else:
lang_url = ''
if arguments['exact_size']:
size_array = [x.strip() for x in arguments['exact_size'].split(',')]
exact_size = ",isz:ex,iszw:" + str(size_array[0]) + ",iszh:" + str(size_array[1])
else:
exact_size = ''
built_url = "&tbs="
counter = 0
params = {'color': [arguments['color'], {'red': 'ic:specific,isc:red', 'orange': 'ic:specific,isc:orange',
'yellow': 'ic:specific,isc:yellow', 'green': 'ic:specific,isc:green',
'teal': 'ic:specific,isc:teel', 'blue': 'ic:specific,isc:blue',
'purple': 'ic:specific,isc:purple', 'pink': 'ic:specific,isc:pink',
'white': 'ic:specific,isc:white', 'gray': 'ic:specific,isc:gray',
'black': 'ic:specific,isc:black', 'brown': 'ic:specific,isc:brown'}],
'color_type': [arguments['color_type'],
{'full-color': 'ic:color', 'black-and-white': 'ic:gray', 'transparent': 'ic:trans'}],
'usage_rights': [arguments['usage_rights'],
{'labeled-for-reuse-with-modifications': 'sur:fmc', 'labeled-for-reuse': 'sur:fc',
'labeled-for-noncommercial-reuse-with-modification': 'sur:fm',
'labeled-for-nocommercial-reuse': 'sur:f'}],
'size': [arguments['size'],
{'large': 'isz:l', 'medium': 'isz:m', 'icon': 'isz:i', '>400*300': 'isz:lt,islt:qsvga',
'>640*480': 'isz:lt,islt:vga', '>800*600': 'isz:lt,islt:svga',
'>1024*768': 'visz:lt,islt:xga', '>2MP': 'isz:lt,islt:2mp', '>4MP': 'isz:lt,islt:4mp',
'>6MP': 'isz:lt,islt:6mp', '>8MP': 'isz:lt,islt:8mp', '>10MP': 'isz:lt,islt:10mp',
'>12MP': 'isz:lt,islt:12mp', '>15MP': 'isz:lt,islt:15mp', '>20MP': 'isz:lt,islt:20mp',
'>40MP': 'isz:lt,islt:40mp', '>70MP': 'isz:lt,islt:70mp'}],
'type': [arguments['type'], {'face': 'itp:face', 'photo': 'itp:photo', 'clipart': 'itp:clipart',
'line-drawing': 'itp:lineart', 'animated': 'itp:animated'}],
'time': [arguments['time'], {'past-24-hours': 'qdr:d', 'past-7-days': 'qdr:w', 'past-month': 'qdr:m',
'past-year': 'qdr:y'}],
'aspect_ratio': [arguments['aspect_ratio'],
{'tall': 'iar:t', 'square': 'iar:s', 'wide': 'iar:w', 'panoramic': 'iar:xw'}],
'format': [arguments['format'],
{'jpg': 'ift:jpg', 'gif': 'ift:gif', 'png': 'ift:png', 'bmp': 'ift:bmp', 'svg': 'ift:svg',
'webp': 'webp', 'ico': 'ift:ico', 'raw': 'ift:craw'}]}
for key, value in params.items():
if value[0] is not None:
ext_param = value[1][value[0]]
# counter will tell if it is first param added or not
if counter == 0:
# add it to the built url
built_url = built_url + ext_param
counter += 1
else:
built_url = built_url + ',' + ext_param
counter += 1
built_url = lang_url + built_url + exact_size
return built_url
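# Illustrative example (not part of the original source): with
# arguments = {'color': 'red', 'size': 'large'} and every other filter key set
# to None (no language, no exact_size), this method returns roughly
# "&tbs=ic:specific,isc:red,isz:l".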
# building main search URL
def build_search_url(self, search_term, params, url, similar_images, specific_site, safe_search):
# check safe_search
safe_search_string = "&safe=active"
# check the args and choose the URL
if url:
url = url
elif similar_images:
print(similar_images)
keywordem = self.similar_images(similar_images)
url = 'https://www.google.com/search?q=' + keywordem + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
elif specific_site:
url = 'https://www.google.com/search?q=' + quote(
search_term.encode(
'utf-8')) + '&as_sitesearch=' + specific_site + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
else:
url = 'https://www.google.com/search?q=' + quote(
search_term.encode(
'utf-8')) + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
# safe search check
if safe_search:
url = url + safe_search_string
return url
# measures the file size
def file_size(self, file_path):
if os.path.isfile(file_path):
file_info = os.stat(file_path)
size = file_info.st_size
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
return "%3.1f %s" % (size, x)
size /= 1024.0
return size
# keywords from file
def keywords_from_file(self, file_name):
search_keyword = []
with codecs.open(file_name, 'r', encoding='utf-8-sig') as f:
if '.csv' in file_name:
for line in f:
if line in ['\n', '\r\n']:
pass
else:
search_keyword.append(line.replace('\n', '').replace('\r', ''))
elif '.txt' in file_name:
for line in f:
if line in ['\n', '\r\n']:
pass
else:
search_keyword.append(line.replace('\n', '').replace('\r', ''))
else:
print("Invalid file type: Valid file types are either .txt or .csv \n"
"exiting...")
sys.exit()
return search_keyword
# make directories
def create_directories(self, main_directory, dir_name, thumbnail, thumbnail_only):
dir_name_thumbnail = dir_name + " - thumbnail"
# make a search keyword directory
try:
if not os.path.exists(main_directory):
os.makedirs(main_directory)
time.sleep(0.15)
path = (dir_name)
sub_directory = os.path.join(main_directory, path)
if not os.path.exists(sub_directory):
os.makedirs(sub_directory)
if thumbnail or thumbnail_only:
sub_directory_thumbnail = os.path.join(main_directory, dir_name_thumbnail)
if not os.path.exists(sub_directory_thumbnail):
os.makedirs(sub_directory_thumbnail)
else:
path = (dir_name)
sub_directory = os.path.join(main_directory, path)
if not os.path.exists(sub_directory):
os.makedirs(sub_directory)
if thumbnail or thumbnail_only:
sub_directory_thumbnail = os.path.join(main_directory, dir_name_thumbnail)
if not os.path.exists(sub_directory_thumbnail):
os.makedirs(sub_directory_thumbnail)
except OSError as e:
if e.errno != 17:
raise
pass
return
# Download Image thumbnails
def download_image_thumbnail(self, image_url, main_directory, dir_name, return_image_name, print_urls,
socket_timeout, print_size, no_download, save_source, img_src, ignore_urls):
if print_urls or no_download:
print("Image URL: " + image_url)
if no_download:
return "success", "Printed url without downloading"
try:
req = Request(image_url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
try:
# timeout time to download an image
if socket_timeout:
timeout = float(socket_timeout)
else:
timeout = 10
response = urlopen(req, None, timeout)
data = response.read()
response.close()
path = main_directory + "/" + dir_name + " - thumbnail" + "/" + return_image_name
try:
output_file = open(path, 'wb')
output_file.write(data)
output_file.close()
if save_source:
list_path = main_directory + "/" + save_source + ".txt"
list_file = open(list_path, 'a')
list_file.write(path + '\t' + img_src + '\n')
list_file.close()
except OSError as e:
download_status = 'fail'
download_message = "OSError on an image...trying next one..." + " Error: " + str(e)
except IOError as e:
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
download_status = 'success'
download_message = "Completed Image Thumbnail ====> " + return_image_name
# image size parameter
if print_size:
print("Image Size: " + str(self.file_size(path)))
except UnicodeEncodeError as e:
download_status = 'fail'
download_message = "UnicodeEncodeError on an image...trying next one..." + " Error: " + str(e)
except HTTPError as e: # If there is any HTTPError
download_status = 'fail'
download_message = "HTTPError on an image...trying next one..." + " Error: " + str(e)
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
except ssl.CertificateError as e:
download_status = 'fail'
download_message = "CertificateError on an image...trying next one..." + " Error: " + str(e)
except IOError as e: # If there is any IOError
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
return download_status, download_message
# Download Images
def download_image(self, image_url, image_format, main_directory, dir_name, count, print_urls, socket_timeout,
prefix, print_size, no_numbering, no_download, save_source, img_src, silent_mode, thumbnail_only,
format, ignore_urls):
if not silent_mode:
if print_urls or no_download:
print("Image URL: " + image_url)
if ignore_urls:
if any(url in image_url for url in ignore_urls.split(',')):
return "fail", "Image ignored due to 'ignore url' parameter", None, image_url
if thumbnail_only:
return "success", "Skipping image download...", str(image_url[(image_url.rfind('/')) + 1:]), image_url
if no_download:
return "success", "Printed url without downloading", None, image_url
try:
req = Request(image_url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
try:
# timeout time to download an image
if socket_timeout:
timeout = float(socket_timeout)
else:
timeout = 10
response = urlopen(req, None, timeout)
data = response.read()
info = response.info()
response.close()
qmark = image_url.rfind('?')
if qmark == -1:
qmark = len(image_url)
slash = image_url.rfind('/', 0, qmark) + 1
image_name = str(image_url[slash:qmark]).lower()
type = info.get_content_type()
if type == "image/jpeg" or type == "image/jpg":
if not image_name.endswith(".jpg") and not image_name.endswith(".jpeg"):
image_name += ".jpg"
elif type == "image/png":
if not image_name.endswith(".png"):
image_name += ".png"
elif type == "image/webp":
if not image_name.endswith(".webp"):
image_name += ".webp"
elif type == "image/gif":
if not image_name.endswith(".gif"):
image_name += ".gif"
elif type == "image/bmp" or type == "image/x-windows-bmp":
if not image_name.endswith(".bmp"):
image_name += ".bmp"
elif type == "image/x-icon" or type == "image/vnd.microsoft.icon":
if not image_name.endswith(".ico"):
image_name += ".ico"
elif type == "image/svg+xml":
if not image_name.endswith(".svg"):
image_name += ".svg"
else:
download_status = 'fail'
download_message = "Invalid image format '" + type + "'. Skipping..."
return_image_name = ''
absolute_path = ''
return download_status, download_message, return_image_name, absolute_path
# prefix name in image
if prefix:
prefix = prefix + " "
else:
prefix = ''
if no_numbering:
path = main_directory + "/" + dir_name + "/" + prefix + image_name
else:
path = main_directory + "/" + dir_name + "/" + prefix + str(count) + "." + image_name
try:
output_file = open(path, 'wb')
output_file.write(data)
output_file.close()
if save_source:
list_path = main_directory + "/" + save_source + ".txt"
list_file = open(list_path, 'a')
list_file.write(path + '\t' + img_src + '\n')
list_file.close()
absolute_path = os.path.abspath(path)
except OSError as e:
download_status = 'fail'
download_message = "OSError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
# return image name back to calling method to use it for thumbnail downloads
download_status = 'success'
download_message = "Completed Image ====> " + prefix + str(count) + "." + image_name
return_image_name = prefix + str(count) + "." + image_name
# image size parameter
if not silent_mode:
if print_size:
print("Image Size: " + str(self.file_size(path)))
except UnicodeEncodeError as e:
download_status = 'fail'
download_message = "UnicodeEncodeError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except BadStatusLine as e:
download_status = 'fail'
download_message = "BadStatusLine on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except HTTPError as e: # If there is any HTTPError
download_status = 'fail'
download_message = "HTTPError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except ssl.CertificateError as e:
download_status = 'fail'
download_message = "CertificateError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except IOError as e: # If there is any IOError
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except IncompleteRead as e:
download_status = 'fail'
download_message = "IncompleteReadError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
return download_status, download_message, return_image_name, absolute_path
def _get_all_items(self, image_objects, main_directory, dir_name, limit, arguments):
items = []
abs_path = []
errorCount = 0
i = 0
count = 1
while count < limit + 1 and i < len(image_objects):
if len(image_objects) == 0:
print("no_links")
break
                # Offset handling: skip items until the requested offset is reached.
                # (The original hardikvasa code used "count < int(arguments['offset'])",
                # which appears contrary to the intended behaviour.)
elif arguments['offset'] and count <= int(arguments['offset']):
count += 1
#page = page[end_content:]
else:
# format the item for readability
object = self.format_object(image_objects[i])
if arguments['metadata']:
if not arguments["silent_mode"]:
print("\nImage Metadata: " + str(object))
# download the images
download_status, download_message, return_image_name, absolute_path = self.download_image(
object['image_link'], object['image_format'], main_directory, dir_name, count,
arguments['print_urls'], arguments['socket_timeout'], arguments['prefix'], arguments['print_size'],
arguments['no_numbering'], arguments['no_download'], arguments['save_source'],
object['image_source'], arguments["silent_mode"], arguments["thumbnail_only"], arguments['format'],
arguments['ignore_urls'])
if not arguments["silent_mode"]:
print(download_message)
if download_status == "success":
# download image_thumbnails
if arguments['thumbnail'] or arguments["thumbnail_only"]:
download_status, download_message_thumbnail = self.download_image_thumbnail(
object['image_thumbnail_url'], main_directory, dir_name, return_image_name,
arguments['print_urls'], arguments['socket_timeout'], arguments['print_size'],
arguments['no_download'], arguments['save_source'], object['image_source'],
arguments['ignore_urls'])
if not arguments["silent_mode"]:
print(download_message_thumbnail)
count += 1
object['image_filename'] = return_image_name
items.append(object) # Append all the links in the list named 'Links'
abs_path.append(absolute_path)
else:
errorCount += 1
# delay param
if arguments['delay']:
time.sleep(int(arguments['delay']))
i += 1
if count < limit:
print("\n\nUnfortunately all " + str(
limit) + " could not be downloaded because some images were not downloadable. " + str(
count - 1) + " is all we got for this search filter!")
return items, errorCount, abs_path
# Bulk Download
def download(self, arguments):
paths_agg = {}
# for input coming from other python files
if __name__ != "__main__":
# if the calling file contains config_file param
if 'config_file' in arguments:
records = []
json_file = json.load(open(arguments['config_file']))
for record in range(0, len(json_file['Records'])):
arguments = {}
for i in args_list:
arguments[i] = None
for key, value in json_file['Records'][record].items():
arguments[key] = value
records.append(arguments)
total_errors = 0
for rec in records:
paths, errors = self.download_executor(rec)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"]:
if arguments['print_paths']:
                            print(str(paths).encode('raw_unicode_escape').decode('utf-8'))
total_errors = total_errors + errors
return paths_agg, total_errors
# if the calling file contains params directly
else:
paths, errors = self.download_executor(arguments)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"]:
if arguments['print_paths']:
                        print(str(paths).encode('raw_unicode_escape').decode('utf-8'))
return paths_agg, errors
# for input coming from CLI
else:
paths, errors = self.download_executor(arguments)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"]:
if arguments['print_paths']:
                    print(str(paths).encode('raw_unicode_escape').decode('utf-8'))
return paths_agg, errors
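    # Illustrative usage (a sketch, not part of the original module; the argument
    # names mirror the args_list keys this class expects, and any key that is
    # omitted is filled in with None by download_executor):
    #
    #   downloader = googleimagesdownload()
    #   paths, errors = downloader.download({"keywords": "polar bears",
    #                                        "limit": 5,
    #                                        "print_urls": True})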
def download_executor(self, arguments):
paths = {}
errorCount = None
for arg in args_list:
if arg not in arguments:
arguments[arg] = None
######Initialization and Validation of user arguments
if arguments['keywords']:
search_keyword = [str(item) for item in arguments['keywords'].split(',')]
if arguments['keywords_from_file']:
search_keyword = self.keywords_from_file(arguments['keywords_from_file'])
# both time and time range should not be allowed in the same query
if arguments['time'] and arguments['time_range']:
raise ValueError(
'Either time or time range should be used in a query. Both cannot be used at the same time.')
        # both size and exact size should not be allowed in the same query
if arguments['size'] and arguments['exact_size']:
raise ValueError(
'Either "size" or "exact_size" should be used in a query. Both cannot be used at the same time.')
# both image directory and no image directory should not be allowed in the same query
if arguments['image_directory'] and arguments['no_directory']:
raise ValueError('You can either specify image directory or specify no image directory, not both!')
# Additional words added to keywords
if arguments['suffix_keywords']:
suffix_keywords = [" " + str(sk) for sk in arguments['suffix_keywords'].split(',')]
else:
suffix_keywords = ['']
# Additional words added to keywords
if arguments['prefix_keywords']:
prefix_keywords = [str(sk) + " " for sk in arguments['prefix_keywords'].split(',')]
else:
prefix_keywords = ['']
# Setting limit on number of images to be downloaded
if arguments['limit']:
limit = int(arguments['limit'])
else:
limit = 100
if arguments['url']:
current_time = str(datetime.datetime.now()).split('.')[0]
search_keyword = [current_time.replace(":", "_")]
if arguments['similar_images']:
current_time = str(datetime.datetime.now()).split('.')[0]
search_keyword = [current_time.replace(":", "_")]
# If single_image or url argument not present then keywords is mandatory argument
if arguments['single_image'] is None and arguments['url'] is None and arguments['similar_images'] is None and \
arguments['keywords'] is None and arguments['keywords_from_file'] is None:
print('-------------------------------\n'
'Uh oh! Keywords is a required argument \n\n'
'Please refer to the documentation on guide to writing queries \n'
'https://github.com/hardikvasa/google-images-download#examples'
'\n\nexiting!\n'
'-------------------------------')
sys.exit()
# If this argument is present, set the custom output directory
if arguments['output_directory']:
main_directory = arguments['output_directory']
else:
main_directory = "downloads"
# Proxy settings
if arguments['proxy']:
os.environ["http_proxy"] = arguments['proxy']
os.environ["https_proxy"] = arguments['proxy']
# Add time range to keywords if asked
time_range = ''
if arguments['time_range']:
json_acceptable_string = arguments['time_range'].replace("'", "\"")
d = json.loads(json_acceptable_string)
time_range = ' after:' + d['time_min'] + ' before:' + d['time_max']
######Initialization Complete
total_errors = 0
for pky in prefix_keywords: # 1.for every prefix keywords
for sky in suffix_keywords: # 2.for every suffix keywords
i = 0
while i < len(search_keyword): # 3.for every main keyword
iteration = "\n" + "Item no.: " + str(i + 1) + " -->" + " Item name = " + (pky) + (
search_keyword[i]) + (sky)
if not arguments["silent_mode"]:
print(iteration.encode('raw_unicode_escape').decode('utf-8'))
print("Evaluating...")
else:
print("Downloading images for: " + (pky) + (search_keyword[i]) + (sky) + " ...")
search_term = pky + search_keyword[i] + sky
if arguments['image_directory']:
dir_name = arguments['image_directory']
elif arguments['no_directory']:
dir_name = ''
else:
dir_name = search_term + (
'-' + arguments['color'] if arguments['color'] else '') # sub-directory
if not arguments["no_download"]:
self.create_directories(main_directory, dir_name, arguments['thumbnail'],
arguments['thumbnail_only']) # create directories in OS
params = self.build_url_parameters(arguments) # building URL with params
search_term += time_range
url = self.build_search_url(search_term, params, arguments['url'], arguments['similar_images'],
arguments['specific_site'],
arguments['safe_search']) # building main search url
if limit < 101:
images, tabs = self.download_page(url) # download page
else:
images, tabs = self.download_extended_page(url, arguments['chromedriver'])
if not arguments["silent_mode"]:
if arguments['no_download']:
print("Getting URLs without downloading images...")
else:
print("Starting Download...")
items, errorCount, abs_path = self._get_all_items(images, main_directory, dir_name, limit,
arguments) # get all image items and download images
paths[pky + search_keyword[i] + sky] = abs_path
# dumps into a json file
if arguments['extract_metadata']:
try:
if not os.path.exists("logs"):
os.makedirs("logs")
except OSError as e:
print(e)
json_file = open("logs/" + search_keyword[i] + ".json", "w")
json.dump(items, json_file, indent=4, sort_keys=True)
json_file.close()
# Related images
if arguments['related_images']:
print("\nGetting list of related keywords...this may take a few moments")
for key, value in tabs.items():
final_search_term = (search_term + " - " + key)
print("\nNow Downloading - " + final_search_term)
if limit < 101:
images, _ = self.download_page(value) # download page
else:
images, _ = self.download_extended_page(value, arguments['chromedriver'])
self.create_directories(main_directory, final_search_term, arguments['thumbnail'],
arguments['thumbnail_only'])
self._get_all_items(images, main_directory, search_term + " - " + key, limit, arguments)
i += 1
total_errors = total_errors + errorCount
if not arguments["silent_mode"]:
print("\nErrors: " + str(errorCount) + "\n")
return paths, total_errors
# ------------- Main Program -------------#
def main():
records = user_input()
total_errors = 0
t0 = time.time() # start the timer
for arguments in records:
if arguments['single_image']: # Download Single Image using a URL
response = googleimagesdownload()
response.single_image(arguments['single_image'])
else: # or download multiple images based on keywords/keyphrase search
response = googleimagesdownload()
paths, errors = response.download(arguments) # wrapping response in a variable just for consistency
total_errors = total_errors + errors
t1 = time.time() # stop the timer
    total_time = t1 - t0  # total time taken to crawl, find and download the requested links
if not arguments["silent_mode"]:
print("\nEverything downloaded!")
print("Total errors: " + str(total_errors))
print("Total time taken: " + str(total_time) + " Seconds")
if __name__ == "__main__":
main()
|
[] |
[] |
[
"http_proxy",
"https_proxy"
] |
[]
|
["http_proxy", "https_proxy"]
|
python
| 2 | 0 | |
tests/integration/integration_suite_test.go
|
package integration
import (
"github.com/google/uuid"
"log"
"strings"
. "github.com/k8ssandra/k8ssandra/tests/integration/steps"
"fmt"
"os"
"testing"
)
const (
medusaTestTable = "medusa_test"
medusaTestKeyspace = "medusa"
traefikNamespace = "traefik"
minioNamespace = "minio"
)
// "" means latest stable version in the Helm repo
var upgradeStartVersions = []string{"v1.0.0", "latest"}
func TestMain(m *testing.M) {
err := InitTestClient()
if err != nil {
log.Fatalf("failed to initialize test client: %s", err)
}
os.Exit(m.Run())
}
func initializeCluster(t *testing.T) string {
log.Println(Step("Initializing cluster"))
CheckK8sClusterIsReachable(t)
InstallTraefik(t)
namespace := CreateNamespace(t)
CheckNamespaceWasCreated(t, namespace)
return namespace
}
func cleanupClusterOption() string {
if os.Getenv("CLUSTER_CLEANUP") != "" {
return os.Getenv("CLUSTER_CLEANUP")
} else {
return "always"
}
}
func shouldCleanupCluster(success bool) bool {
if cleanupClusterOption() == "always" || (cleanupClusterOption() == "success" && success) {
return true
}
return false
}
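// Illustrative invocation (assumed, not part of the suite): keep the cluster
// around when a scenario fails so it can be inspected afterwards:
//
//	CLUSTER_CLEANUP=success go test -run TestReaperDeploymentScenario ./tests/integration/...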
func cleanupCluster(t *testing.T, namespace string, success bool) {
if shouldCleanupCluster(success) {
log.Println(Step("Cleaning up cluster"))
UninstallHelmReleaseAndNamespace(t, "k8ssandra", namespace)
WaitForCassandraDatacenterDeletion(t, namespace)
UninstallHelmReleaseAndNamespace(t, "traefik", traefikNamespace)
UninstallHelmReleaseAndNamespace(t, "minio", minioNamespace)
} else {
log.Println(Info("Not cleaning up cluster as requested"))
}
}
// Full stack scenario:
// - Install Minio
// - Create the Minio credentials secret
// - Register a cluster with 3 nodes
// - Run the Reaper test scenario
// - Run the Medusa test scenario
// - Run the Prometheus test scenario
// - Run the Grafana test scenario
// - Run the Stargate test scenario
// - Terminate the namespace and cleanup the cluster
func TestFullStackScenario(t *testing.T) {
const (
medusaBackend = "Minio"
backupName = "backup1"
)
namespace := initializeCluster(t)
success := t.Run("Full Stack Test", func(t *testing.T) {
createMedusaSecretAndInstallDeps(t, namespace, medusaBackend)
deployFullStackCluster(t, namespace, true)
t.Run("Test Reaper", func(t *testing.T) {
testReaper(t)
})
t.Run("Test Medusa", func(t *testing.T) {
testMedusa(t, namespace, medusaBackend, backupName, true)
})
t.Run("Test Prometheus", func(t *testing.T) {
testPrometheus(t, namespace)
})
t.Run("Test Grafana", func(t *testing.T) {
testGrafana(t, namespace)
})
t.Run("Test Stargate", func(t *testing.T) {
// The backup/restore test runs before this. Because it shuts down
// the Cassandra cluster, we need to restart Stargate. See
// https://github.com/k8ssandra/k8ssandra/issues/411 for details.
releaseName := "k8ssandra"
dcName := "dc1"
RestartStargate(t, releaseName, dcName, namespace)
testStargate(t, namespace)
})
})
cleanupCluster(t, namespace, success)
}
func deployFullStackCluster(t *testing.T, namespace string, useLocalCharts bool) {
DeployClusterWithValues(t, namespace, "minio", "cluster_full_stack.yaml", 3, false, useLocalCharts, "")
checkResourcePresenceForReaper(t, namespace)
waitForReaperPod(t, namespace)
checkReaperRegistered(t, namespace)
}
// Reaper scenario:
// - Install Traefik
// - Create a namespace
// - Register a cluster with 3 Cassandra nodes
// - Verify that Reaper correctly initializes
// - Start a repair on the reaper_db keyspace
// - Wait for at least one segment to be processed
// - Cancel the repair
// - Terminate the namespace and delete the cluster
func TestReaperDeploymentScenario(t *testing.T) {
namespace := initializeCluster(t)
success := t.Run("Test Reaper", func(t *testing.T) {
deployClusterForReaper(t, namespace, true)
testReaper(t)
})
cleanupCluster(t, namespace, success)
}
func testReaper(t *testing.T) {
log.Println(Step("Testing Reaper..."))
repairId := triggerRepair(t)
waitForSegmentDoneAndCancel(t, repairId)
}
func deployClusterForReaper(t *testing.T, namespace string, useLocalCharts bool) {
log.Println(Info("Deploying K8ssandra and waiting for Reaper to be ready"))
DeployClusterWithValues(t, namespace, "", "cluster_with_reaper.yaml", 3, false, useLocalCharts, "")
checkResourcePresenceForReaper(t, namespace)
waitForReaperPod(t, namespace)
checkReaperRegistered(t, namespace)
}
func checkResourcePresenceForReaper(t *testing.T, namespace string) {
CheckResourceWithLabelsIsPresent(t, namespace, "service", map[string]string{"app.kubernetes.io/managed-by": "reaper-operator"})
CheckClusterExpectedResources(t, namespace)
}
func waitForReaperPod(t *testing.T, namespace string) {
WaitForReaperPod(t, namespace)
}
func checkReaperRegistered(t *testing.T, namespace string) {
CheckKeyspaceExists(t, namespace, "reaper_db")
CheckClusterIsRegisteredInReaper(t, "k8ssandra")
}
func triggerRepair(t *testing.T) uuid.UUID {
log.Println(Info("Starting a repair"))
return TriggerRepair(t, "k8ssandra", "reaper_db", "k8ssandra")
}
func waitForSegmentDoneAndCancel(t *testing.T, repairId uuid.UUID) {
log.Println(Info("Waiting for one segment to be repaired and canceling run"))
WaitForOneSegmentToBeDone(t, repairId)
CancelRepair(t, repairId)
}
// Medusa scenario (invoked with a specific backend name):
// - Register a cluster with 1 node
// - Potentially install backend specific dependencies (such as Minio)
// - Create the backend credentials secret
// - Create a keyspace and a table
// - Load 10 rows and check that we can read 10 rows
// - Perform a backup with Medusa
// - Load 10 rows and check that we can read 20 rows
// - Restore the backup
// - Verify that we can read 10 rows
// - Terminate the namespace and delete the cluster
func TestMedusaDeploymentScenario(t *testing.T) {
const backupName = "backup1"
backends := []string{"Minio", "local", "S3", "google_storage", "azure_blobs"}
for _, backend := range backends {
t.Run(fmt.Sprintf("Medusa on %s", backend), func(t *testing.T) {
namespace := initializeCluster(t)
medusaSuccess := t.Run("Test backup and restore", func(t *testing.T) {
createMedusaSecretAndInstallDeps(t, namespace, backend)
deployClusterForMedusa(t, namespace, backend, 1, true, "")
testMedusa(t, namespace, backend, backupName, true)
scaleUpCassandra(t, namespace, backend, 2)
})
cleanupCluster(t, namespace, medusaSuccess)
})
}
}
func testMedusa(t *testing.T, namespace, backend, backupName string, useLocalChartForBackup bool) {
log.Println(Step("Testing Medusa..."))
log.Println("Creating test keyspace and table")
CreateCassandraTable(t, namespace, medusaTestTable, medusaTestKeyspace)
loadRowsAndCheckCount(t, namespace, 10, 10)
log.Println(Info("Backing up Cassandra"))
PerformBackup(t, namespace, backupName, useLocalChartForBackup)
if !useLocalChartForBackup {
// This will upgrade the cluster to the local version if the stable chart was used to perform the backup
scaleUpCassandra(t, namespace, backend, 1)
}
loadRowsAndCheckCount(t, namespace, 10, 20)
log.Println(Info("Restoring backup and checking row count"))
RestoreBackup(t, namespace, backupName)
CheckRowCountInTable(t, 10, namespace, medusaTestTable, medusaTestKeyspace)
}
func deployClusterForMedusa(t *testing.T, namespace, backend string, nodes int, useLocalCharts bool, version string) {
log.Println(Info(fmt.Sprintf("Deploying K8ssandra with Medusa using %s", backend)))
valuesFile := fmt.Sprintf("cluster_with_medusa_%s.yaml", strings.ToLower(backend))
DeployClusterWithValues(t, namespace, backend, valuesFile, nodes, false, useLocalCharts, version)
CheckClusterExpectedResources(t, namespace)
}
func loadRowsAndCheckCount(t *testing.T, namespace string, rowsToLoad, rowsExpected int) {
log.Println(Info(fmt.Sprintf("Loading %d rows and checking we have %d after", rowsToLoad, rowsExpected)))
LoadRowsInTable(t, rowsToLoad, namespace, medusaTestTable, medusaTestKeyspace)
CheckRowCountInTable(t, rowsExpected, namespace, medusaTestTable, medusaTestKeyspace)
}
func createMedusaSecretAndInstallDeps(t *testing.T, namespace, backend string) {
log.Println(Info("Creating medusa secret to access the backend"))
switch strings.ToLower(backend) {
case "minio":
DeployMinioAndCreateBucket(t, "k8ssandra-medusa")
CreateMedusaSecretWithFile(t, namespace, "secret/medusa_minio_secret.yaml")
case "s3":
CreateMedusaSecretWithFile(t, namespace, "~/medusa_secret_s3.yaml")
case "google_storage":
CreateMedusaSecretWithFile(t, namespace, "~/medusa_secret_gcs.yaml")
case "azure_blobs":
// Secret name must be medusa-bucket-key
// Secret entry must be medusa_azure_credentials.json
// See tests/integration/charts/cluster_with_medusa_azure_blobs.yaml
CreateMedusaSecretWithFile(t, namespace, "~/medusa_secret_azure.yaml")
}
}
func scaleUpCassandra(t *testing.T, namespace, backend string, nodes int) {
log.Println(Info("Scaling up Cassandra"))
backend = strings.ToLower(backend)
valuesFile := fmt.Sprintf("cluster_with_medusa_%s.yaml", backend)
// This is an ugly, short term hack to fix the failing upgrade tests. The tests need to
// be refactored. See https://github.com/k8ssandra/k8ssandra/issues/1053. I am adding
// an explicit check for minio here because that is the backend used in
// TestRestoreAfterUpgrade.
if backend == "minio" {
valuesFile = fmt.Sprintf("cluster_with_medusa_%s_upgraded.yaml", backend)
}
DeployClusterWithValues(t, namespace, backend, valuesFile, nodes, true, true, "")
}
// Monitoring scenario:
// - Install Traefik
// - Create a namespace
// - Register a cluster with three Cassandra nodes and one Stargate node
// - Check that Prometheus is reachable through its REST API
// - Check the number of active Prometheus targets
// - Check that Grafana is reachable through http
// - Terminate the namespace and delete the cluster
func TestMonitoringDeploymentScenario(t *testing.T) {
namespace := initializeCluster(t)
success := t.Run("Test Monitoring", func(t *testing.T) {
deployClusterForMonitoring(t, namespace)
t.Run("Test Prometheus", func(t *testing.T) {
testPrometheus(t, namespace)
})
t.Run("Test Grafana", func(t *testing.T) {
testGrafana(t, namespace)
})
})
cleanupCluster(t, namespace, success)
}
func deployClusterForMonitoring(t *testing.T, namespace string) {
DeployClusterWithValues(t, namespace, "", "cluster_with_stargate_and_monitoring.yaml", 3, false, true, "")
CheckClusterExpectedResources(t, namespace)
WaitForStargatePodReady(t, namespace)
}
// Prometheus tests
func testPrometheus(t *testing.T, namespace string) {
log.Println(Step("Testing Prometheus..."))
PodWithLabelsIsReady(t, namespace, map[string]string{"app": "prometheus"})
CheckPrometheusMetricExtraction(t)
expectedActiveTargets := CountMonitoredItems(t, namespace)
CheckPrometheusActiveTargets(t, expectedActiveTargets) // We're monitoring 3 Cassandra nodes and 1 Stargate instance
}
// Grafana tests
func testGrafana(t *testing.T, namespace string) {
log.Println(Step("Testing Grafana..."))
PodWithLabelsIsReady(t, namespace, map[string]string{"app.kubernetes.io/name": "grafana"})
CheckGrafanaIsReachable(t)
}
// Stargate scenario:
// - Install Traefik
// - Create a namespace
// - Register a cluster with three Cassandra nodes and one Stargate node
// - Check Stargate rollout
// - Create a document and read it back through the Stargate document API
// - Terminate the namespace and delete the cluster
func TestStargateDeploymentScenario(t *testing.T) {
namespace := initializeCluster(t)
success := t.Run("Test Stargate", func(t *testing.T) {
deployClusterForStargate(t, namespace)
testStargate(t, namespace)
})
cleanupCluster(t, namespace, success)
}
func deployClusterForStargate(t *testing.T, namespace string) {
DeployClusterWithValues(t, namespace, "", "cluster_with_stargate.yaml", 3, false, true, "")
CheckClusterExpectedResources(t, namespace)
WaitForStargatePodReady(t, namespace)
}
func testStargate(t *testing.T, namespace string) {
WaitForAuthEndpoint(t) // Wait for the auth endpoint to be reachable, this takes a little time after the Stargate rollout is complete
log.Println(Step("Writing data to the Stargate document API"))
token := GenerateStargateAuthToken(t, namespace)
docNamespace := CreateStargateDocumentNamespace(t, token)
log.Println(fmt.Sprintf("Created Stargate document namespace: %s", docNamespace))
documentId := WriteStargateDocument(t, token, docNamespace)
log.Println(fmt.Sprintf("Created document with id: %s", documentId))
CheckStargateDocumentExists(t, token, docNamespace, documentId)
}
func TestUpgradeScenario(t *testing.T) {
for _, startVersion := range upgradeStartVersions {
namespace := initializeCluster(t)
success := t.Run(fmt.Sprintf("Upgrade from %s", startVersion), func(t *testing.T) {
// Install first production version
DeployClusterWithValues(t, namespace, "", "cluster_with_reaper.yaml", 1, false, false, startVersion)
checkResourcePresenceForReaper(t, namespace)
waitForReaperPod(t, namespace)
// Upgrade to current version
DeployClusterWithValues(t, namespace, "", "cluster_with_reaper_upgraded.yaml", 1, true, true, "")
checkResourcePresenceForReaper(t, namespace)
waitForReaperPod(t, namespace)
checkReaperRegistered(t, namespace)
})
cleanupCluster(t, namespace, success)
if !success {
t.FailNow()
}
}
}
// Upgrade scenario:
// - Install Traefik
// - Create a namespace
// - Register a cluster with one Cassandra nodes and one Stargate node using the latest stable
// - Load data and take a backup using the stable version chart
// - Upgrade the cluster to the local version
// - Restore the backup using the local version chart and check that data is here
func TestRestoreAfterUpgrade(t *testing.T) {
const (
medusaBackend = "Minio"
backupName = "backup1"
)
for _, startVersion := range upgradeStartVersions {
if startVersion != "v1.0.0" {
// K8ssandra 1.0.0 didn't support Minio as Medusa backend
namespace := initializeCluster(t)
success := t.Run(fmt.Sprintf("Medusa upgrade from %s", startVersion), func(t *testing.T) {
createMedusaSecretAndInstallDeps(t, namespace, medusaBackend)
deployClusterForMedusa(t, namespace, medusaBackend, 1, false, startVersion)
testMedusa(t, namespace, medusaBackend, backupName, false)
})
cleanupCluster(t, namespace, success)
}
}
}
|
[
"\"CLUSTER_CLEANUP\"",
"\"CLUSTER_CLEANUP\""
] |
[] |
[
"CLUSTER_CLEANUP"
] |
[]
|
["CLUSTER_CLEANUP"]
|
go
| 1 | 0 | |
pkg/router/router.go
|
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package router
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httputil"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"sync"
restful "github.com/emicklei/go-restful"
"github.com/tektoncd/dashboard/pkg/endpoints"
logging "github.com/tektoncd/dashboard/pkg/logging"
corev1 "k8s.io/api/core/v1"
)
// ExtensionLabelKey is the label key required by services to be registered as a
// dashboard extension
const ExtensionLabelKey = "tekton-dashboard-extension"
// ExtensionLabelValue is the label value required by services to be registered
// as a dashboard extension
const ExtensionLabelValue = "true"
// ExtensionLabel is the full label required by services to be registered as a
// dashboard extension
const ExtensionLabel = ExtensionLabelKey + "=" + ExtensionLabelValue
// ExtensionURLKey is the annotation key that lists the valid extension paths, defaults to "/"
const ExtensionURLKey = "tekton-dashboard-endpoints"
// ExtensionEndpointDelimiter is the delimiter used between entries in the
// extension endpoints annotation value
const ExtensionEndpointDelimiter = "."
// ExtensionBundleLocationKey is the UI bundle location annotation key
const ExtensionBundleLocationKey = "tekton-dashboard-bundle-location"
// ExtensionDisplayNameKey is the display name annotation key
const ExtensionDisplayNameKey = "tekton-dashboard-display-name"
// ExtensionRoot is the URL root when accessing extensions
const ExtensionRoot = "/v1/extensions"
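// For illustration only (a sketch based on the constants above, not taken from
// the project docs): a Service is picked up as an extension when it carries the
// label and annotations below, with endpoints separated by ".":
//
//	metadata:
//	  labels:
//	    tekton-dashboard-extension: "true"
//	  annotations:
//	    tekton-dashboard-endpoints: "backend.health"
//	    tekton-dashboard-display-name: "Sample Extension"
//	    tekton-dashboard-bundle-location: "web/extension.sample.js"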
var webResourcesDir = os.Getenv("WEB_RESOURCES_DIR")
var webResourcesStaticPattern = regexp.MustCompile("^/([[:alnum:]]+\\.)?[[:alnum:]]+\\.((js)|(css)|(png))$")
var webResourcesStaticExcludePattern = regexp.MustCompile("^/favicon.png$")
// Register returns an HTTP handler that has the Dashboard REST API registered
func Register(resource endpoints.Resource) *Handler {
logging.Log.Info("Registering all endpoints")
h := &Handler{
Container: restful.NewContainer(),
uidExtensionMap: make(map[string]*Extension),
}
registerWeb(h.Container)
registerEndpoints(resource, h.Container)
registerPropertiesEndpoint(resource, h.Container)
registerWebsocket(resource, h.Container)
registerHealthProbe(resource, h.Container)
registerReadinessProbe(resource, h.Container)
registerKubeAPIProxy(resource, h.Container)
h.registerExtensions()
return h
}
// Handler is an HTTP handler with internal configuration to avoid global state
type Handler struct {
*restful.Container
// extensionWebService is the exposed dynamic route webservice that
// extensions are added to
extensionWebService *restful.WebService
uidExtensionMap map[string]*Extension
sync.RWMutex
}
// RegisterExtension registers a discovered extension service as a webservice
// to the container/mux. The extension should have a unique name
func (h *Handler) RegisterExtension(extensionService *corev1.Service) {
logging.Log.Infof("Adding Extension %s", extensionService.Name)
ext := newExtension(extensionService)
h.Lock()
defer h.Unlock()
// Add routes for extension service
for _, path := range ext.endpoints {
extensionPath := extensionPath(ext.Name, path)
// Routes
h.extensionWebService.Route(h.extensionWebService.GET(extensionPath).To(ext.handleExtension))
h.extensionWebService.Route(h.extensionWebService.POST(extensionPath).To(ext.handleExtension))
h.extensionWebService.Route(h.extensionWebService.PUT(extensionPath).To(ext.handleExtension))
h.extensionWebService.Route(h.extensionWebService.DELETE(extensionPath).To(ext.handleExtension))
// Subroutes
h.extensionWebService.Route(h.extensionWebService.GET(extensionPath + "/{var:*}").To(ext.handleExtension))
h.extensionWebService.Route(h.extensionWebService.POST(extensionPath + "/{var:*}").To(ext.handleExtension))
h.extensionWebService.Route(h.extensionWebService.PUT(extensionPath + "/{var:*}").To(ext.handleExtension))
h.extensionWebService.Route(h.extensionWebService.DELETE(extensionPath + "/{var:*}").To(ext.handleExtension))
}
h.uidExtensionMap[string(extensionService.UID)] = ext
}
// UnregisterExtension unregisters an extension. This should be called BEFORE
// registration of extensionService on informer update
func (h *Handler) UnregisterExtension(extensionService *corev1.Service) {
logging.Log.Infof("Removing extension %s", extensionService.Name)
h.Lock()
defer h.Unlock()
// Grab endpoints to remove from service
ext := h.uidExtensionMap[string(extensionService.UID)]
for _, path := range ext.endpoints {
extensionPath := extensionPath(ext.Name, path)
fullPath := fmt.Sprintf("%s/%s", h.extensionWebService.RootPath(), extensionPath)
// Routes must be removed individually and should correspond to the above registration
h.extensionWebService.RemoveRoute(fullPath, "GET")
h.extensionWebService.RemoveRoute(fullPath, "POST")
h.extensionWebService.RemoveRoute(fullPath, "PUT")
h.extensionWebService.RemoveRoute(fullPath, "DELETE")
h.extensionWebService.RemoveRoute(fullPath+"/{var:*}", "GET")
h.extensionWebService.RemoveRoute(fullPath+"/{var:*}", "POST")
h.extensionWebService.RemoveRoute(fullPath+"/{var:*}", "PUT")
h.extensionWebService.RemoveRoute(fullPath+"/{var:*}", "DELETE")
}
delete(h.uidExtensionMap, string(extensionService.UID))
}
// registerExtensions registers the WebService responsible for
// proxying to all extensions and also the endpoint to get all extensions
func (h *Handler) registerExtensions() {
logging.Log.Info("Adding API for Extensions")
extensionWebService := new(restful.WebService)
extensionWebService.SetDynamicRoutes(true)
extensionWebService.
Path(ExtensionRoot).
Consumes(restful.MIME_JSON).
Produces(restful.MIME_JSON)
extensionWebService.Route(extensionWebService.GET("").To(h.getAllExtensions))
h.Add(extensionWebService)
h.extensionWebService = extensionWebService
}
// RedactedExtension is the JSON-facing view of an Extension; it omits the
// internal proxy URL and port, exposing only the name, display name and
// bundle location.
type RedactedExtension struct {
Name string `json:"name"`
DisplayName string `json:"displayname"`
BundleLocation string `json:"bundlelocation"`
endpoints []string
}
// getExtensions gets all of the registered extensions on the handler
func (h *Handler) getExtensions() []RedactedExtension {
h.RLock()
defer h.RUnlock()
extensions := []RedactedExtension{}
for _, e := range h.uidExtensionMap {
redactedExtension := RedactedExtension{
Name: e.Name,
DisplayName: e.DisplayName,
BundleLocation: e.BundleLocation,
endpoints: e.endpoints,
}
extensions = append(extensions, redactedExtension)
}
return extensions
}
// getAllExtensions returns all of the extensions within the install namespace
func (h *Handler) getAllExtensions(request *restful.Request, response *restful.Response) {
logging.Log.Debugf("In getAllExtensions")
extensions := h.getExtensions()
logging.Log.Debugf("Extensions: %+v", extensions)
response.WriteEntity(extensions)
}
func registerKubeAPIProxy(r endpoints.Resource, container *restful.Container) {
proxy := new(restful.WebService)
proxy.Consumes(restful.MIME_JSON, "text/plain", "application/json-patch+json").
Produces(restful.MIME_JSON, "text/plain", "application/json-patch+json").
Path("/proxy")
logging.Log.Info("Adding Kube API Proxy")
proxy.Route(proxy.GET("/{subpath:*}").To(r.ProxyRequest))
proxy.Route(proxy.POST("/{subpath:*}").To(r.ProxyRequest))
proxy.Route(proxy.PUT("/{subpath:*}").To(r.ProxyRequest))
proxy.Route(proxy.DELETE("/{subpath:*}").To(r.ProxyRequest))
proxy.Route(proxy.PATCH("/{subpath:*}").To(r.ProxyRequest))
container.Add(proxy)
}
func registerWeb(container *restful.Container) {
logging.Log.Info("Adding Web API")
fs := http.FileServer(http.Dir(webResourcesDir))
container.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if webResourcesStaticPattern.Match([]byte(r.URL.Path)) && !webResourcesStaticExcludePattern.Match([]byte(r.URL.Path)) {
// Static resources are immutable and have a content hash in their URL
w.Header().Set("Cache-Control", "public, max-age=31536000, immutable")
}
w.Header().Set("X-Frame-Options", "deny")
fs.ServeHTTP(w, r)
}))
}
// registerEndpoints registers the APIs to interface with core Tekton/K8s pieces
func registerEndpoints(r endpoints.Resource, container *restful.Container) {
wsv1 := new(restful.WebService)
wsv1.
Path("/v1/namespaces").
Consumes(restful.MIME_JSON, "text/plain").
Produces(restful.MIME_JSON, "text/plain")
logging.Log.Info("Adding v1, and API for k8s resources and pipelines")
wsv1.Route(wsv1.POST("/{namespace}/rerun").To(r.RerunPipelineRun))
wsv1.Route(wsv1.GET("/{namespace}/ingress").To(r.GetIngress))
wsv1.Route(wsv1.GET("/{namespace}/endpoints").To(r.GetEndpoints))
wsv1.Route(wsv1.GET("/{namespace}/taskrunlogs/{name}").To(r.GetTaskRunLog))
wsv1.Route(wsv1.GET("/{namespace}/pipelinerunlogs/{name}").To(r.GetPipelineRunLog))
container.Add(wsv1)
}
// registerWebsocket registers a websocket with which we can send log
// information
func registerWebsocket(r endpoints.Resource, container *restful.Container) {
logging.Log.Info("Adding API for websocket")
wsv2 := new(restful.WebService)
wsv2.
Path("/v1/websockets").
Consumes(restful.MIME_JSON).
Produces(restful.MIME_JSON)
wsv2.Route(wsv2.GET("/resources").To(r.EstablishResourcesWebsocket))
container.Add(wsv2)
}
// registerHealthProbe registers the /health endpoint
func registerHealthProbe(r endpoints.Resource, container *restful.Container) {
logging.Log.Info("Adding API for health")
wsv3 := new(restful.WebService)
wsv3.
Path("/health")
wsv3.Route(wsv3.GET("").To(r.CheckHealth))
container.Add(wsv3)
}
// registerReadinessProbe registers the /readiness endpoint
func registerReadinessProbe(r endpoints.Resource, container *restful.Container) {
logging.Log.Info("Adding API for readiness")
wsv4 := new(restful.WebService)
wsv4.
Path("/readiness")
wsv4.Route(wsv4.GET("").To(r.CheckHealth))
container.Add(wsv4)
}
// registerPropertiesEndpoint adds the endpoint for obtaining any properties we
// want to serve.
func registerPropertiesEndpoint(r endpoints.Resource, container *restful.Container) {
logging.Log.Info("Adding API for properties")
wsDefaults := new(restful.WebService)
wsDefaults.
Path("/v1/properties").
Consumes(restful.MIME_JSON, "text/plain").
Produces(restful.MIME_JSON, "text/plain")
wsDefaults.Route(wsDefaults.GET("/").To(r.GetProperties))
container.Add(wsDefaults)
}
// Extension is the back-end representation of an extension. A service is an
// extension when it is in the dashboard namespace with the dashboard label
// key/value pair. Endpoints are specified with the extension URL annotation
type Extension struct {
Name string `json:"name"`
URL *url.URL `json:"url"`
Port string `json:"port"`
DisplayName string `json:"displayname"`
BundleLocation string `json:"bundlelocation"`
endpoints []string
}
// newExtension returns a new extension
func newExtension(extService *corev1.Service) *Extension {
port := getServicePort(extService)
url, _ := url.ParseRequestURI(fmt.Sprintf("http://%s:%s", extService.Spec.ClusterIP, port))
return &Extension{
Name: extService.ObjectMeta.Name,
URL: url,
Port: port,
DisplayName: extService.ObjectMeta.Annotations[ExtensionDisplayNameKey],
BundleLocation: extService.ObjectMeta.Annotations[ExtensionBundleLocationKey],
endpoints: getExtensionEndpoints(extService.ObjectMeta.Annotations[ExtensionURLKey]),
}
}
// MarshalJSON marshals the Extension into JSON. This override is explicitly
// declared since url.URL will marshal each component, where a single field of
// the string representation is desired. An alias for Extension is used to
// prevent a stack overflow
func (e Extension) MarshalJSON() ([]byte, error) {
type Alias Extension
return json.Marshal(&struct {
URL string `json:"url"`
*Alias
}{
URL: e.URL.String(),
Alias: (*Alias)(&e),
})
}
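// The marshalled form of an Extension therefore looks roughly like this
// (values illustrative):
//
//	{"url":"http://10.1.2.3:8080","name":"sample","port":"8080",
//	 "displayname":"Sample Extension","bundlelocation":"web/extension.sample.js"}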
// handleExtension handles requests to the extension service by stripping the
// extension root prefix from the request URL and reverse proxying
func (e Extension) handleExtension(request *restful.Request, response *restful.Response) {
logging.Log.Debugf("Request Path: %s %+v", request.Request.Method, request.Request.URL.Path)
request.Request.URL.Path = strings.TrimPrefix(request.Request.URL.Path, fmt.Sprintf("%s/%s", ExtensionRoot, e.Name))
// Explicitly route to root, better visibility in logs
if request.Request.URL.Path == "" {
request.Request.URL.Path = "/"
}
logging.Log.Debugf("Proxy Path: %s %+v", request.Request.Method, request.Request.URL.Path)
proxy := httputil.NewSingleHostReverseProxy(e.URL)
proxy.ServeHTTP(response, request.Request)
}
// getExtensionEndpoints sanitizes the delimited endpoints
func getExtensionEndpoints(delimited string) []string {
endpoints := strings.Split(delimited, ExtensionEndpointDelimiter)
if endpoints == nil {
return []string{""}
}
for i := range endpoints {
// Remove trailing/leading slashes
endpoints[i] = strings.TrimSuffix(endpoints[i], "/")
endpoints[i] = strings.TrimPrefix(endpoints[i], "/")
}
return endpoints
}
// extensionPath constructs the extension path (excluding the root) used by
// restful.Route
func extensionPath(extName, path string) string {
return strings.TrimSuffix(fmt.Sprintf("%s/%s", extName, path), "/")
}
// getServicePort returns the target port if it exists, or the source port otherwise
func getServicePort(svc *corev1.Service) string {
if svc.Spec.Ports[0].TargetPort.StrVal != "" {
return svc.Spec.Ports[0].TargetPort.String()
}
return strconv.Itoa(int(svc.Spec.Ports[0].Port))
}
|
[
"\"WEB_RESOURCES_DIR\""
] |
[] |
[
"WEB_RESOURCES_DIR"
] |
[]
|
["WEB_RESOURCES_DIR"]
|
go
| 1 | 0 | |
vendor/github.com/openshift/origin/pkg/generate/app/sourcelookup.go
|
package app
import (
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/golang/glog"
s2iapi "github.com/openshift/source-to-image/pkg/api"
s2igit "github.com/openshift/source-to-image/pkg/scm/git"
s2iutil "github.com/openshift/source-to-image/pkg/util"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/validation"
buildapi "github.com/openshift/origin/pkg/build/apis/build"
"github.com/openshift/origin/pkg/generate"
"github.com/openshift/origin/pkg/generate/git"
"github.com/openshift/origin/pkg/generate/source"
)
type Dockerfile interface {
AST() *parser.Node
Contents() string
}
func NewDockerfileFromFile(path string) (Dockerfile, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
if len(data) == 0 {
return nil, fmt.Errorf("Dockerfile %q is empty", path)
}
return NewDockerfile(string(data))
}
func NewDockerfile(contents string) (Dockerfile, error) {
if len(contents) == 0 {
return nil, errors.New("Dockerfile is empty")
}
node, err := parser.Parse(strings.NewReader(contents))
if err != nil {
return nil, err
}
return dockerfileContents{node, contents}, nil
}
type dockerfileContents struct {
ast *parser.Node
contents string
}
func (d dockerfileContents) AST() *parser.Node {
return d.ast
}
func (d dockerfileContents) Contents() string {
return d.contents
}
// IsRemoteRepository checks whether the provided string is a remote repository or not
func IsRemoteRepository(s string) (bool, error) {
if !s2igit.New(s2iutil.NewFileSystem()).ValidCloneSpecRemoteOnly(s) {
glog.V(5).Infof("%s is not a valid remote git clone spec", s)
return false, nil
}
url, err := url.Parse(s)
if err != nil {
glog.V(5).Infof("%s is not a valid url: %v", s, err)
return false, err
}
url.Fragment = ""
gitRepo := git.NewRepository()
// try up to 3 times to reach the remote git repo
for i := 0; i < 3; i++ {
_, _, err = gitRepo.ListRemote(url.String())
if err == nil {
break
}
}
if err != nil {
glog.V(5).Infof("could not list git remotes for %s: %v", s, err)
return false, err
}
glog.V(5).Infof("%s is a valid remote git repository", s)
return true, nil
}
// SourceRepository represents a code repository that may be the target of a build.
type SourceRepository struct {
location string
url url.URL
localDir string
remoteURL *url.URL
contextDir string
secrets []buildapi.SecretBuildSource
info *SourceRepositoryInfo
sourceImage ComponentReference
sourceImageFrom string
sourceImageTo string
usedBy []ComponentReference
strategy generate.Strategy
ignoreRepository bool
binary bool
forceAddDockerfile bool
requiresAuth bool
}
// NewSourceRepository creates a reference to a local or remote source code repository from
// a URL or path.
func NewSourceRepository(s string, strategy generate.Strategy) (*SourceRepository, error) {
location, err := git.ParseRepository(s)
if err != nil {
return nil, err
}
return &SourceRepository{
location: s,
url: *location,
strategy: strategy,
}, nil
}
// NewSourceRepositoryWithDockerfile creates a reference to a local source code repository with
// the provided relative Dockerfile path (defaults to "Dockerfile").
func NewSourceRepositoryWithDockerfile(s, dockerfilePath string) (*SourceRepository, error) {
r, err := NewSourceRepository(s, generate.StrategyDocker)
if err != nil {
return nil, err
}
if len(dockerfilePath) == 0 {
dockerfilePath = "Dockerfile"
}
f, err := NewDockerfileFromFile(filepath.Join(s, dockerfilePath))
if err != nil {
return nil, err
}
if r.info == nil {
r.info = &SourceRepositoryInfo{}
}
r.info.Dockerfile = f
return r, nil
}
// NewSourceRepositoryForDockerfile creates a source repository that is set up to use
// the contents of a Dockerfile as the input of the build.
func NewSourceRepositoryForDockerfile(contents string) (*SourceRepository, error) {
s := &SourceRepository{
ignoreRepository: true,
strategy: generate.StrategyDocker,
}
err := s.AddDockerfile(contents)
return s, err
}
// NewBinarySourceRepository creates a source repository that is configured for binary
// input.
func NewBinarySourceRepository(strategy generate.Strategy) *SourceRepository {
return &SourceRepository{
binary: true,
ignoreRepository: true,
strategy: strategy,
}
}
// TODO: this doesn't really match the others - this should likely be a different type of
// object that is associated with a build or component.
func NewImageSourceRepository(compRef ComponentReference, from, to string) *SourceRepository {
return &SourceRepository{
sourceImage: compRef,
sourceImageFrom: from,
sourceImageTo: to,
ignoreRepository: true,
location: compRef.Input().From,
strategy: generate.StrategySource,
}
}
// UsedBy sets up which component uses the source repository
func (r *SourceRepository) UsedBy(ref ComponentReference) {
r.usedBy = append(r.usedBy, ref)
}
// Remote checks whether the source repository is remote
func (r *SourceRepository) Remote() bool {
return r.url.Scheme != "file"
}
// InUse checks if the source repository is in use
func (r *SourceRepository) InUse() bool {
return len(r.usedBy) > 0
}
// SetStrategy sets the source repository strategy
func (r *SourceRepository) SetStrategy(strategy generate.Strategy) {
r.strategy = strategy
}
// GetStrategy returns the source repository strategy
func (r *SourceRepository) GetStrategy() generate.Strategy {
return r.strategy
}
func (r *SourceRepository) String() string {
return r.location
}
// Detect clones source locally if not already local and runs code detection
// with the given detector.
func (r *SourceRepository) Detect(d Detector, dockerStrategy bool) error {
if r.info != nil {
return nil
}
path, err := r.LocalPath()
if err != nil {
return err
}
r.info, err = d.Detect(path, dockerStrategy)
if err != nil {
return err
}
if err = r.DetectAuth(); err != nil {
return err
}
return nil
}
// SetInfo sets the source repository info. This is to facilitate certain tests.
func (r *SourceRepository) SetInfo(info *SourceRepositoryInfo) {
r.info = info
}
// Info returns the source repository info generated on code detection
func (r *SourceRepository) Info() *SourceRepositoryInfo {
return r.info
}
// LocalPath returns the local path of the source repository
func (r *SourceRepository) LocalPath() (string, error) {
if len(r.localDir) > 0 {
return r.localDir, nil
}
switch {
case r.url.Scheme == "file":
r.localDir = filepath.Join(r.url.Path, r.contextDir)
default:
gitRepo := git.NewRepository()
var err error
if r.localDir, err = ioutil.TempDir("", "gen"); err != nil {
return "", err
}
localURL, ref := cloneURLAndRef(&r.url)
r.localDir, err = CloneAndCheckoutSources(gitRepo, localURL.String(), ref, r.localDir, r.contextDir)
if err != nil {
return "", err
}
}
if _, err := os.Stat(r.localDir); os.IsNotExist(err) {
return "", fmt.Errorf("supplied context directory '%s' does not exist in '%s'", r.contextDir, r.url.String())
}
return r.localDir, nil
}
func cloneURLAndRef(url *url.URL) (*url.URL, string) {
localURL := *url
ref := localURL.Fragment
localURL.Fragment = ""
return &localURL, ref
}
// DetectAuth returns an error if the source repository cannot be cloned
// without the current user's environment. The following changes are made to the
// environment:
// 1) The HOME directory is set to a temporary dir to avoid loading any settings in .gitconfig
// 2) The GIT_SSH variable is set to /dev/null so the regular SSH keys are not used
// (changing the HOME directory is not enough).
// 3) GIT_CONFIG_NOSYSTEM prevents git from loading system-wide config
// 4) GIT_ASKPASS to prevent git from prompting for a user/password
func (r *SourceRepository) DetectAuth() error {
url, ok, err := r.RemoteURL()
if err != nil {
return err
}
if !ok {
return nil // No auth needed, we can't find a remote URL
}
tempHome, err := ioutil.TempDir("", "githome")
if err != nil {
return err
}
defer os.RemoveAll(tempHome)
tempSrc, err := ioutil.TempDir("", "gen")
if err != nil {
return err
}
defer os.RemoveAll(tempSrc)
env := []string{
fmt.Sprintf("HOME=%s", tempHome),
"GIT_SSH=/dev/null",
"GIT_CONFIG_NOSYSTEM=true",
"GIT_ASKPASS=true",
}
if runtime.GOOS == "windows" {
env = append(env,
fmt.Sprintf("ProgramData=%s", os.Getenv("ProgramData")),
fmt.Sprintf("SystemRoot=%s", os.Getenv("SystemRoot")),
)
}
gitRepo := git.NewRepositoryWithEnv(env)
localURL, ref := cloneURLAndRef(url)
_, err = CloneAndCheckoutSources(gitRepo, localURL.String(), ref, tempSrc, "")
if err != nil {
r.requiresAuth = true
}
return nil
}
// RemoteURL returns the remote URL of the source repository
func (r *SourceRepository) RemoteURL() (*url.URL, bool, error) {
if r.remoteURL != nil {
return r.remoteURL, true, nil
}
switch r.url.Scheme {
case "file":
gitRepo := git.NewRepository()
remote, ok, err := gitRepo.GetOriginURL(r.url.Path)
if err != nil && err != git.ErrGitNotAvailable {
return nil, false, err
}
if !ok {
return nil, ok, nil
}
ref := gitRepo.GetRef(r.url.Path)
if len(ref) > 0 {
remote = fmt.Sprintf("%s#%s", remote, ref)
}
if r.remoteURL, err = git.ParseRepository(remote); err != nil {
return nil, false, err
}
default:
r.remoteURL = &r.url
}
return r.remoteURL, true, nil
}
// SetContextDir sets the context directory to use for the source repository
func (r *SourceRepository) SetContextDir(dir string) {
r.contextDir = dir
}
// ContextDir returns the context directory of the source repository
func (r *SourceRepository) ContextDir() string {
return r.contextDir
}
// Secrets returns the secrets
func (r *SourceRepository) Secrets() []buildapi.SecretBuildSource {
return r.secrets
}
// SetSourceImage sets the source(input) image for a repository
func (r *SourceRepository) SetSourceImage(c ComponentReference) {
r.sourceImage = c
}
// SetSourceImagePath sets the source/destination to use when copying from the SourceImage
func (r *SourceRepository) SetSourceImagePath(source, dest string) {
r.sourceImageFrom = source
r.sourceImageTo = dest
}
// AddDockerfile adds the Dockerfile contents to the SourceRepository and
// configure it to build with Docker strategy. Returns an error if the contents
// are invalid.
func (r *SourceRepository) AddDockerfile(contents string) error {
dockerfile, err := NewDockerfile(contents)
if err != nil {
return err
}
if r.info == nil {
r.info = &SourceRepositoryInfo{}
}
r.info.Dockerfile = dockerfile
r.SetStrategy(generate.StrategyDocker)
r.forceAddDockerfile = true
return nil
}
// AddBuildSecrets adds the defined secrets into a build. The input format for
// the secrets is "<secretName>:<destinationDir>". The destinationDir is
// optional and when not specified the default is the current working directory.
func (r *SourceRepository) AddBuildSecrets(secrets []string) error {
injections := s2iapi.VolumeList{}
r.secrets = []buildapi.SecretBuildSource{}
for _, in := range secrets {
if err := injections.Set(in); err != nil {
return err
}
}
secretExists := func(name string) bool {
for _, s := range r.secrets {
if s.Secret.Name == name {
return true
}
}
return false
}
for _, in := range injections {
if r.GetStrategy() == generate.StrategyDocker && filepath.IsAbs(in.Destination) {
return fmt.Errorf("for the docker strategy, the secret destination directory %q must be a relative path", in.Destination)
}
if len(validation.ValidateSecretName(in.Source, false)) != 0 {
return fmt.Errorf("the %q must be valid secret name", in.Source)
}
if secretExists(in.Source) {
return fmt.Errorf("the %q secret can be used just once", in.Source)
}
r.secrets = append(r.secrets, buildapi.SecretBuildSource{
Secret: kapi.LocalObjectReference{Name: in.Source},
DestinationDir: in.Destination,
})
}
return nil
}
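// Illustrative call (a sketch, not from the original sources): mount the
// "mysecret" secret under app/config and "othersecret" in the working directory:
//
//	err := repo.AddBuildSecrets([]string{"mysecret:app/config", "othersecret"})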
// SourceRepositories is a list of SourceRepository objects
type SourceRepositories []*SourceRepository
func (rr SourceRepositories) String() string {
repos := []string{}
for _, r := range rr {
repos = append(repos, r.String())
}
return strings.Join(repos, ",")
}
// NotUsed returns the list of SourceRepositories that are not used
func (rr SourceRepositories) NotUsed() SourceRepositories {
notUsed := SourceRepositories{}
for _, r := range rr {
if !r.InUse() {
notUsed = append(notUsed, r)
}
}
return notUsed
}
// SourceRepositoryInfo contains info about a source repository
type SourceRepositoryInfo struct {
Path string
Types []SourceLanguageType
Dockerfile Dockerfile
Jenkinsfile bool
}
// Terms returns which languages the source repository was
// built with
func (info *SourceRepositoryInfo) Terms() []string {
terms := []string{}
for i := range info.Types {
terms = append(terms, info.Types[i].Term())
}
return terms
}
// SourceLanguageType contains info about the type of the language
// a source repository is built in
type SourceLanguageType struct {
Platform string
Version string
}
// Term returns a search term for the given source language type
// the term will be in the form of language:version
func (t *SourceLanguageType) Term() string {
if len(t.Version) == 0 {
return t.Platform
}
return fmt.Sprintf("%s:%s", t.Platform, t.Version)
}
// Detector is an interface for detecting information about a
// source repository
type Detector interface {
Detect(dir string, dockerStrategy bool) (*SourceRepositoryInfo, error)
}
// SourceRepositoryEnumerator implements the Detector interface
type SourceRepositoryEnumerator struct {
Detectors source.Detectors
DockerfileTester generate.Tester
JenkinsfileTester generate.Tester
}
// Detect extracts source code information about the provided source repository
func (e SourceRepositoryEnumerator) Detect(dir string, noSourceDetection bool) (*SourceRepositoryInfo, error) {
info := &SourceRepositoryInfo{
Path: dir,
}
// no point in doing source-type detection if the requested build strategy
// is docker or pipeline
if !noSourceDetection {
for _, d := range e.Detectors {
if detected := d(dir); detected != nil {
info.Types = append(info.Types, SourceLanguageType{
Platform: detected.Platform,
Version: detected.Version,
})
}
}
}
if path, ok, err := e.DockerfileTester.Has(dir); err == nil && ok {
dockerfile, err := NewDockerfileFromFile(path)
if err != nil {
return nil, err
}
info.Dockerfile = dockerfile
}
if _, ok, err := e.JenkinsfileTester.Has(dir); err == nil && ok {
info.Jenkinsfile = true
}
return info, nil
}
// StrategyAndSourceForRepository returns the build strategy and source code reference
// of the provided source repository
// TODO: user should be able to choose whether to download a remote source ref for
// more info
func StrategyAndSourceForRepository(repo *SourceRepository, image *ImageRef) (*BuildStrategyRef, *SourceRef, error) {
strategy := &BuildStrategyRef{
Base: image,
Strategy: repo.strategy,
}
source := &SourceRef{
Binary: repo.binary,
Secrets: repo.secrets,
RequiresAuth: repo.requiresAuth,
}
if repo.sourceImage != nil {
srcImageRef, err := InputImageFromMatch(repo.sourceImage.Input().ResolvedMatch)
if err != nil {
return nil, nil, err
}
source.SourceImage = srcImageRef
source.ImageSourcePath = repo.sourceImageFrom
source.ImageDestPath = repo.sourceImageTo
}
if (repo.ignoreRepository || repo.forceAddDockerfile) && repo.Info() != nil && repo.Info().Dockerfile != nil {
source.DockerfileContents = repo.Info().Dockerfile.Contents()
}
if !repo.ignoreRepository {
remoteURL, ok, err := repo.RemoteURL()
if err != nil {
return nil, nil, fmt.Errorf("cannot obtain remote URL for repository at %s", repo.location)
}
if ok {
source.URL = remoteURL
source.Ref = remoteURL.Fragment
} else {
source.Binary = true
}
source.ContextDir = repo.ContextDir()
}
return strategy, source, nil
}
// CloneAndCheckoutSources clones the remote repository using either a regular
// git clone or a shallow git clone, depending on whether a "ref" is provided
// (a shallow clone cannot be used when checking out a specific ref).
// This function will return the full path to the buildable sources, including
// the context directory if specified.
func CloneAndCheckoutSources(repo git.Repository, remote, ref, localDir, contextDir string) (string, error) {
if len(ref) == 0 {
glog.V(5).Infof("No source ref specified, using shallow git clone")
if err := repo.CloneWithOptions(localDir, remote, git.Shallow, "--recursive"); err != nil {
return "", fmt.Errorf("shallow cloning repository %q to %q failed: %v", remote, localDir, err)
}
} else {
glog.V(5).Infof("Requested ref %q, performing full git clone and git checkout", ref)
if err := repo.Clone(localDir, remote); err != nil {
return "", fmt.Errorf("cloning repository %q to %q failed: %v", remote, localDir, err)
}
}
if len(ref) > 0 {
if err := repo.Checkout(localDir, ref); err != nil {
err = repo.PotentialPRRetryAsFetch(localDir, remote, ref, err)
if err != nil {
return "", fmt.Errorf("unable to checkout ref %q in %q repository: %v", ref, remote, err)
}
}
}
if len(contextDir) > 0 {
glog.V(5).Infof("Using context directory %q. The full source path is %q", contextDir, filepath.Join(localDir, contextDir))
}
return filepath.Join(localDir, contextDir), nil
}
|
[
"\"ProgramData\"",
"\"SystemRoot\""
] |
[] |
[
"SystemRoot",
"ProgramData"
] |
[]
|
["SystemRoot", "ProgramData"]
|
go
| 2 | 0 | |
log.go
|
// Copyright (c) 2017 The Aero blockchain developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package abclog
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
)
// defaultFlags specifies changes to the default logger behavior. It is set
// during package init and configured using the LOGFLAGS environment variable.
// New logger backends can override these default flags using WithFlags.
var defaultFlags uint32
// Flags to modify Backend's behavior.
const (
// Llongfile modifies the logger output to include full path and line number
// of the logging callsite, e.g. /a/b/c/main.go:123.
Llongfile uint32 = 1 << iota
// Lshortfile modifies the logger output to include filename and line number
// of the logging callsite, e.g. main.go:123. Overrides Llongfile.
Lshortfile
)
// Read logger flags from the LOGFLAGS environment variable. Multiple flags can
// be set at once, separated by commas.
func init() {
for _, f := range strings.Split(os.Getenv("LOGFLAGS"), ",") {
switch f {
case "longfile":
defaultFlags |= Llongfile
case "shortfile":
defaultFlags |= Lshortfile
}
}
}
// Level is the level at which a logger is configured. All messages sent
// to a level which is below the current level are filtered.
type Level uint32
// Level constants.
const (
LevelTrace Level = iota
LevelDebug
LevelInfo
LevelWarn
LevelError
LevelCritical
LevelOff
)
// levelStrs defines the human-readable names for each logging level.
var levelStrs = [...]string{"TRC", "DBG", "INF", "WRN", "ERR", "CRT", "OFF"}
// LevelFromString returns a level based on the input string s. If the input
// can't be interpreted as a valid log level, the info level and false is
// returned.
func LevelFromString(s string) (l Level, ok bool) {
switch strings.ToLower(s) {
case "trace", "trc":
return LevelTrace, true
case "debug", "dbg":
return LevelDebug, true
case "info", "inf":
return LevelInfo, true
case "warn", "wrn":
return LevelWarn, true
case "error", "err":
return LevelError, true
case "critical", "crt":
return LevelCritical, true
case "off":
return LevelOff, true
default:
return LevelInfo, false
}
}
// String returns the tag of the logger used in log messages, or "OFF" if
// the level will not produce any log output.
func (l Level) String() string {
if l >= LevelOff {
return "OFF"
}
return levelStrs[l]
}
// NewBackend creates a logger backend from a Writer.
func NewBackend(w io.Writer, opts ...BackendOption) *Backend {
b := &Backend{w: w, flag: defaultFlags}
for _, o := range opts {
o(b)
}
return b
}
// Backend is a logging backend. Subsystems created from the backend write to
// the backend's Writer. Backend provides atomic writes to the Writer from all
// subsystems.
type Backend struct {
w io.Writer
mu sync.Mutex // ensures atomic writes
flag uint32
}
// BackendOption is a function used to modify the behavior of a Backend.
type BackendOption func(b *Backend)
// WithFlags configures a Backend to use the specified flags rather than using
// the package's defaults as determined through the LOGFLAGS environment
// variable.
func WithFlags(flags uint32) BackendOption {
return func(b *Backend) {
b.flag = flags
}
}
// bufferPool defines a concurrent safe free list of byte slices used to provide
// temporary buffers for formatting log messages prior to outputting them.
var bufferPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 0, 120)
return &b // pointer to slice to avoid boxing alloc
},
}
// buffer returns a byte slice from the free list. A new buffer is allocated if
// there are not any available on the free list. The returned byte slice should
// be returned to the free list by using the recycleBuffer function when the
// caller is done with it.
func buffer() *[]byte {
return bufferPool.Get().(*[]byte)
}
// recycleBuffer puts the provided byte slice, which should have been obtained via
// the buffer function, back on the free list.
func recycleBuffer(b *[]byte) {
*b = (*b)[:0]
bufferPool.Put(b)
}
// From stdlib log package.
// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid
// zero-padding.
func itoa(buf *[]byte, i int, wid int) {
// Assemble decimal in reverse order.
var b [20]byte
bp := len(b) - 1
for i >= 10 || wid > 1 {
wid--
q := i / 10
b[bp] = byte('0' + i - q*10)
bp--
i = q
}
// i < 10
b[bp] = byte('0' + i)
*buf = append(*buf, b[bp:]...)
}
// Appends a header in the default format 'YYYY-MM-DD hh:mm:ss.sss [LVL] TAG: '.
// If either of the Lshortfile or Llongfile flags is specified, the file name
// and line number are included after the tag and before the final colon.
func formatHeader(buf *[]byte, t time.Time, lvl, tag string, file string, line int) {
year, month, day := t.Date()
hour, min, sec := t.Clock()
ms := t.Nanosecond() / 1e6
itoa(buf, year, 4)
*buf = append(*buf, '-')
itoa(buf, int(month), 2)
*buf = append(*buf, '-')
itoa(buf, day, 2)
*buf = append(*buf, ' ')
itoa(buf, hour, 2)
*buf = append(*buf, ':')
itoa(buf, min, 2)
*buf = append(*buf, ':')
itoa(buf, sec, 2)
*buf = append(*buf, '.')
itoa(buf, ms, 3)
*buf = append(*buf, " ["...)
*buf = append(*buf, lvl...)
*buf = append(*buf, "] "...)
*buf = append(*buf, tag...)
if file != "" {
*buf = append(*buf, ' ')
*buf = append(*buf, file...)
*buf = append(*buf, ':')
itoa(buf, line, -1)
}
*buf = append(*buf, ": "...)
}
// calldepth is the call depth of the callsite function relative to the
// caller of the subsystem logger. It is used to recover the filename and line
// number of the logging call if either the short or long file flags are
// specified.
const calldepth = 3
// callsite returns the file name and line number of the callsite to the
// subsystem logger.
func callsite(flag uint32) (string, int) {
_, file, line, ok := runtime.Caller(calldepth)
if !ok {
return "???", 0
}
if flag&Lshortfile != 0 {
short := file
for i := len(file) - 1; i > 0; i-- {
if os.IsPathSeparator(file[i]) {
short = file[i+1:]
break
}
}
file = short
}
return file, line
}
// print outputs a log message to the writer associated with the backend after
// creating a prefix for the given level and tag according to the formatHeader
// function and formatting the provided arguments using the default formatting
// rules.
func (b *Backend) print(lvl, tag string, args ...interface{}) {
t := time.Now() // get as early as possible
bytebuf := buffer()
var file string
var line int
if b.flag&(Lshortfile|Llongfile) != 0 {
file, line = callsite(b.flag)
}
formatHeader(bytebuf, t, lvl, tag, file, line)
buf := bytes.NewBuffer(*bytebuf)
fmt.Fprintln(buf, args...)
*bytebuf = buf.Bytes()
b.mu.Lock()
b.w.Write(*bytebuf)
b.mu.Unlock()
recycleBuffer(bytebuf)
}
// printf outputs a log message to the writer associated with the backend after
// creating a prefix for the given level and tag according to the formatHeader
// function and formatting the provided arguments according to the given format
// specifier.
func (b *Backend) printf(lvl, tag string, format string, args ...interface{}) {
t := time.Now() // get as early as possible
bytebuf := buffer()
var file string
var line int
if b.flag&(Lshortfile|Llongfile) != 0 {
file, line = callsite(b.flag)
}
formatHeader(bytebuf, t, lvl, tag, file, line)
buf := bytes.NewBuffer(*bytebuf)
fmt.Fprintf(buf, format, args...)
*bytebuf = append(buf.Bytes(), '\n')
b.mu.Lock()
b.w.Write(*bytebuf)
b.mu.Unlock()
recycleBuffer(bytebuf)
}
// Logger returns a new logger for a particular subsystem that writes to the
// Backend b. A tag describes the subsystem and is included in all log
// messages. The logger uses the info verbosity level by default.
func (b *Backend) Logger(subsystemTag string) Logger {
return &slog{LevelInfo, subsystemTag, b}
}
// slog is a subsystem logger for a Backend. Implements the Logger interface.
type slog struct {
lvl Level // atomic
tag string
b *Backend
}
// Trace formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelTrace.
//
// This is part of the Logger interface implementation.
func (l *slog) Trace(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelTrace {
l.b.print("TRC", l.tag, args...)
}
}
// Tracef formats message according to format specifier, prepends the prefix as
// necessary, and writes to log with LevelTrace.
//
// This is part of the Logger interface implementation.
func (l *slog) Tracef(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelTrace {
l.b.printf("TRC", l.tag, format, args...)
}
}
// Debug formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelDebug.
//
// This is part of the Logger interface implementation.
func (l *slog) Debug(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelDebug {
l.b.print("DBG", l.tag, args...)
}
}
// Debugf formats message according to format specifier, prepends the prefix as
// necessary, and writes to log with LevelDebug.
//
// This is part of the Logger interface implementation.
func (l *slog) Debugf(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelDebug {
l.b.printf("DBG", l.tag, format, args...)
}
}
// Info formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelInfo.
//
// This is part of the Logger interface implementation.
func (l *slog) Info(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelInfo {
l.b.print("INF", l.tag, args...)
}
}
// Infof formats message according to format specifier, prepends the prefix as
// necessary, and writes to log with LevelInfo.
//
// This is part of the Logger interface implementation.
func (l *slog) Infof(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelInfo {
l.b.printf("INF", l.tag, format, args...)
}
}
// Warn formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelWarn.
//
// This is part of the Logger interface implementation.
func (l *slog) Warn(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelWarn {
l.b.print("WRN", l.tag, args...)
}
}
// Warnf formats message according to format specifier, prepends the prefix as
// necessary, and writes to log with LevelWarn.
//
// This is part of the Logger interface implementation.
func (l *slog) Warnf(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelWarn {
l.b.printf("WRN", l.tag, format, args...)
}
}
// Error formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelError.
//
// This is part of the Logger interface implementation.
func (l *slog) Error(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelError {
l.b.print("ERR", l.tag, args...)
}
}
// Errorf formats message according to format specifier, prepends the prefix as
// necessary, and writes to log with LevelError.
//
// This is part of the Logger interface implementation.
func (l *slog) Errorf(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelError {
l.b.printf("ERR", l.tag, format, args...)
}
}
// Critical formats message using the default formats for its operands, prepends
// the prefix as necessary, and writes to log with LevelCritical.
//
// This is part of the Logger interface implementation.
func (l *slog) Critical(args ...interface{}) {
lvl := l.Level()
if lvl <= LevelCritical {
l.b.print("CRT", l.tag, args...)
}
}
// Criticalf formats message according to format specifier, prepends the prefix
// as necessary, and writes to log with LevelCritical.
//
// This is part of the Logger interface implementation.
func (l *slog) Criticalf(format string, args ...interface{}) {
lvl := l.Level()
if lvl <= LevelCritical {
l.b.printf("CRT", l.tag, format, args...)
}
}
// Level returns the current logging level
//
// This is part of the Logger interface implementation.
func (l *slog) Level() Level {
return Level(atomic.LoadUint32((*uint32)(&l.lvl)))
}
// SetLevel changes the logging level to the passed level.
//
// This is part of the Logger interface implementation.
func (l *slog) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&l.lvl), uint32(level))
}
// Disabled is a Logger that will never output anything.
var Disabled Logger
func init() {
Disabled = &slog{lvl: LevelOff, b: NewBackend(ioutil.Discard)}
}
|
[
"\"LOGFLAGS\""
] |
[] |
[
"LOGFLAGS"
] |
[]
|
["LOGFLAGS"]
|
go
| 1 | 0 | |
fonty/lib/terminal_size.py
|
'''fonty.lib.terminal_size
Cross platform module to retrieve size of terminal/cmd
Adapted from: https://gist.github.com/jtriley/1108174
'''
import os
import shlex
import struct
import platform
import subprocess
def get_terminal_size():
""" getTerminalSize()
- get width and height of console
- works on linux,os x,windows,cygwin(windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for Windows' Python in Cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
# check_output captures the command's stdout (the reported size) so it can be parsed
cols = int(subprocess.check_output(shlex.split('tput cols')))
rows = int(subprocess.check_output(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
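# The block below is not part of the original module; it is a minimal sketch of
# how a caller might use get_terminal_size(). Thanks to the (80, 25) fallback
# above, the result can always be unpacked safely.
if __name__ == '__main__':
    width, height = get_terminal_size()
    print('terminal size: {} columns x {} rows'.format(width, height))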
|
[] |
[] |
[
"LINES",
"COLUMNS"
] |
[]
|
["LINES", "COLUMNS"]
|
python
| 2 | 0 | |
reader_test.go
|
package kafka
import (
"bytes"
"context"
"fmt"
"io"
"math/rand"
"net"
"os"
"reflect"
"strconv"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestReader(t *testing.T) {
tests := []struct {
scenario string
function func(*testing.T, context.Context, *Reader)
}{
{
scenario: "calling Read with a context that has been canceled returns an error",
function: testReaderReadCanceled,
},
{
scenario: "all messages of the stream are returned when calling ReadMessage repeatedly",
function: testReaderReadMessages,
},
{
scenario: "test special offsets -1 and -2",
function: testReaderSetSpecialOffsets,
},
{
scenario: "setting the offset to random values returns the expected messages when Read is called",
function: testReaderSetRandomOffset,
},
{
scenario: "setting the offset by TimeStamp",
function: testReaderSetOffsetAt,
},
{
scenario: "calling Lag returns the lag of the last message read from kafka",
function: testReaderLag,
},
{
scenario: "calling ReadLag returns the current lag of a reader",
function: testReaderReadLag,
},
{ // https://github.com/deanMdreon/kafka-go/issues/30
scenario: "reading from an out-of-range offset waits until the context is cancelled",
function: testReaderOutOfRangeGetsCanceled,
},
}
for _, test := range tests {
testFunc := test.function
t.Run(test.scenario, func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: makeTopic(),
MinBytes: 1,
MaxBytes: 10e6,
MaxWait: 100 * time.Millisecond,
})
defer r.Close()
testFunc(t, ctx, r)
})
}
}
func testReaderReadCanceled(t *testing.T, ctx context.Context, r *Reader) {
ctx, cancel := context.WithCancel(ctx)
cancel()
if _, err := r.ReadMessage(ctx); err != context.Canceled {
t.Error(err)
}
}
func testReaderReadMessages(t *testing.T, ctx context.Context, r *Reader) {
const N = 1000
prepareReader(t, ctx, r, makeTestSequence(N)...)
var offset int64
for i := 0; i != N; i++ {
m, err := r.ReadMessage(ctx)
if err != nil {
t.Error("reading message at offset", offset, "failed:", err)
return
}
offset = m.Offset + 1
v, _ := strconv.Atoi(string(m.Value))
if v != i {
t.Error("message at index", i, "has wrong value:", v)
return
}
}
}
func testReaderSetSpecialOffsets(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, ctx, r, Message{Value: []byte("first")})
prepareReader(t, ctx, r, makeTestSequence(3)...)
go func() {
time.Sleep(1 * time.Second)
prepareReader(t, ctx, r, Message{Value: []byte("last")})
}()
for _, test := range []struct {
off, final int64
want string
}{
{FirstOffset, 1, "first"},
{LastOffset, 5, "last"},
} {
offset := test.off
if err := r.SetOffset(offset); err != nil {
t.Error("setting offset", offset, "failed:", err)
}
m, err := r.ReadMessage(ctx)
if err != nil {
t.Error("reading at offset", offset, "failed:", err)
}
if string(m.Value) != test.want {
t.Error("message at offset", offset, "has wrong value:", string(m.Value))
}
if off := r.Offset(); off != test.final {
t.Errorf("bad final offset: got %d, want %d", off, test.final)
}
}
}
func testReaderSetRandomOffset(t *testing.T, ctx context.Context, r *Reader) {
const N = 10
prepareReader(t, ctx, r, makeTestSequence(N)...)
for i := 0; i != 2*N; i++ {
offset := rand.Intn(N)
r.SetOffset(int64(offset))
m, err := r.ReadMessage(ctx)
if err != nil {
t.Error("seeking to offset", offset, "failed:", err)
return
}
v, _ := strconv.Atoi(string(m.Value))
if v != offset {
t.Error("message at offset", offset, "has wrong value:", v)
return
}
}
}
func testReaderSetOffsetAt(t *testing.T, ctx context.Context, r *Reader) {
// We make 2 batches of messages here with a brief 2 second pause
// to ensure messages 0...9 will be written a few seconds before messages 10...19
// We'll then fetch the timestamp for message offset 10 and use that timestamp to set
// our reader
const N = 10
prepareReader(t, ctx, r, makeTestSequence(N)...)
time.Sleep(time.Second * 2)
prepareReader(t, ctx, r, makeTestSequence(N)...)
var ts time.Time
for i := 0; i < N*2; i++ {
m, err := r.ReadMessage(ctx)
if err != nil {
t.Error("error reading message", err)
}
// grab the time for the 10th message
if i == 10 {
ts = m.Time
}
}
err := r.SetOffsetAt(ctx, ts)
if err != nil {
t.Fatal("error setting offset by timestamp", err)
}
m, err := r.ReadMessage(context.Background())
if err != nil {
t.Fatal("error reading message", err)
}
if m.Offset != 10 {
t.Errorf("expected offset of 10, received offset %d", m.Offset)
}
}
func testReaderLag(t *testing.T, ctx context.Context, r *Reader) {
const N = 5
prepareReader(t, ctx, r, makeTestSequence(N)...)
if lag := r.Lag(); lag != 0 {
t.Errorf("the initial lag value is %d but was expected to be 0", lag)
}
for i := 0; i != N; i++ {
r.ReadMessage(ctx)
expect := int64(N - (i + 1))
if lag := r.Lag(); lag != expect {
t.Errorf("the lag value at offset %d is %d but was expected to be %d", i, lag, expect)
}
}
}
func testReaderReadLag(t *testing.T, ctx context.Context, r *Reader) {
const N = 5
prepareReader(t, ctx, r, makeTestSequence(N)...)
if lag, err := r.ReadLag(ctx); err != nil {
t.Error(err)
} else if lag != N {
t.Errorf("the initial lag value is %d but was expected to be %d", lag, N)
}
for i := 0; i != N; i++ {
r.ReadMessage(ctx)
expect := int64(N - (i + 1))
if lag, err := r.ReadLag(ctx); err != nil {
t.Error(err)
} else if lag != expect {
t.Errorf("the lag value at offset %d is %d but was expected to be %d", i, lag, expect)
}
}
}
func testReaderOutOfRangeGetsCanceled(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, ctx, r, makeTestSequence(10)...)
const D = 100 * time.Millisecond
t0 := time.Now()
ctx, cancel := context.WithTimeout(ctx, D)
defer cancel()
if err := r.SetOffset(42); err != nil {
t.Error(err)
}
_, err := r.ReadMessage(ctx)
if err != context.DeadlineExceeded {
t.Error("bad error:", err)
}
t1 := time.Now()
if d := t1.Sub(t0); d < D {
t.Error("ReadMessage returned too early after", d)
}
}
func createTopic(t *testing.T, topic string, partitions int) {
t.Helper()
t.Logf("createTopic(%s, %d)", topic, partitions)
conn, err := Dial("tcp", "localhost:9092")
if err != nil {
err = fmt.Errorf("createTopic, Dial: %w", err)
t.Fatal(err)
}
defer conn.Close()
controller, err := conn.Controller()
if err != nil {
err = fmt.Errorf("createTopic, conn.Controller: %w", err)
t.Fatal(err)
}
conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
if err != nil {
t.Fatal(err)
}
conn.SetDeadline(time.Now().Add(10 * time.Second))
_, err = conn.createTopics(createTopicsRequestV0{
Topics: []createTopicsRequestV0Topic{
{
Topic: topic,
NumPartitions: int32(partitions),
ReplicationFactor: 1,
},
},
Timeout: milliseconds(time.Second),
})
switch err {
case nil:
// ok
case TopicAlreadyExists:
// ok
default:
err = fmt.Errorf("createTopic, conn.createTopics: %w", err)
t.Error(err)
t.FailNow()
}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
waitForTopic(ctx, t, topic)
}
// Block until topic exists
func waitForTopic(ctx context.Context, t *testing.T, topic string) {
t.Helper()
for {
select {
case <-ctx.Done():
t.Fatalf("reached deadline before verifying topic existence")
default:
}
cli := &Client{
Addr: TCP("localhost:9092"),
Timeout: 5 * time.Second,
}
response, err := cli.Metadata(ctx, &MetadataRequest{
Addr: cli.Addr,
Topics: []string{topic},
})
if err != nil {
t.Fatalf("waitForTopic: error listing topics: %s", err.Error())
}
// Find a topic which has at least 1 partition in the metadata response
for _, top := range response.Topics {
if top.Name != topic {
continue
}
numPartitions := len(top.Partitions)
t.Logf("waitForTopic: found topic %q with %d partitions",
topic, numPartitions)
if numPartitions > 0 {
return
}
}
t.Logf("retrying after 1s")
time.Sleep(time.Second)
continue
}
}
func deleteTopic(t *testing.T, topic ...string) {
t.Helper()
conn, err := Dial("tcp", "localhost:9092")
if err != nil {
t.Fatal(err)
}
defer conn.Close()
controller, err := conn.Controller()
if err != nil {
t.Fatal(err)
}
conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
if err != nil {
t.Fatal(err)
}
conn.SetDeadline(time.Now().Add(10 * time.Second))
if err := conn.DeleteTopics(topic...); err != nil {
t.Fatal(err)
}
}
func TestReaderOnNonZeroPartition(t *testing.T) {
tests := []struct {
scenario string
function func(*testing.T, context.Context, *Reader)
}{
{
scenario: "topic and partition should now be included in header",
function: testReaderSetsTopicAndPartition,
},
}
for _, test := range tests {
testFunc := test.function
t.Run(test.scenario, func(t *testing.T) {
t.Parallel()
topic := makeTopic()
createTopic(t, topic, 2)
defer deleteTopic(t, topic)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
Partition: 1,
MinBytes: 1,
MaxBytes: 10e6,
MaxWait: 100 * time.Millisecond,
})
defer r.Close()
testFunc(t, ctx, r)
})
}
}
func testReaderSetsTopicAndPartition(t *testing.T, ctx context.Context, r *Reader) {
const N = 3
prepareReader(t, ctx, r, makeTestSequence(N)...)
for i := 0; i != N; i++ {
m, err := r.ReadMessage(ctx)
if err != nil {
t.Error("reading message failed:", err)
return
}
if m.Topic == "" {
t.Error("expected topic to be set")
return
}
if m.Topic != r.config.Topic {
t.Errorf("expected message to contain topic, %v; got %v", r.config.Topic, m.Topic)
return
}
if m.Partition != r.config.Partition {
t.Errorf("expected partition to be set; expected 1, got %v", m.Partition)
return
}
}
}
// TestReadTruncatedMessages uses a configuration designed to get the Broker to
// return truncated messages. It exercises the case where an earlier bug caused
// reading to time out by attempting to read beyond the current response. This
// test is not perfect, but it is pretty reliable about reproducing the issue.
//
// NOTE : it currently only succeeds against kafka 0.10.1.0, so it will be
// skipped. It's here so that it can be manually run.
func TestReadTruncatedMessages(t *testing.T) {
// todo : it would be great to get it to work against 0.11.0.0 so we could
// include it in CI unit tests.
t.Skip()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: makeTopic(),
MinBytes: 1,
MaxBytes: 100,
MaxWait: 100 * time.Millisecond,
})
defer r.Close()
n := 500
prepareReader(t, ctx, r, makeTestSequence(n)...)
for i := 0; i < n; i++ {
if _, err := r.ReadMessage(ctx); err != nil {
t.Fatal(err)
}
}
}
func makeTestSequence(n int) []Message {
base := time.Now()
msgs := make([]Message, n)
for i := 0; i != n; i++ {
msgs[i] = Message{
Time: base.Add(time.Duration(i) * time.Millisecond).Truncate(time.Millisecond),
Value: []byte(strconv.Itoa(i)),
}
}
return msgs
}
func prepareReader(t *testing.T, ctx context.Context, r *Reader, msgs ...Message) {
config := r.Config()
var conn *Conn
var err error
for {
if conn, err = DialLeader(ctx, "tcp", "localhost:9092", config.Topic, config.Partition); err == nil {
break
}
select {
case <-time.After(time.Second):
case <-ctx.Done():
t.Fatal(ctx.Err())
}
}
defer conn.Close()
if _, err := conn.WriteMessages(msgs...); err != nil {
t.Fatal(err)
}
}
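// exampleConsume is a minimal sketch, not part of the original tests, of the
// fetch/commit pattern the tests above exercise against a consumer group.
// The broker address, topic, and group ID are illustrative values only.
func exampleConsume(ctx context.Context) error {
    r := NewReader(ReaderConfig{
        Brokers:  []string{"localhost:9092"},
        Topic:    "example-topic",
        GroupID:  "example-group",
        MinBytes: 1,
        MaxBytes: 10e6,
    })
    defer r.Close()
    for {
        m, err := r.FetchMessage(ctx)
        if err != nil {
            return err // typically context cancellation during shutdown
        }
        // Process m.Value here, then mark the message as consumed for the group.
        if err := r.CommitMessages(ctx, m); err != nil {
            return err
        }
    }
}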
var (
benchmarkReaderOnce sync.Once
benchmarkReaderTopic = makeTopic()
benchmarkReaderPayload = make([]byte, 2*1024)
)
func BenchmarkReader(b *testing.B) {
const broker = "localhost:9092"
ctx := context.Background()
benchmarkReaderOnce.Do(func() {
conn, err := DialLeader(ctx, "tcp", broker, benchmarkReaderTopic, 0)
if err != nil {
b.Fatal(err)
}
defer conn.Close()
msgs := make([]Message, 1000)
for i := range msgs {
msgs[i].Value = benchmarkReaderPayload
}
for i := 0; i != 10; i++ { // put 10K messages
if _, err := conn.WriteMessages(msgs...); err != nil {
b.Fatal(err)
}
}
b.ResetTimer()
})
r := NewReader(ReaderConfig{
Brokers: []string{broker},
Topic: benchmarkReaderTopic,
Partition: 0,
MinBytes: 1e3,
MaxBytes: 1e6,
MaxWait: 100 * time.Millisecond,
})
for i := 0; i < b.N; i++ {
if (i % 10000) == 0 {
r.SetOffset(-1)
}
_, err := r.ReadMessage(ctx)
if err != nil {
b.Fatal(err)
}
}
r.Close()
b.SetBytes(int64(len(benchmarkReaderPayload)))
}
func TestCloseLeavesGroup(t *testing.T) {
if os.Getenv("KAFKA_VERSION") == "2.3.1" {
// There's a bug in 2.3.1 that causes the MemberMetadata to be in the wrong format and thus
// leads to an error when decoding the DescribeGroupsResponse.
//
// See https://issues.apache.org/jira/browse/KAFKA-9150 for details.
t.Skip("Skipping because kafka version is 2.3.1")
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
topic := makeTopic()
createTopic(t, topic, 1)
defer deleteTopic(t, topic)
groupID := makeGroupID()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
GroupID: groupID,
MinBytes: 1,
MaxBytes: 10e6,
MaxWait: 100 * time.Millisecond,
RebalanceTimeout: time.Second,
})
prepareReader(t, ctx, r, Message{Value: []byte("test")})
conn, err := Dial("tcp", r.config.Brokers[0])
if err != nil {
t.Fatalf("error dialing: %v", err)
}
defer conn.Close()
client, shutdown := newLocalClient()
defer shutdown()
descGroups := func() DescribeGroupsResponse {
resp, err := client.DescribeGroups(
ctx,
&DescribeGroupsRequest{
GroupIDs: []string{groupID},
},
)
if err != nil {
t.Fatalf("error from describeGroups %v", err)
}
return *resp
}
_, err = r.ReadMessage(ctx)
if err != nil {
t.Fatalf("our reader never joined its group or couldn't read a message: %v", err)
}
resp := descGroups()
if len(resp.Groups) != 1 {
t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
}
if len(resp.Groups[0].Members) != 1 {
t.Fatalf("expected group membership size of %d, but got %d", 1, len(resp.Groups[0].Members))
}
err = r.Close()
if err != nil {
t.Fatalf("unexpected error closing reader: %s", err.Error())
}
resp = descGroups()
if len(resp.Groups) != 1 {
t.Fatalf("expected 1 group. got: %d", len(resp.Groups))
}
if len(resp.Groups[0].Members) != 0 {
t.Fatalf("expected group membership size of %d, but got %d", 0, len(resp.Groups[0].Members))
}
}
func testConsumerGroupImmediateClose(t *testing.T, ctx context.Context, r *Reader) {
if err := r.Close(); err != nil {
t.Fatalf("bad err: %v", err)
}
}
func testConsumerGroupSimple(t *testing.T, ctx context.Context, r *Reader) {
if err := r.Close(); err != nil {
t.Fatalf("bad err: %v", err)
}
}
func TestReaderSetOffsetWhenConsumerGroupsEnabled(t *testing.T) {
r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
if err := r.SetOffset(LastOffset); err != errNotAvailableWithGroup {
t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
}
}
func TestReaderOffsetWhenConsumerGroupsEnabled(t *testing.T) {
r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
if offset := r.Offset(); offset != -1 {
t.Fatalf("expected -1; got %v", offset)
}
}
func TestReaderLagWhenConsumerGroupsEnabled(t *testing.T) {
r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
if offset := r.Lag(); offset != -1 {
t.Fatalf("expected -1; got %v", offset)
}
}
func TestReaderReadLagReturnsZeroLagWhenConsumerGroupsEnabled(t *testing.T) {
r := &Reader{config: ReaderConfig{GroupID: "not-zero"}}
lag, err := r.ReadLag(context.Background())
if err != errNotAvailableWithGroup {
t.Fatalf("expected %v; got %v", errNotAvailableWithGroup, err)
}
if lag != 0 {
t.Fatalf("expected 0; got %d", lag)
}
}
func TestReaderPartitionWhenConsumerGroupsEnabled(t *testing.T) {
invoke := func() (boom bool) {
defer func() {
if r := recover(); r != nil {
boom = true
}
}()
NewReader(ReaderConfig{
GroupID: "set",
Partition: 1,
})
return false
}
if !invoke() {
t.Fatalf("expected panic; but NewReader worked?!")
}
}
func TestExtractTopics(t *testing.T) {
testCases := map[string]struct {
Members []GroupMember
Topics []string
}{
"nil": {},
"single member, single topic": {
Members: []GroupMember{
{
ID: "a",
Topics: []string{"topic"},
},
},
Topics: []string{"topic"},
},
"two members, single topic": {
Members: []GroupMember{
{
ID: "a",
Topics: []string{"topic"},
},
{
ID: "b",
Topics: []string{"topic"},
},
},
Topics: []string{"topic"},
},
"two members, two topics": {
Members: []GroupMember{
{
ID: "a",
Topics: []string{"topic-1"},
},
{
ID: "b",
Topics: []string{"topic-2"},
},
},
Topics: []string{"topic-1", "topic-2"},
},
"three members, three shared topics": {
Members: []GroupMember{
{
ID: "a",
Topics: []string{"topic-1", "topic-2"},
},
{
ID: "b",
Topics: []string{"topic-2", "topic-3"},
},
{
ID: "c",
Topics: []string{"topic-3", "topic-1"},
},
},
Topics: []string{"topic-1", "topic-2", "topic-3"},
},
}
for label, tc := range testCases {
t.Run(label, func(t *testing.T) {
topics := extractTopics(tc.Members)
if !reflect.DeepEqual(tc.Topics, topics) {
t.Errorf("expected %v; got %v", tc.Topics, topics)
}
})
}
}
func TestReaderConsumerGroup(t *testing.T) {
tests := []struct {
scenario string
partitions int
commitInterval time.Duration
function func(*testing.T, context.Context, *Reader)
}{
{
scenario: "basic handshake",
partitions: 1,
function: testReaderConsumerGroupHandshake,
},
{
scenario: "verify offset committed",
partitions: 1,
function: testReaderConsumerGroupVerifyOffsetCommitted,
},
{
scenario: "verify offset committed when using interval committer",
partitions: 1,
commitInterval: 400 * time.Millisecond,
function: testReaderConsumerGroupVerifyPeriodicOffsetCommitter,
},
{
scenario: "rebalance across many partitions and consumers",
partitions: 8,
function: testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers,
},
{
scenario: "consumer group commits on close",
partitions: 3,
function: testReaderConsumerGroupVerifyCommitsOnClose,
},
{
scenario: "consumer group rebalance",
partitions: 3,
function: testReaderConsumerGroupRebalance,
},
{
scenario: "consumer group rebalance across topics",
partitions: 3,
function: testReaderConsumerGroupRebalanceAcrossTopics,
},
{
scenario: "consumer group reads content across partitions",
partitions: 3,
function: testReaderConsumerGroupReadContentAcrossPartitions,
},
{
scenario: "Close immediately after NewReader",
partitions: 1,
function: testConsumerGroupImmediateClose,
},
{
scenario: "Close immediately after NewReader",
partitions: 1,
function: testConsumerGroupSimple,
},
}
for _, test := range tests {
t.Run(test.scenario, func(t *testing.T) {
// It appears that some of the tests depend on all these tests being
// run concurrently to pass... this is brittle and should be fixed
// at some point.
t.Parallel()
topic := makeTopic()
createTopic(t, topic, test.partitions)
defer deleteTopic(t, topic)
groupID := makeGroupID()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
GroupID: groupID,
HeartbeatInterval: 2 * time.Second,
CommitInterval: test.commitInterval,
RebalanceTimeout: 2 * time.Second,
RetentionTime: time.Hour,
MinBytes: 1,
MaxBytes: 1e6,
})
defer r.Close()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
test.function(t, ctx, r)
})
}
}
func testReaderConsumerGroupHandshake(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, context.Background(), r, makeTestSequence(5)...)
m, err := r.ReadMessage(ctx)
if err != nil {
t.Errorf("bad err: %v", err)
}
if m.Topic != r.config.Topic {
t.Errorf("topic not set")
}
if m.Offset != 0 {
t.Errorf("offset not set")
}
m, err = r.ReadMessage(ctx)
if err != nil {
t.Errorf("bad err: %v", err)
}
if m.Topic != r.config.Topic {
t.Errorf("topic not set")
}
if m.Offset != 1 {
t.Errorf("offset not set")
}
}
func testReaderConsumerGroupVerifyOffsetCommitted(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, context.Background(), r, makeTestSequence(3)...)
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("bad err: %v", err) // skip the first message
}
m, err := r.FetchMessage(ctx)
if err != nil {
t.Errorf("bad err: %v", err)
}
if err := r.CommitMessages(ctx, m); err != nil {
t.Errorf("bad commit message: %v", err)
}
offsets := getOffsets(t, r.config)
if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
t.Errorf("expected %v; got %v", expected, offsets)
}
}
func testReaderConsumerGroupVerifyPeriodicOffsetCommitter(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, context.Background(), r, makeTestSequence(3)...)
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("bad err: %v", err) // skip the first message
}
m, err := r.FetchMessage(ctx)
if err != nil {
t.Errorf("bad err: %v", err)
}
started := time.Now()
if err := r.CommitMessages(ctx, m); err != nil {
t.Errorf("bad commit message: %v", err)
}
if elapsed := time.Now().Sub(started); elapsed > 10*time.Millisecond {
t.Errorf("background commits should happen nearly instantly")
}
// wait for committer to pick up the commits
time.Sleep(r.config.CommitInterval * 3)
offsets := getOffsets(t, r.config)
if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
t.Errorf("expected %v; got %v", expected, offsets)
}
}
func testReaderConsumerGroupVerifyCommitsOnClose(t *testing.T, ctx context.Context, r *Reader) {
prepareReader(t, context.Background(), r, makeTestSequence(3)...)
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("bad err: %v", err) // skip the first message
}
m, err := r.FetchMessage(ctx)
if err != nil {
t.Errorf("bad err: %v", err)
}
if err := r.CommitMessages(ctx, m); err != nil {
t.Errorf("bad commit message: %v", err)
}
if err := r.Close(); err != nil {
t.Errorf("bad Close: %v", err)
}
r2 := NewReader(r.config)
defer r2.Close()
offsets := getOffsets(t, r2.config)
if expected := map[int]int64{0: m.Offset + 1}; !reflect.DeepEqual(expected, offsets) {
t.Errorf("expected %v; got %v", expected, offsets)
}
}
func testReaderConsumerGroupReadContentAcrossPartitions(t *testing.T, ctx context.Context, r *Reader) {
const N = 12
client, shutdown := newLocalClient()
defer shutdown()
writer := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: r.config.Topic,
Balancer: &RoundRobin{},
BatchSize: 1,
Transport: client.Transport,
}
if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
t.Fatalf("bad write messages: %v", err)
}
if err := writer.Close(); err != nil {
t.Fatalf("bad write err: %v", err)
}
partitions := map[int]struct{}{}
for i := 0; i < N; i++ {
m, err := r.FetchMessage(ctx)
if err != nil {
t.Errorf("bad error: %s", err)
}
partitions[m.Partition] = struct{}{}
}
if v := len(partitions); v != 3 {
t.Errorf("expected messages across 3 partitions; got messages across %v partitions", v)
}
}
func testReaderConsumerGroupRebalance(t *testing.T, ctx context.Context, r *Reader) {
r2 := NewReader(r.config)
defer r.Close()
const (
N = 12
partitions = 2
)
client, shutdown := newLocalClient()
defer shutdown()
// rebalance should result in 12 message in each of the partitions
writer := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: r.config.Topic,
Balancer: &RoundRobin{},
BatchSize: 1,
Transport: client.Transport,
}
if err := writer.WriteMessages(ctx, makeTestSequence(N*partitions)...); err != nil {
t.Fatalf("bad write messages: %v", err)
}
if err := writer.Close(); err != nil {
t.Fatalf("bad write err: %v", err)
}
// after rebalance, each reader should have a partition to itself
for i := 0; i < N; i++ {
if _, err := r2.FetchMessage(ctx); err != nil {
t.Errorf("expect to read from reader 2")
}
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("expect to read from reader 1")
}
}
}
func testReaderConsumerGroupRebalanceAcrossTopics(t *testing.T, ctx context.Context, r *Reader) {
// create a second reader that shares the groupID, but reads from a different topic
client, topic2, shutdown := newLocalClientAndTopic()
defer shutdown()
r2 := NewReader(ReaderConfig{
Brokers: r.config.Brokers,
Topic: topic2,
GroupID: r.config.GroupID,
HeartbeatInterval: r.config.HeartbeatInterval,
SessionTimeout: r.config.SessionTimeout,
RetentionTime: r.config.RetentionTime,
MinBytes: r.config.MinBytes,
MaxBytes: r.config.MaxBytes,
Logger: r.config.Logger,
})
defer r.Close()
prepareReader(t, ctx, r2, makeTestSequence(1)...)
const (
N = 12
)
// write messages across both partitions
writer := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: r.config.Topic,
Balancer: &RoundRobin{},
BatchSize: 1,
Transport: client.Transport,
}
if err := writer.WriteMessages(ctx, makeTestSequence(N)...); err != nil {
t.Fatalf("bad write messages: %v", err)
}
if err := writer.Close(); err != nil {
t.Fatalf("bad write err: %v", err)
}
// after rebalance, r2 should read topic2 and r1 should read ALL of the original topic
if _, err := r2.FetchMessage(ctx); err != nil {
t.Errorf("expect to read from reader 2")
}
// all N messages on the original topic should be read by the original reader
for i := 0; i < N; i++ {
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("expect to read from reader 1")
}
}
}
func testReaderConsumerGroupRebalanceAcrossManyPartitionsAndConsumers(t *testing.T, ctx context.Context, r *Reader) {
// I've rebalanced up to 100 servers, but the rebalance can take upwards
// of a minute and that seems too long for unit tests. Also, setting this
// to a larger number seems to make the kafka broker unresponsive.
// TODO research if there's a way to reduce rebalance time across many partitions
// svls: the described behavior is due to the thundering herd of readers
// hitting the rebalance timeout. introducing the 100ms sleep in the
// loop below in order to give time for the sync group to finish has
// greatly helped, though we still hit the timeout from time to time.
const N = 8
var readers []*Reader
for i := 0; i < N-1; i++ {
reader := NewReader(r.config)
readers = append(readers, reader)
time.Sleep(100 * time.Millisecond)
}
defer func() {
for _, r := range readers {
r.Close()
time.Sleep(100 * time.Millisecond)
}
}()
client, shutdown := newLocalClient()
defer shutdown()
// write messages across both partitions
writer := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: r.config.Topic,
Balancer: &RoundRobin{},
BatchSize: 1,
Transport: client.Transport,
}
if err := writer.WriteMessages(ctx, makeTestSequence(N*3)...); err != nil {
t.Fatalf("bad write messages: %v", err)
}
if err := writer.Close(); err != nil {
t.Fatalf("bad write err: %v", err)
}
// all N messages on the original topic should be read by the original reader
for i := 0; i < N-1; i++ {
if _, err := readers[i].FetchMessage(ctx); err != nil {
t.Errorf("reader %v expected to read 1 message", i)
}
}
if _, err := r.FetchMessage(ctx); err != nil {
t.Errorf("expect to read from original reader")
}
}
func TestOffsetStash(t *testing.T) {
const topic = "topic"
newMessage := func(partition int, offset int64) Message {
return Message{
Topic: topic,
Partition: partition,
Offset: offset,
}
}
tests := map[string]struct {
Given offsetStash
Messages []Message
Expected offsetStash
}{
"nil": {},
"empty given, single message": {
Given: offsetStash{},
Messages: []Message{newMessage(0, 0)},
Expected: offsetStash{
topic: {0: 1},
},
},
"ignores earlier offsets": {
Given: offsetStash{
topic: {0: 2},
},
Messages: []Message{newMessage(0, 0)},
Expected: offsetStash{
topic: {0: 2},
},
},
"uses latest offset": {
Given: offsetStash{},
Messages: []Message{
newMessage(0, 2),
newMessage(0, 3),
newMessage(0, 1),
},
Expected: offsetStash{
topic: {0: 4},
},
},
"uses latest offset, across multiple partitions": {
Given: offsetStash{},
Messages: []Message{
newMessage(0, 2),
newMessage(0, 3),
newMessage(0, 1),
newMessage(1, 5),
newMessage(1, 6),
},
Expected: offsetStash{
topic: {
0: 4,
1: 7,
},
},
},
}
for label, test := range tests {
t.Run(label, func(t *testing.T) {
test.Given.merge(makeCommits(test.Messages...))
if !reflect.DeepEqual(test.Expected, test.Given) {
t.Errorf("expected %v; got %v", test.Expected, test.Given)
}
})
}
}
type mockOffsetCommitter struct {
invocations int
failCount int
err error
}
func (m *mockOffsetCommitter) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) {
m.invocations++
if m.failCount > 0 {
m.failCount--
return offsetCommitResponseV2{}, io.EOF
}
return offsetCommitResponseV2{}, nil
}
func TestValidateReader(t *testing.T) {
tests := []struct {
config ReaderConfig
errorOccured bool
}{
{config: ReaderConfig{}, errorOccured: true},
{config: ReaderConfig{Brokers: []string{"broker1"}}, errorOccured: true},
{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1"}, errorOccured: false},
{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: -1}, errorOccured: true},
{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: -1}, errorOccured: true},
{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: -1}, errorOccured: true},
{config: ReaderConfig{Brokers: []string{"broker1"}, Topic: "topic1", Partition: 1, MinBytes: 5, MaxBytes: 6}, errorOccured: false},
}
for _, test := range tests {
err := test.config.Validate()
if test.errorOccured && err == nil {
t.Fail()
}
if !test.errorOccured && err != nil {
t.Fail()
}
}
}
func TestCommitLoopImmediateFlushOnGenerationEnd(t *testing.T) {
t.Parallel()
var committedOffset int64
var commitCount int
gen := &Generation{
conn: mockCoordinator{
offsetCommitFunc: func(r offsetCommitRequestV2) (offsetCommitResponseV2, error) {
commitCount++
committedOffset = r.Topics[0].Partitions[0].Offset
return offsetCommitResponseV2{}, nil
},
},
done: make(chan struct{}),
log: func(func(Logger)) {},
logError: func(func(Logger)) {},
joined: make(chan struct{}),
}
// initialize commits so that the commitLoopImmediate select statement blocks
r := &Reader{stctx: context.Background(), commits: make(chan commitRequest, 100)}
for i := 0; i < 100; i++ {
cr := commitRequest{
commits: []commit{{
topic: "topic",
partition: 0,
offset: int64(i) + 1,
}},
errch: make(chan<- error, 1),
}
r.commits <- cr
}
gen.Start(func(ctx context.Context) {
r.commitLoopImmediate(ctx, gen)
})
gen.close()
if committedOffset != 100 {
t.Fatalf("expected committed offset to be 100 but got %d", committedOffset)
}
if commitCount >= 100 {
t.Fatalf("expected a single final commit on generation end got %d", commitCount)
}
}
func TestCommitOffsetsWithRetry(t *testing.T) {
offsets := offsetStash{"topic": {0: 0}}
tests := map[string]struct {
Fails int
Invocations int
HasError bool
}{
"happy path": {
Invocations: 1,
},
"1 retry": {
Fails: 1,
Invocations: 2,
},
"out of retries": {
Fails: defaultCommitRetries + 1,
Invocations: defaultCommitRetries,
HasError: true,
},
}
for label, test := range tests {
t.Run(label, func(t *testing.T) {
count := 0
gen := &Generation{
conn: mockCoordinator{
offsetCommitFunc: func(offsetCommitRequestV2) (offsetCommitResponseV2, error) {
count++
if count <= test.Fails {
return offsetCommitResponseV2{}, io.EOF
}
return offsetCommitResponseV2{}, nil
},
},
done: make(chan struct{}),
log: func(func(Logger)) {},
logError: func(func(Logger)) {},
}
r := &Reader{stctx: context.Background()}
err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries)
switch {
case test.HasError && err == nil:
t.Error("bad err: expected not nil; got nil")
case !test.HasError && err != nil:
t.Errorf("bad err: expected nil; got %v", err)
}
})
}
}
// Test that a reader won't continually rebalance when there are more consumers
// than partitions in a group.
// https://github.com/deanMdreon/kafka-go/issues/200
func TestRebalanceTooManyConsumers(t *testing.T) {
ctx := context.Background()
conf := ReaderConfig{
Brokers: []string{"localhost:9092"},
GroupID: makeGroupID(),
Topic: makeTopic(),
MaxWait: time.Second,
}
// Create the first reader and wait for it to become the leader.
r1 := NewReader(conf)
prepareReader(t, ctx, r1, makeTestSequence(1)...)
r1.ReadMessage(ctx)
// Clear the stats from the first rebalance.
r1.Stats()
// Second reader should cause one rebalance for each r1 and r2.
r2 := NewReader(conf)
// Wait for rebalances.
time.Sleep(5 * time.Second)
// Before the fix, r2 would cause continuous rebalances,
// as it tried to handshake() repeatedly.
rebalances := r1.Stats().Rebalances + r2.Stats().Rebalances
if rebalances > 2 {
t.Errorf("unexpected rebalances to first reader, got %d", rebalances)
}
}
func TestConsumerGroupWithMissingTopic(t *testing.T) {
t.Skip("this test doesn't work when the cluster is configured to auto-create topics")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
conf := ReaderConfig{
Brokers: []string{"localhost:9092"},
GroupID: makeGroupID(),
Topic: makeTopic(),
MaxWait: time.Second,
PartitionWatchInterval: 100 * time.Millisecond,
WatchPartitionChanges: true,
}
r := NewReader(conf)
defer r.Close()
recvErr := make(chan error, 1)
go func() {
_, err := r.ReadMessage(ctx)
recvErr <- err
}()
time.Sleep(time.Second)
client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
defer shutdown()
w := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: r.config.Topic,
BatchTimeout: 10 * time.Millisecond,
BatchSize: 1,
Transport: client.Transport,
}
defer w.Close()
if err := w.WriteMessages(ctx, Message{}); err != nil {
t.Fatalf("write error: %+v", err)
}
if err := <-recvErr; err != nil {
t.Fatalf("read error: %+v", err)
}
nMsgs := r.Stats().Messages
if nMsgs != 1 {
t.Fatalf("expected to receive one message, but got %d", nMsgs)
}
}
func TestConsumerGroupWithTopic(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
conf := ReaderConfig{
Brokers: []string{"localhost:9092"},
GroupID: makeGroupID(),
Topic: makeTopic(),
MaxWait: time.Second,
PartitionWatchInterval: 100 * time.Millisecond,
WatchPartitionChanges: true,
Logger: newTestKafkaLogger(t, "Reader:"),
}
r := NewReader(conf)
defer r.Close()
recvErr := make(chan error, len(conf.GroupTopics))
go func() {
msg, err := r.ReadMessage(ctx)
t.Log(msg)
recvErr <- err
}()
time.Sleep(conf.MaxWait)
client, shutdown := newLocalClientWithTopic(conf.Topic, 1)
defer shutdown()
w := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: conf.Topic,
BatchTimeout: 10 * time.Millisecond,
BatchSize: 1,
Transport: client.Transport,
Logger: newTestKafkaLogger(t, "Writer:"),
}
defer w.Close()
if err := w.WriteMessages(ctx, Message{Value: []byte(conf.Topic)}); err != nil {
t.Fatalf("write error: %+v", err)
}
if err := <-recvErr; err != nil {
t.Fatalf("read error: %+v", err)
}
nMsgs := r.Stats().Messages
if nMsgs != 1 {
t.Fatalf("expected to receive 1 message, but got %d", nMsgs)
}
}
func TestConsumerGroupWithGroupTopicsSingle(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
conf := ReaderConfig{
Brokers: []string{"localhost:9092"},
GroupID: makeGroupID(),
GroupTopics: []string{makeTopic()},
MaxWait: time.Second,
PartitionWatchInterval: 100 * time.Millisecond,
WatchPartitionChanges: true,
Logger: newTestKafkaLogger(t, "Reader:"),
}
r := NewReader(conf)
defer r.Close()
recvErr := make(chan error, len(conf.GroupTopics))
go func() {
msg, err := r.ReadMessage(ctx)
t.Log(msg)
recvErr <- err
}()
time.Sleep(conf.MaxWait)
for i, topic := range conf.GroupTopics {
client, shutdown := newLocalClientWithTopic(topic, 1)
defer shutdown()
w := &Writer{
Addr: TCP(r.config.Brokers...),
Topic: topic,
BatchTimeout: 10 * time.Millisecond,
BatchSize: 1,
Transport: client.Transport,
Logger: newTestKafkaLogger(t, fmt.Sprintf("Writer(%d):", i)),
}
defer w.Close()
if err := w.WriteMessages(ctx, Message{Value: []byte(topic)}); err != nil {
t.Fatalf("write error: %+v", err)
}
}
if err := <-recvErr; err != nil {
t.Fatalf("read error: %+v", err)
}
nMsgs := r.Stats().Messages
if nMsgs != int64(len(conf.GroupTopics)) {
t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
}
}
func TestConsumerGroupWithGroupTopicsMultple(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
client, shutdown := newLocalClient()
defer shutdown()
conf := ReaderConfig{
Brokers: []string{"localhost:9092"},
GroupID: makeGroupID(),
GroupTopics: []string{makeTopic(), makeTopic()},
MaxWait: time.Second,
PartitionWatchInterval: 100 * time.Millisecond,
WatchPartitionChanges: true,
Logger: newTestKafkaLogger(t, "Reader:"),
}
r := NewReader(conf)
w := &Writer{
Addr: TCP(r.config.Brokers...),
BatchTimeout: 10 * time.Millisecond,
BatchSize: 1,
Transport: client.Transport,
Logger: newTestKafkaLogger(t, "Writer:"),
}
defer w.Close()
time.Sleep(time.Second)
msgs := make([]Message, 0, len(conf.GroupTopics))
for _, topic := range conf.GroupTopics {
msgs = append(msgs, Message{Topic: topic})
}
if err := w.WriteMessages(ctx, msgs...); err != nil {
t.Logf("write error: %+v", err)
}
wg := new(sync.WaitGroup)
wg.Add(len(msgs))
go func() {
wg.Wait()
t.Log("closing reader")
r.Close()
}()
for {
msg, err := r.ReadMessage(ctx)
if err != nil {
if err == io.EOF {
t.Log("reader closed")
break
}
t.Fatalf("read error: %+v", err)
} else {
t.Logf("message read: %+v", msg)
wg.Done()
}
}
nMsgs := r.Stats().Messages
if nMsgs != int64(len(conf.GroupTopics)) {
t.Fatalf("expected to receive %d messages, but got %d", len(conf.GroupTopics), nMsgs)
}
}
func getOffsets(t *testing.T, config ReaderConfig) map[int]int64 {
// minimal config required to lookup coordinator
cg := ConsumerGroup{
config: ConsumerGroupConfig{
ID: config.GroupID,
Brokers: config.Brokers,
Dialer: config.Dialer,
},
}
conn, err := cg.coordinator()
if err != nil {
t.Errorf("unable to connect to coordinator: %v", err)
}
defer conn.Close()
offsets, err := conn.offsetFetch(offsetFetchRequestV1{
GroupID: config.GroupID,
Topics: []offsetFetchRequestV1Topic{{
Topic: config.Topic,
Partitions: []int32{0},
}},
})
if err != nil {
t.Errorf("bad fetchOffsets: %v", err)
}
m := map[int]int64{}
for _, r := range offsets.Responses {
if r.Topic == config.Topic {
for _, p := range r.PartitionResponses {
m[int(p.Partition)] = p.Offset
}
}
}
return m
}
const (
connTO = 1 * time.Second
connTestTO = 2 * connTO
)
func TestErrorCannotConnect(t *testing.T) {
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9093"},
Dialer: &Dialer{Timeout: connTO},
MaxAttempts: 1,
Topic: makeTopic(),
})
ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
defer cancel()
_, err := r.FetchMessage(ctx)
if err == nil || ctx.Err() != nil {
t.Errorf("Reader.FetchMessage must fail when it cannot " +
"connect")
}
}
func TestErrorCannotConnectGroupSubscription(t *testing.T) {
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9093"},
Dialer: &Dialer{Timeout: 1 * time.Second},
GroupID: "foobar",
MaxAttempts: 1,
Topic: makeTopic(),
})
ctx, cancel := context.WithTimeout(context.Background(), connTestTO)
defer cancel()
_, err := r.FetchMessage(ctx)
if err == nil || ctx.Err() != nil {
t.Errorf("Reader.FetchMessage with a group subscription " +
"must fail when it cannot connect")
}
}
// Tests that the reader can handle messages where the response is truncated
// due to reaching MaxBytes.
//
// If MaxBytes is too small to fit 1 record then it will never truncate, so
// we start from a small message size and increase it until we are sure
// truncation has happened at some point.
func TestReaderTruncatedResponse(t *testing.T) {
topic := makeTopic()
createTopic(t, topic, 1)
defer deleteTopic(t, topic)
readerMaxBytes := 100
batchSize := 4
maxMsgPadding := 5
readContextTimeout := 10 * time.Second
var msgs []Message
// The key of each message
n := 0
// `i` is the amount of padding per message
for i := 0; i < maxMsgPadding; i++ {
bb := bytes.Buffer{}
for x := 0; x < i; x++ {
_, err := bb.WriteRune('0')
require.NoError(t, err)
}
padding := bb.Bytes()
// `j` is the number of times the message repeats
for j := 0; j < batchSize*4; j++ {
msgs = append(msgs, Message{
Key: []byte(fmt.Sprintf("%05d", n)),
Value: padding,
})
n++
}
}
wr := NewWriter(WriterConfig{
Brokers: []string{"localhost:9092"},
BatchSize: batchSize,
Async: false,
Topic: topic,
Balancer: &LeastBytes{},
})
err := wr.WriteMessages(context.Background(), msgs...)
require.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), readContextTimeout)
defer cancel()
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
MinBytes: 1,
MaxBytes: readerMaxBytes,
// Speed up testing
MaxWait: 100 * time.Millisecond,
})
defer r.Close()
expectedKeys := map[string]struct{}{}
for _, k := range msgs {
expectedKeys[string(k.Key)] = struct{}{}
}
keys := map[string]struct{}{}
for {
m, err := r.FetchMessage(ctx)
require.NoError(t, err)
keys[string(m.Key)] = struct{}{}
t.Logf("got key %s have %d keys expect %d\n", string(m.Key), len(keys), len(expectedKeys))
if len(keys) == len(expectedKeys) {
require.Equal(t, expectedKeys, keys)
return
}
}
}
// Tests that the reader can read record batches from log compacted topics
// where the batch ends with compacted records.
//
// This test forces varying sized chunks of duplicated messages along with
// configuring the topic with a minimal `segment.bytes` in order to
// guarantee that at least 1 batch can be compacted down to 0 "unread" messages
// with at least 1 "old" message otherwise the batch is skipped entirely.
func TestReaderReadCompactedMessage(t *testing.T) {
topic := makeTopic()
createTopicWithCompaction(t, topic, 1)
defer deleteTopic(t, topic)
msgs := makeTestDuplicateSequence()
writeMessagesForCompactionCheck(t, topic, msgs)
expectedKeys := map[string]int{}
for _, msg := range msgs {
expectedKeys[string(msg.Key)] = 1
}
// kafka 2.0.1 is extra slow
ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
defer cancel()
for {
success := func() bool {
r := NewReader(ReaderConfig{
Brokers: []string{"localhost:9092"},
Topic: topic,
MinBytes: 200,
MaxBytes: 200,
// Speed up testing
MaxWait: 100 * time.Millisecond,
})
defer r.Close()
keys := map[string]int{}
for {
m, err := r.FetchMessage(ctx)
if err != nil {
t.Logf("can't get message from compacted log: %v", err)
return false
}
keys[string(m.Key)]++
if len(keys) == countKeys(msgs) {
t.Logf("got keys: %+v", keys)
return reflect.DeepEqual(keys, expectedKeys)
}
}
}()
if success {
return
}
select {
case <-ctx.Done():
t.Fatal(ctx.Err())
default:
}
}
}
// writeMessagesForCompactionCheck writes messages with specific writer configuration
func writeMessagesForCompactionCheck(t *testing.T, topic string, msgs []Message) {
t.Helper()
wr := NewWriter(WriterConfig{
Brokers: []string{"localhost:9092"},
// Batch size must be large enough to have multiple compacted records
// for testing more edge cases.
BatchSize: 3,
Async: false,
Topic: topic,
Balancer: &LeastBytes{},
})
err := wr.WriteMessages(context.Background(), msgs...)
require.NoError(t, err)
}
// makeTestDuplicateSequence creates messages for compacted log testing
//
// All keys and values are 4 characters long to tightly control how many
// messages are per log segment.
func makeTestDuplicateSequence() []Message {
var msgs []Message
// `n` is an increasing counter so it is never compacted.
n := 0
// `i` determines how many compacted records to create
for i := 0; i < 5; i++ {
// `j` is how many times the current pattern repeats. We repeat because
// as long as we have a pattern that is slightly larger/smaller than
// the log segment size then if we repeat enough it will eventually
// try all configurations.
for j := 0; j < 30; j++ {
msgs = append(msgs, Message{
Key: []byte(fmt.Sprintf("%04d", n)),
Value: []byte(fmt.Sprintf("%04d", n)),
})
n++
// This produces the duplicated messages to compact.
for k := 0; k < i; k++ {
msgs = append(msgs, Message{
Key: []byte("dup_"),
Value: []byte("dup_"),
})
}
}
}
// "end markers" to force duplicate message outside of the last segment of
// the log so that they can all be compacted.
for i := 0; i < 10; i++ {
msgs = append(msgs, Message{
Key: []byte(fmt.Sprintf("e-%02d", i)),
Value: []byte(fmt.Sprintf("e-%02d", i)),
})
}
return msgs
}
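// Editorial note (added, not part of the original test): with the loops above the
// generated sequence looks like 0000..0029 for i=0, then 0030, dup_, 0031, dup_, ...
// for i=1, and so on, so successive segments carry a growing ratio of duplicated
// "dup_" records to unique records before the e-00..e-09 end markers.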
// countKeys counts unique keys from given Message slice
func countKeys(msgs []Message) int {
m := make(map[string]struct{})
for _, msg := range msgs {
m[string(msg.Key)] = struct{}{}
}
return len(m)
}
func createTopicWithCompaction(t *testing.T, topic string, partitions int) {
t.Helper()
t.Logf("createTopic(%s, %d)", topic, partitions)
conn, err := Dial("tcp", "localhost:9092")
require.NoError(t, err)
defer conn.Close()
controller, err := conn.Controller()
require.NoError(t, err)
conn, err = Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
require.NoError(t, err)
conn.SetDeadline(time.Now().Add(10 * time.Second))
err = conn.CreateTopics(TopicConfig{
Topic: topic,
NumPartitions: partitions,
ReplicationFactor: 1,
ConfigEntries: []ConfigEntry{
{
ConfigName: "cleanup.policy",
ConfigValue: "compact",
},
{
ConfigName: "segment.bytes",
ConfigValue: "200",
},
},
})
switch err {
case nil:
// ok
case TopicAlreadyExists:
// ok
default:
require.NoError(t, err)
}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
waitForTopic(ctx, t, topic)
}
|
[
"\"KAFKA_VERSION\""
] |
[] |
[
"KAFKA_VERSION"
] |
[]
|
["KAFKA_VERSION"]
|
go
| 1 | 0 | |
vendor/github.com/hashicorp/terraform/builtin/providers/google/resource_google_project_services_test.go
|
package google
import (
"bytes"
"fmt"
"log"
"os"
"reflect"
"sort"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/servicemanagement/v1"
)
// Test that services can be enabled and disabled on a project
func TestAccGoogleProjectServices_basic(t *testing.T) {
pid := "terraform-" + acctest.RandString(10)
services1 := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"}
services2 := []string{"cloudresourcemanager.googleapis.com"}
oobService := "iam.googleapis.com"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
// Create a new project with some services
resource.TestStep{
Config: testAccGoogleProjectAssociateServicesBasic(services1, pid, pname, org),
Check: resource.ComposeTestCheckFunc(
testProjectServicesMatch(services1, pid),
),
},
// Update services to remove one
resource.TestStep{
Config: testAccGoogleProjectAssociateServicesBasic(services2, pid, pname, org),
Check: resource.ComposeTestCheckFunc(
testProjectServicesMatch(services2, pid),
),
},
// Add a service out-of-band and ensure it is removed
resource.TestStep{
PreConfig: func() {
config := testAccProvider.Meta().(*Config)
enableService(oobService, pid, config)
},
Config: testAccGoogleProjectAssociateServicesBasic(services2, pid, pname, org),
Check: resource.ComposeTestCheckFunc(
testProjectServicesMatch(services2, pid),
),
},
},
})
}
// Test that services are authoritative when a project has existing
// services not represented in config
func TestAccGoogleProjectServices_authoritative(t *testing.T) {
pid := "terraform-" + acctest.RandString(10)
services := []string{"cloudresourcemanager.googleapis.com"}
oobService := "iam.googleapis.com"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
// Create a new project with no services
resource.TestStep{
Config: testAccGoogleProject_create(pid, pname, org),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleProjectExists("google_project.acceptance", pid),
),
},
// Add a service out-of-band, then apply a config that creates a service.
// It should remove the out-of-band service.
resource.TestStep{
PreConfig: func() {
config := testAccProvider.Meta().(*Config)
enableService(oobService, pid, config)
},
Config: testAccGoogleProjectAssociateServicesBasic(services, pid, pname, org),
Check: resource.ComposeTestCheckFunc(
testProjectServicesMatch(services, pid),
),
},
},
})
}
// Test that services are authoritative when a project has existing
// services, some of which are represented in the config and others
// that are not
func TestAccGoogleProjectServices_authoritative2(t *testing.T) {
pid := "terraform-" + acctest.RandString(10)
oobServices := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"}
services := []string{"iam.googleapis.com"}
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
// Create a new project with no services
resource.TestStep{
Config: testAccGoogleProject_create(pid, pname, org),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleProjectExists("google_project.acceptance", pid),
),
},
// Add a service out-of-band, then apply a config that creates a service.
// It should remove the out-of-band service.
resource.TestStep{
PreConfig: func() {
config := testAccProvider.Meta().(*Config)
for _, s := range oobServices {
enableService(s, pid, config)
}
},
Config: testAccGoogleProjectAssociateServicesBasic(services, pid, pname, org),
Check: resource.ComposeTestCheckFunc(
testProjectServicesMatch(services, pid),
),
},
},
})
}
// Test that services that can't be enabled on their own (such as dataproc-control.googleapis.com)
// don't end up causing diffs when they are enabled as a side-effect of a different service's
// enablement.
func TestAccGoogleProjectServices_ignoreUnenablableServices(t *testing.T) {
skipIfEnvNotSet(t,
[]string{
"GOOGLE_ORG",
"GOOGLE_BILLING_ACCOUNT",
}...,
)
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
pid := "terraform-" + acctest.RandString(10)
services := []string{
"dataproc.googleapis.com",
// The following services are enabled as a side-effect of dataproc's enablement
"storage-component.googleapis.com",
"deploymentmanager.googleapis.com",
"replicapool.googleapis.com",
"replicapoolupdater.googleapis.com",
"resourceviews.googleapis.com",
"compute-component.googleapis.com",
"container.googleapis.com",
"containerregistry.googleapis.com",
"storage-api.googleapis.com",
"pubsub.googleapis.com",
}
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId),
Check: resource.ComposeTestCheckFunc(
testProjectServicesMatch(services, pid),
),
},
},
})
}
func TestAccGoogleProjectServices_manyServices(t *testing.T) {
skipIfEnvNotSet(t,
[]string{
"GOOGLE_ORG",
"GOOGLE_BILLING_ACCOUNT",
}...,
)
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
pid := "terraform-" + acctest.RandString(10)
services := []string{
"bigquery-json.googleapis.com",
"cloudbuild.googleapis.com",
"cloudfunctions.googleapis.com",
"cloudresourcemanager.googleapis.com",
"cloudtrace.googleapis.com",
"compute-component.googleapis.com",
"container.googleapis.com",
"containerregistry.googleapis.com",
"dataflow.googleapis.com",
"dataproc.googleapis.com",
"deploymentmanager.googleapis.com",
"dns.googleapis.com",
"endpoints.googleapis.com",
"iam.googleapis.com",
"logging.googleapis.com",
"ml.googleapis.com",
"monitoring.googleapis.com",
"pubsub.googleapis.com",
"replicapool.googleapis.com",
"replicapoolupdater.googleapis.com",
"resourceviews.googleapis.com",
"runtimeconfig.googleapis.com",
"servicecontrol.googleapis.com",
"servicemanagement.googleapis.com",
"sourcerepo.googleapis.com",
"spanner.googleapis.com",
"storage-api.googleapis.com",
"storage-component.googleapis.com",
}
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId),
Check: resource.ComposeTestCheckFunc(
testProjectServicesMatch(services, pid),
),
},
},
})
}
func testAccGoogleProjectAssociateServicesBasic(services []string, pid, name, org string) string {
return fmt.Sprintf(`
resource "google_project" "acceptance" {
project_id = "%s"
name = "%s"
org_id = "%s"
}
resource "google_project_services" "acceptance" {
project = "${google_project.acceptance.project_id}"
services = [%s]
}
`, pid, name, org, testStringsToString(services))
}
func testAccGoogleProjectAssociateServicesBasic_withBilling(services []string, pid, name, org, billing string) string {
return fmt.Sprintf(`
resource "google_project" "acceptance" {
project_id = "%s"
name = "%s"
org_id = "%s"
billing_account = "%s"
}
resource "google_project_services" "acceptance" {
project = "${google_project.acceptance.project_id}"
services = [%s]
}
`, pid, name, org, billing, testStringsToString(services))
}
func testProjectServicesMatch(services []string, pid string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
apiServices, err := getApiServices(pid, config)
if err != nil {
return fmt.Errorf("Error listing services for project %q: %v", pid, err)
}
sort.Strings(services)
sort.Strings(apiServices)
if !reflect.DeepEqual(services, apiServices) {
return fmt.Errorf("Services in config (%v) do not exactly match services returned by API (%v)", services, apiServices)
}
return nil
}
}
func testStringsToString(s []string) string {
var b bytes.Buffer
for i, v := range s {
b.WriteString(fmt.Sprintf("\"%s\"", v))
if i < len(s)-1 {
b.WriteString(",")
}
}
r := b.String()
log.Printf("[DEBUG]: Converted list of strings to %s", r)
return r
}
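// Editorial illustration (added, not in the original file): testStringsToString turns
// []string{"iam.googleapis.com", "pubsub.googleapis.com"} into the literal
// "iam.googleapis.com","pubsub.googleapis.com", which is what the services = [%s]
// blocks above interpolate.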
func testManagedServicesToString(svcs []*servicemanagement.ManagedService) string {
var b bytes.Buffer
for _, s := range svcs {
b.WriteString(s.ServiceName)
}
return b.String()
}
|
[
"\"GOOGLE_BILLING_ACCOUNT\"",
"\"GOOGLE_BILLING_ACCOUNT\""
] |
[] |
[
"GOOGLE_BILLING_ACCOUNT"
] |
[]
|
["GOOGLE_BILLING_ACCOUNT"]
|
go
| 1 | 0 | |
main.go
|
/*
Copyright © 2020 Josa Gesell <[email protected]>
*/
package main
import (
"os"
"github.com/josa42/project/cmd"
"github.com/josa42/project/pkg/logger"
)
func main() {
if lf := os.Getenv("PROJECT_LOG_FILE"); lf != "" {
defer logger.InitLogger(lf)()
}
cmd.Execute()
}
|
[
"\"PROJECT_LOG_FILE\""
] |
[] |
[
"PROJECT_LOG_FILE"
] |
[]
|
["PROJECT_LOG_FILE"]
|
go
| 1 | 0 | |
internal/strategy/strategy.go
|
package strategy
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/signal"
"syscall"
"time"
"github.com/hashicorp/go-multierror"
log "github.com/sirupsen/logrus"
"github.com/utkuozdemir/pv-migrate/internal/pvc"
"github.com/utkuozdemir/pv-migrate/migration"
"gopkg.in/yaml.v3"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/cli/values"
"helm.sh/helm/v3/pkg/getter"
"helm.sh/helm/v3/pkg/storage/driver"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
const (
Mnt2Strategy = "mnt2"
SvcStrategy = "svc"
LbSvcStrategy = "lbsvc"
LocalStrategy = "local"
helmValuesYAMLIndent = 2
srcMountPath = "/source"
destMountPath = "/dest"
)
var (
DefaultStrategies = []string{Mnt2Strategy, SvcStrategy, LbSvcStrategy}
AllStrategies = []string{Mnt2Strategy, SvcStrategy, LbSvcStrategy, LocalStrategy}
nameToStrategy = map[string]Strategy{
Mnt2Strategy: &Mnt2{},
SvcStrategy: &Svc{},
LbSvcStrategy: &LbSvc{},
LocalStrategy: &Local{},
}
helmProviders = getter.All(cli.New())
ErrStrategyNotFound = errors.New("strategy not found")
)
type Strategy interface {
// Run runs the migration for the given task execution.
//
// This is the actual implementation of the migration.
Run(a *migration.Attempt) (bool, error)
}
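// The sketch below is an editorial illustration and is not part of the original
// pv-migrate source: it shows one minimal way the Strategy interface above could be
// satisfied. The type name and log message are assumptions made for demonstration.
type noopStrategy struct{}
// Run reports success without moving any data.
func (s *noopStrategy) Run(a *migration.Attempt) (bool, error) {
a.Logger.Info("noop strategy: nothing to do")
return true, nil
}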
func GetStrategiesMapForNames(names []string) (map[string]Strategy, error) {
sts := make(map[string]Strategy)
for _, name := range names {
s, ok := nameToStrategy[name]
if !ok {
return nil, fmt.Errorf("%w: %s", ErrStrategyNotFound, name)
}
sts[name] = s
}
return sts, nil
}
func registerCleanupHook(attempt *migration.Attempt, releaseNames []string) chan<- bool {
doneCh := make(chan bool)
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
go func() {
select {
case <-signalCh:
attempt.Logger.Warn(":large_orange_diamond: Received termination signal")
cleanup(attempt, releaseNames)
os.Exit(1)
case <-doneCh:
return
}
}()
return doneCh
}
func cleanupAndReleaseHook(a *migration.Attempt, releaseNames []string, doneCh chan<- bool) {
cleanup(a, releaseNames)
doneCh <- true
}
func cleanup(a *migration.Attempt, releaseNames []string) {
mig := a.Migration
logger := a.Logger
logger.Info(":broom: Cleaning up")
var result *multierror.Error
for _, info := range []*pvc.Info{mig.SourceInfo, mig.DestInfo} {
for _, name := range releaseNames {
err := cleanupForPVC(logger, name, info)
if err != nil {
result = multierror.Append(result, err)
}
}
}
if err := result.ErrorOrNil(); err != nil {
logger.WithError(err).
Warn(":large_orange_diamond: Cleanup failed, you might want to clean up manually")
return
}
logger.Info(":sparkles: Cleanup done")
}
func cleanupForPVC(logger *log.Entry, helmReleaseName string, pvcInfo *pvc.Info) error {
ac, err := initHelmActionConfig(logger, pvcInfo)
if err != nil {
return err
}
uninstall := action.NewUninstall(ac)
uninstall.Wait = true
uninstall.Timeout = 1 * time.Minute
_, err = uninstall.Run(helmReleaseName)
if err != nil && !errors.Is(err, driver.ErrReleaseNotFound) && !apierrors.IsNotFound(err) {
return err
}
return nil
}
func initHelmActionConfig(logger *log.Entry, pvcInfo *pvc.Info) (*action.Configuration, error) {
actionConfig := new(action.Configuration)
err := actionConfig.Init(pvcInfo.ClusterClient.RESTClientGetter,
pvcInfo.Claim.Namespace, os.Getenv("HELM_DRIVER"), logger.Debugf)
if err != nil {
return nil, err
}
return actionConfig, nil
}
func getMergedHelmValues(helmValuesFile string, request *migration.Request) (map[string]interface{}, error) {
allValuesFiles := append([]string{helmValuesFile}, request.HelmValuesFiles...)
valsOptions := values.Options{
Values: request.HelmValues,
ValueFiles: allValuesFiles,
StringValues: request.HelmStringValues,
FileValues: request.HelmFileValues,
}
return valsOptions.MergeValues(helmProviders)
}
func installHelmChart(attempt *migration.Attempt, pvcInfo *pvc.Info, name string,
values map[string]interface{},
) error {
helmValuesFile, err := writeHelmValuesToTempFile(attempt.ID, values)
if err != nil {
return err
}
defer func() { _ = os.Remove(helmValuesFile) }()
helmActionConfig, err := initHelmActionConfig(attempt.Logger, pvcInfo)
if err != nil {
return err
}
install := action.NewInstall(helmActionConfig)
install.Namespace = pvcInfo.Claim.Namespace
install.ReleaseName = name
install.Wait = true
install.Timeout = 1 * time.Minute
mig := attempt.Migration
vals, err := getMergedHelmValues(helmValuesFile, mig.Request)
if err != nil {
return err
}
_, err = install.Run(mig.Chart, vals)
return err
}
func writeHelmValuesToTempFile(id string, vals map[string]interface{}) (string, error) {
file, err := ioutil.TempFile("", fmt.Sprintf("pv-migrate-vals-%s-*.yaml", id))
if err != nil {
return "", err
}
defer func() { _ = file.Close() }()
encoder := yaml.NewEncoder(file)
encoder.SetIndent(helmValuesYAMLIndent)
err = encoder.Encode(vals)
if err != nil {
return "", err
}
return file.Name(), nil
}
|
[
"\"HELM_DRIVER\""
] |
[] |
[
"HELM_DRIVER"
] |
[]
|
["HELM_DRIVER"]
|
go
| 1 | 0 | |
extensions/cauchy/tuning_setup.py
|
import os
from setuptools import setup
from pathlib import Path
import torch.cuda
from torch.utils.cpp_extension import CppExtension, CUDAExtension, BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
extensions_dir = Path(os.getenv('TUNING_SOURCE_DIR')).absolute()
assert extensions_dir.exists()
source_files=[
'cauchy.cpp',
'cauchy_cuda.cu',
]
sources = [str(extensions_dir / name) for name in source_files]
extension_name = os.getenv('TUNING_EXTENSION_NAME', default='cauchy_mult_tuning')
ext_modules = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension(
extension_name,
sources,
include_dirs=[extensions_dir],
extra_compile_args={'cxx': ['-g', '-march=native', '-funroll-loops'],
# 'nvcc': ['-O2', '-lineinfo']
'nvcc': ['-O2', '-lineinfo', '--use_fast_math']
}
)
ext_modules.append(extension)
setup(
name=extension_name,
ext_modules=ext_modules,
# cmdclass={'build_ext': BuildExtension.with_options(use_ninja=False)})
cmdclass={'build_ext': BuildExtension})
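# Editorial illustration (added, not part of the original script): since the source
# directory and extension name are read from environment variables, a typical
# invocation could look like the following (the path is an assumption):
#
#   TUNING_SOURCE_DIR=/path/to/cauchy/extension TUNING_EXTENSION_NAME=cauchy_mult_tuning \
#       python tuning_setup.py build_ext --inplace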
|
[] |
[] |
[
"TUNING_EXTENSION_NAME",
"TUNING_SOURCE_DIR"
] |
[]
|
["TUNING_EXTENSION_NAME", "TUNING_SOURCE_DIR"]
|
python
| 2 | 0 | |
pkg/cmd/git/setup/setup.go
|
package setup
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/jenkins-x/jx-helpers/v3/pkg/boot"
"github.com/jenkins-x/jx-helpers/v3/pkg/files"
"github.com/jenkins-x/jx-helpers/v3/pkg/gitclient"
"github.com/jenkins-x/jx-helpers/v3/pkg/gitclient/cli"
"github.com/jenkins-x/jx-helpers/v3/pkg/gitclient/credentialhelper"
"github.com/jenkins-x/jx-helpers/v3/pkg/gitclient/giturl"
"github.com/jenkins-x/jx-helpers/v3/pkg/homedir"
"github.com/jenkins-x/jx-helpers/v3/pkg/kube"
"github.com/jenkins-x/jx-helpers/v3/pkg/termcolor"
"github.com/jenkins-x/jx-logging/v3/pkg/log"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/jenkins-x/jx-gitops/pkg/rootcmd"
"github.com/jenkins-x/jx-helpers/v3/pkg/cmdrunner"
"github.com/jenkins-x/jx-helpers/v3/pkg/cobras/helper"
"github.com/jenkins-x/jx-helpers/v3/pkg/cobras/templates"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
cmdLong = templates.LongDesc(`
Sets up git to ensure the git user name and email is setup.
This is typically used in a pipeline to ensure git can do commits.
`)
cmdExample = templates.Examples(`
%s git setup
`)
)
// Options the options for the command
type Options struct {
Dir string
UserName string
UserEmail string
OutputFile string
Namespace string
OperatorNamespace string
SecretName string
DisableInClusterTest bool
KubeClient kubernetes.Interface
CommandRunner cmdrunner.CommandRunner
gitClient gitclient.Interface
}
// NewCmdGitSetup creates a command object for the command
func NewCmdGitSetup() (*cobra.Command, *Options) {
o := &Options{}
cmd := &cobra.Command{
Use: "setup",
Short: "Sets up git to ensure the git user name and email is setup",
Long: cmdLong,
Example: fmt.Sprintf(cmdExample, rootcmd.BinaryName),
Run: func(cmd *cobra.Command, args []string) {
err := o.Run()
helper.CheckErr(err)
},
}
cmd.Flags().StringVarP(&o.Dir, "dir", "d", "", "the directory to run the git push command from")
cmd.Flags().StringVarP(&o.UserName, "name", "n", "", "the git user name to use if one is not setup")
cmd.Flags().StringVarP(&o.UserEmail, "email", "e", "", "the git user email to use if one is not setup")
cmd.Flags().StringVarP(&o.OutputFile, "credentials-file", "", "", "The destination of the git credentials file to generate. If not specified uses $XDG_CONFIG_HOME/git/credentials or $HOME/git/credentials")
cmd.Flags().StringVarP(&o.OperatorNamespace, "operator-namespace", "", "jx-git-operator", "the namespace used by the git operator to find the secret for the git repository if running in cluster")
cmd.Flags().StringVarP(&o.Namespace, "namespace", "", "", "the namespace used to find the git operator secret for the git repository if running in cluster. Defaults to the current namespace")
cmd.Flags().StringVarP(&o.SecretName, "secret", "", "jx-boot", "the name of the Secret to find the git URL, username and password for creating a git credential if running inside the cluster")
cmd.Flags().BoolVarP(&o.DisableInClusterTest, "fake-in-cluster", "", false, "for testing: lets you fake running this command inside a kubernetes cluster so that it can create the file: $XDG_CONFIG_HOME/git/credentials or $HOME/git/credentials")
return cmd, o
}
// Run implements the command
func (o *Options) Run() error {
gitClient := o.GitClient()
// let's make sure there's a git config home dir
homeDir := GetConfigHome()
err := os.MkdirAll(homeDir, files.DefaultDirWritePermissions)
if err != nil {
return errors.Wrapf(err, "failed to ensure git config home directory exists %s", homeDir)
}
// let's fetch the credentials so we can default the UserName if it's not specified
credentials, err := o.findCredentials()
if err != nil {
return errors.Wrap(err, "creating git credentials")
}
_, _, err = gitclient.SetUserAndEmail(gitClient, o.Dir, o.UserName, o.UserEmail, o.DisableInClusterTest)
if err != nil {
return errors.Wrapf(err, "failed to setup git user and email")
}
err = gitclient.SetCredentialHelper(gitClient, "")
if err != nil {
return errors.Wrapf(err, "failed to setup credential store")
}
if o.DisableInClusterTest || IsInCluster() {
outFile, err := o.determineOutputFile()
if err != nil {
return errors.Wrap(err, "unable to determine for git credentials")
}
return o.createGitCredentialsFile(outFile, credentials)
}
return nil
}
func (o *Options) GitClient() gitclient.Interface {
if o.gitClient == nil {
o.gitClient = cli.NewCLIClient("", o.CommandRunner)
}
return o.gitClient
}
// findCredentials detects the git operator secret so we have default credentials
func (o *Options) findCredentials() ([]credentialhelper.GitCredential, error) {
var credentialList []credentialhelper.GitCredential
var err error
o.KubeClient, o.Namespace, err = kube.LazyCreateKubeClientAndNamespace(o.KubeClient, o.Namespace)
if err != nil {
return nil, errors.Wrapf(err, "failed to create kube client")
}
bootSecret, err := boot.LoadBootSecret(o.KubeClient, o.Namespace, o.OperatorNamespace, o.SecretName, o.UserName)
if err != nil {
return nil, errors.Wrapf(err, "failed to load the boot secret")
}
if bootSecret == nil {
return nil, errors.Errorf("failed to find the boot secret")
}
gitURL := bootSecret.URL
gitProviderURL := bootSecret.GitProviderURL
if gitURL != "" && gitProviderURL == "" {
// let's convert the git URL into a provider URL
gitInfo, err := giturl.ParseGitURL(gitURL)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse git URL %s", gitURL)
}
gitProviderURL = gitInfo.HostURL()
}
if o.UserName == "" {
o.UserName = bootSecret.Username
}
password := bootSecret.Password
credential, err := credentialhelper.CreateGitCredentialFromURL(gitProviderURL, o.UserName, password)
if err != nil {
return nil, errors.Wrapf(err, "invalid git auth information")
}
credentialList = append(credentialList, credential)
return credentialList, nil
}
func (o *Options) determineOutputFile() (string, error) {
outFile := o.OutputFile
if outFile == "" {
outFile = GitCredentialsFile()
}
dir, _ := filepath.Split(outFile)
if dir != "" {
err := os.MkdirAll(dir, files.DefaultDirWritePermissions)
if err != nil {
return "", err
}
}
return outFile, nil
}
// createGitCredentialsFile writes the given git credentials into the provided file name
func (o *Options) createGitCredentialsFile(fileName string, credentials []credentialhelper.GitCredential) error {
data, err := o.GitCredentialsFileData(credentials)
if err != nil {
return errors.Wrap(err, "creating git credentials")
}
if err := ioutil.WriteFile(fileName, data, files.DefaultDirWritePermissions); err != nil {
return fmt.Errorf("failed to write to %s: %s", fileName, err)
}
log.Logger().Infof("Generated Git credentials file %s", termcolor.ColorInfo(fileName))
return nil
}
// GitCredentialsFileData takes the given git credentials and writes them into a byte array.
func (o *Options) GitCredentialsFileData(credentials []credentialhelper.GitCredential) ([]byte, error) {
var buffer bytes.Buffer
for _, gitCredential := range credentials {
u, err := gitCredential.URL()
if err != nil {
log.Logger().Warnf("Ignoring incomplete git credentials %q", gitCredential)
continue
}
buffer.WriteString(u.String() + "\n")
// Write the https protocol in case only https is set for completeness
if u.Scheme == "http" {
u.Scheme = "https"
buffer.WriteString(u.String() + "\n")
}
}
return buffer.Bytes(), nil
}
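// Editorial note (added, not part of the original source): each line written above is
// a URL carrying the username and password in git credential-store form, for example
// https://bot:[email protected] (placeholder values, not real credentials), plus an
// extra https variant when the original scheme was http.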
// IsInCluster tells if we are running in-cluster
func IsInCluster() bool {
_, err := rest.InClusterConfig()
return err == nil
}
// GitCredentialsFile returns the location of the git credentials file
func GitCredentialsFile() string {
cfgHome := GetConfigHome()
return filepath.Join(cfgHome, "git", "credentials")
}
// GetConfigHome returns the home dir
func GetConfigHome() string {
cfgHome := os.Getenv("XDG_CONFIG_HOME")
if cfgHome == "" {
cfgHome = homedir.HomeDir()
}
if cfgHome == "" {
cfgHome = "."
}
return cfgHome
}
|
[
"\"XDG_CONFIG_HOME\""
] |
[] |
[
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_CONFIG_HOME"]
|
go
| 1 | 0 | |
src/test/java/com/saucelabs/junit/WebDriverWithHelperJSONTest.java
|
package com.saucelabs.junit;
import com.saucelabs.common.SauceOnDemandAuthentication;
import com.saucelabs.common.SauceOnDemandSessionIdProvider;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.openqa.selenium.Platform;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.openqa.selenium.remote.RemoteWebDriver;
import klipfolio.saas.api.APITesting_Utils;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import static org.junit.Assert.assertEquals;
/**
* This WebDriverWithHelperJSONTest modifies WebDriverWithHelperTest by adding JUnit
* Parameterized runners and a three argument constructor to loop through an array and
* run tests against all of the browser/platform combinations specified in the array.
*
* Each array element (browser/platform combination) runs as a new Sauce job that displays in
* the Jenkins project's Sauce Jobs Report with pass/fail results. A job detail report with
* video is provided for each of these Sauce jobs.
*
* The pass/fail result for each test is created via the <a href="https://github.com/saucelabs/sauce-java/tree/master/junit">Sauce JUnit</a> helper classes,
* which use the Sauce REST API to mark each Sauce job (each test) as passed/failed.
*
* In order to use the {@link com.saucelabs.junit.SauceOnDemandTestWatcher} to see if the tests pass or fail
* in the Sauce Jobs Report in your Jenkins projects, this test must implement the
* {@link com.saucelabs.common.SauceOnDemandSessionIdProvider} interface as discussed in the code comments below.
*
* @author Ross Rowe
* @author Bernie Cohen - modified to support parameterized testing against multiple environments
*/
@RunWith(Parameterized.class)
public class WebDriverWithHelperJSONTest implements SauceOnDemandSessionIdProvider {
private WebDriver webDriver;
private static DesiredCapabilities capabilities;
private static Platform ANDROID, LINUX, MAC, UNIX, VISTA, WINDOWS, XP, platformValue;
private String browser, browserVersion, platform, sessionId = "";
private final String ADMIN_EMAIL = "[email protected]";
private final String ADMIN_PWD = "d4shb0ard";
private final String DPN_TEST_DB_ID = "f1d7d05d4c4bdcc0e494b6504c6592a2";
private final long NEWVAL = System.nanoTime();
public String server = System.getProperty("aut.server");
// Create an array of available platforms from the "private static Platform" declaration above
Platform[] platformValues = Platform.values();
public Platform setPlatformCapabilities(String platformParam) {
for (int p = 0; p < platformValues.length; p++) {
platformValue = platformValues[p];
if (platformValue.toString().equals(platformParam)) break;
}
return platformValue;
}
/**
* Constructs a {@link com.saucelabs.common.SauceOnDemandAuthentication} instance using the supplied Sauce
* user name and access key. To use the authentication supplied by environment variables or
* from an external file, use the no-arg {@link com.saucelabs.common.SauceOnDemandAuthentication} constructor.
*/
public SauceOnDemandAuthentication authentication = new SauceOnDemandAuthentication();
/**
* JUnit Rule that marks Sauce Jobs as passed/failed when the test succeeds or fails.
*/
public @Rule
SauceOnDemandTestWatcher resultReportingTestWatcher = new SauceOnDemandTestWatcher(this, authentication);
/**
* JUnit Rule that records the test name of the current test. When this is referenced
* during the creation of {@link org.openqa.selenium.remote.DesiredCapabilities}, the test method name is assigned
* to the Sauce Job name and recorded in Jenkins Console Output and in the Sauce Jobs
* Report in the Jenkins project's home page.
*/
public @Rule TestName testName = new TestName();
/**
* JUnit annotation that runs each test once for each item in a Collection.
*
* Feel free to add as many additional parameters as you like to the capabilitiesParams array.
*
* Note: If you add parameters for the MAC platform, make sure that you have Mac minutes in
* your <a href="https://saucelabs.com/login">Sauce account</a> or the test will fail.
*/
@Parameters
public static Collection<Object[]> data() {
String json = System.getenv("SAUCE_ONDEMAND_BROWSERS");
if (json == null) {
json = System.getenv("bamboo_SAUCE_ONDEMAND_BROWSERS");
}
List<Object[]> browsers = new ArrayList<Object[]>();
JSONArray browserArray = null;
try {
browserArray = new JSONArray(json);
for (int i =0;i<browserArray.length();i++) {
JSONObject browserJSON = browserArray.getJSONObject(i);
browsers.add(new Object[]{browserJSON.get("browser"), browserJSON.get("browser-version"), browserJSON.get("os")});
}
} catch (JSONException e) {
e.printStackTrace();
}
return browsers;
}
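// Editorial illustration (added, not from the original project): data() above expects
// SAUCE_ONDEMAND_BROWSERS to hold a JSON array whose objects use the keys "browser",
// "browser-version" and "os", for example (values are made up):
// [{"browser": "firefox", "browser-version": "25", "os": "Windows 2012"},
//  {"browser": "chrome", "browser-version": "31", "os": "OS X 10.9"}]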
public WebDriverWithHelperJSONTest(String s1, String s2, String s3) {
browser = s1;
browserVersion = s2;
platform = s3;
}
/**
* Creates a new {@link org.openqa.selenium.remote.RemoteWebDriver} instance that is used to run WebDriver tests
* using Sauce.
*
* @throws Exception thrown if an error occurs constructing the WebDriver
*/
@Test
public void validateTitle() throws Exception {
capabilities = new DesiredCapabilities(browser, browserVersion, setPlatformCapabilities(platform));
capabilities.setCapability("name", this.getClass().getName() + "." + testName.getMethodName());
this.webDriver = new RemoteWebDriver(
new URL("http://" + authentication.getUsername() + ":" + authentication.getAccessKey() + "@ondemand.saucelabs.com:80/wd/hub"),
capabilities);
this.sessionId = ((RemoteWebDriver)webDriver).getSessionId().toString();
APITesting_Utils.setRestAssuredDefaults(ADMIN_EMAIL, ADMIN_PWD);
//Update the datasource
given().
body("{\"value\":\"" + NEWVAL + "\"}").
expect().
statusCode(200).
body("meta.status", equalTo(200)).
body("meta.success", equalTo(true)).
put(APITesting_Utils.DATASOURCES_PATH + "/" + DPN_TEST_DB_ID + APITesting_Utils.DATA_PATH);
if (browserVersion.isEmpty()) browserVersion = "unspecified";
String browserName = String.format("%-19s", browser).replaceAll(" ", ".").replaceFirst("[.]", " ");
String browserVer = String.format("%-19s", browserVersion).replaceAll(" ", ".");
System.out.println("@Test testDPN() testing browser/version: " + browserName + browserVer + "platform: " + platform);
webDriver.get(server + "/login");
webDriver.manage().window().maximize();
assertEquals("Sign In to Klipfolio Dashboard", webDriver.getTitle());
//webDriver.get(server + "/dashboard?useDpn=" + dpn);
if (webDriver != null) {
try {
webDriver.get(server + "/users/logout");
} finally {
webDriver.quit();
}
}
}
@Override
public String getSessionId() {
return sessionId;
}
}
|
[
"\"SAUCE_ONDEMAND_BROWSERS\"",
"\"bamboo_SAUCE_ONDEMAND_BROWSERS\""
] |
[] |
[
"SAUCE_ONDEMAND_BROWSERS",
"bamboo_SAUCE_ONDEMAND_BROWSERS"
] |
[]
|
["SAUCE_ONDEMAND_BROWSERS", "bamboo_SAUCE_ONDEMAND_BROWSERS"]
|
java
| 2 | 0 | |
go/mysql/endtoend/main_test.go
|
/*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endtoend
import (
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
"testing"
"vitess.io/vitess/go/mysql"
vtenv "vitess.io/vitess/go/vt/env"
"vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/tlstest"
"vitess.io/vitess/go/vt/vttest"
vttestpb "vitess.io/vitess/go/vt/proto/vttest"
)
var (
connParams mysql.ConnParams
)
// assertSQLError makes sure we get the right error.
func assertSQLError(t *testing.T, err error, code int, sqlState string, subtext string, query string) {
t.Helper()
if err == nil {
t.Fatalf("was expecting SQLError %v / %v / %v but got no error.", code, sqlState, subtext)
}
serr, ok := err.(*mysql.SQLError)
if !ok {
t.Fatalf("was expecting SQLError %v / %v / %v but got: %v", code, sqlState, subtext, err)
}
if serr.Num != code {
t.Fatalf("was expecting SQLError %v / %v / %v but got code %v", code, sqlState, subtext, serr.Num)
}
if serr.State != sqlState {
t.Fatalf("was expecting SQLError %v / %v / %v but got state %v", code, sqlState, subtext, serr.State)
}
if subtext != "" && !strings.Contains(serr.Message, subtext) {
t.Fatalf("was expecting SQLError %v / %v / %v but got message %v", code, sqlState, subtext, serr.Message)
}
if serr.Query != query {
t.Fatalf("was expecting SQLError %v / %v / %v with Query '%v' but got query '%v'", code, sqlState, subtext, query, serr.Query)
}
}
// runMysql forks a mysql command line process connecting to the provided server.
func runMysql(t *testing.T, params *mysql.ConnParams, command string) (string, bool) {
dir, err := vtenv.VtMysqlRoot()
if err != nil {
t.Fatalf("vtenv.VtMysqlRoot failed: %v", err)
}
name, err := binaryPath(dir, "mysql")
if err != nil {
t.Fatalf("binaryPath failed: %v", err)
}
// The args contain '-v' 3 times, to switch to very verbose output.
// In particular, it has the message:
// Query OK, 1 row affected (0.00 sec)
version, getErr := mysqlctl.GetVersionString()
f, v, err := mysqlctl.ParseVersionString(version)
if getErr != nil || err != nil {
f, v, err = mysqlctl.GetVersionFromEnv()
if err != nil {
vtenvMysqlRoot, _ := vtenv.VtMysqlRoot()
message := fmt.Sprintf(`could not auto-detect MySQL version. You may need to set your PATH so a mysqld binary can be found, or set the environment variable MYSQL_FLAVOR if mysqld is not available locally:
PATH: %s
VT_MYSQL_ROOT: %s
VTROOT: %s
vtenv.VtMysqlRoot(): %s
MYSQL_FLAVOR: %s
`,
os.Getenv("PATH"),
os.Getenv("VT_MYSQL_ROOT"),
os.Getenv("VTROOT"),
vtenvMysqlRoot,
os.Getenv("MYSQL_FLAVOR"))
panic(message)
}
}
t.Logf("Using flavor: %v, version: %v", f, v)
args := []string{
"-v", "-v", "-v",
}
args = append(args, "-e", command)
if params.UnixSocket != "" {
args = append(args, "-S", params.UnixSocket)
} else {
args = append(args,
"-h", params.Host,
"-P", fmt.Sprintf("%v", params.Port))
}
if params.Uname != "" {
args = append(args, "-u", params.Uname)
}
if params.Pass != "" {
args = append(args, "-p"+params.Pass)
}
if params.DbName != "" {
args = append(args, "-D", params.DbName)
}
if params.SslEnabled() {
if f == mysqlctl.FlavorMySQL && v.Major == 5 && v.Minor == 7 || v.Major == 8 {
args = append(args,
fmt.Sprintf("--ssl-mode=%s", params.EffectiveSslMode()))
} else {
args = append(args,
"--ssl",
"--ssl-verify-server-cert")
}
args = append(args,
"--ssl-ca", params.SslCa,
"--ssl-cert", params.SslCert,
"--ssl-key", params.SslKey)
}
env := []string{
"LD_LIBRARY_PATH=" + path.Join(dir, "lib/mysql"),
}
cmd := exec.Command(name, args...)
cmd.Env = env
cmd.Dir = dir
out, err := cmd.CombinedOutput()
output := string(out)
if err != nil {
return output, false
}
return output, true
}
// binaryPath does a limited path lookup for a command,
// searching only within sbin and bin in the given root.
//
// FIXME(alainjobart) move this to vt/env, and use it from
// go/vt/mysqlctl too.
func binaryPath(root, binary string) (string, error) {
subdirs := []string{"sbin", "bin"}
for _, subdir := range subdirs {
binPath := path.Join(root, subdir, binary)
if _, err := os.Stat(binPath); err == nil {
return binPath, nil
}
}
return "", fmt.Errorf("%s not found in any of %s/{%s}",
binary, root, strings.Join(subdirs, ","))
}
func TestMain(m *testing.M) {
flag.Parse() // Do not remove this comment, import into google3 depends on it
exitCode := func() int {
// Create the certs.
root, err := ioutil.TempDir("", "TestTLSServer")
if err != nil {
fmt.Fprintf(os.Stderr, "TempDir failed: %v", err)
return 1
}
defer os.RemoveAll(root)
tlstest.CreateCA(root)
tlstest.CreateSignedCert(root, tlstest.CA, "01", "server", "localhost")
tlstest.CreateSignedCert(root, tlstest.CA, "02", "client", "Client Cert")
// Create the extra SSL my.cnf lines.
cnf := fmt.Sprintf(`
ssl-ca=%v/ca-cert.pem
ssl-cert=%v/server-cert.pem
ssl-key=%v/server-key.pem
`, root, root, root)
extraMyCnf := path.Join(root, "ssl_my.cnf")
if err := ioutil.WriteFile(extraMyCnf, []byte(cnf), os.ModePerm); err != nil {
fmt.Fprintf(os.Stderr, "ioutil.WriteFile(%v) failed: %v", extraMyCnf, err)
return 1
}
// For LargeQuery tests
cnf = "max_allowed_packet=100M\n"
maxPacketMyCnf := path.Join(root, "max_packet.cnf")
if err := ioutil.WriteFile(maxPacketMyCnf, []byte(cnf), os.ModePerm); err != nil {
fmt.Fprintf(os.Stderr, "ioutil.WriteFile(%v) failed: %v", maxPacketMyCnf, err)
return 1
}
// Launch MySQL.
// We need a Keyspace in the topology, so the DbName is set.
// We need a Shard too, so the database 'vttest' is created.
cfg := vttest.Config{
Topology: &vttestpb.VTTestTopology{
Keyspaces: []*vttestpb.Keyspace{
{
Name: "vttest",
Shards: []*vttestpb.Shard{
{
Name: "0",
DbNameOverride: "vttest",
},
},
},
},
},
OnlyMySQL: true,
ExtraMyCnf: []string{extraMyCnf, maxPacketMyCnf},
}
cluster := vttest.LocalCluster{
Config: cfg,
}
if err := cluster.Setup(); err != nil {
fmt.Fprintf(os.Stderr, "could not launch mysql: %v\n", err)
return 1
}
defer cluster.TearDown()
connParams = cluster.MySQLConnParams()
// Add the SSL parts, but they're not enabled until
// the flag is set.
connParams.SslCa = path.Join(root, "ca-cert.pem")
connParams.SslCert = path.Join(root, "client-cert.pem")
connParams.SslKey = path.Join(root, "client-key.pem")
// Uncomment to sleep and be able to connect to MySQL
// fmt.Printf("Connect to MySQL using parameters:\n")
// json.NewEncoder(os.Stdout).Encode(connParams)
// time.Sleep(10 * time.Minute)
return m.Run()
}()
os.Exit(exitCode)
}
|
[
"\"PATH\"",
"\"VT_MYSQL_ROOT\"",
"\"VTROOT\"",
"\"MYSQL_FLAVOR\""
] |
[] |
[
"MYSQL_FLAVOR",
"VT_MYSQL_ROOT",
"PATH",
"VTROOT"
] |
[]
|
["MYSQL_FLAVOR", "VT_MYSQL_ROOT", "PATH", "VTROOT"]
|
go
| 4 | 0 | |
src/test/java/io/epirus/console/ProjectTest.java
|
/*
* Copyright 2020 Web3 Labs Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package io.epirus.console;
import java.io.File;
import java.io.IOException;
import java.security.InvalidAlgorithmParameterException;
import java.security.NoSuchAlgorithmException;
import java.security.NoSuchProviderException;
import io.epirus.console.config.ConfigManager;
import io.epirus.console.project.utils.ClassExecutor;
import io.epirus.console.project.utils.Folders;
import org.junit.jupiter.api.BeforeEach;
import org.web3j.crypto.CipherException;
import org.web3j.crypto.WalletUtils;
public class ProjectTest extends ClassExecutor {
protected static File workingDirectory = Folders.tempBuildFolder();
protected String absoluteWalletPath;
@BeforeEach
public void createEpirusProject()
throws IOException, NoSuchAlgorithmException, NoSuchProviderException,
InvalidAlgorithmParameterException, CipherException {
ConfigManager.setDevelopment();
final File testWalletDirectory =
new File(workingDirectory.getPath() + File.separator + "keystore");
testWalletDirectory.mkdirs();
absoluteWalletPath =
testWalletDirectory
+ File.separator
+ WalletUtils.generateNewWalletFile("", testWalletDirectory);
final String[] args = {"new", "-p", "org.com", "-n", "Test", "-o" + workingDirectory};
int result = new EpirusCommand(System.getenv(), args).parse();
if (result != 0) {
throw new RuntimeException("Failed to generate test project");
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
executor/mock/standalone/standalone_test.go
|
package standalone
import (
"flag"
"fmt"
"math/rand"
"os"
"reflect"
"runtime"
"strconv"
"strings"
"testing"
"time"
"io/ioutil"
"github.com/Netflix/titus-executor/api/netflix/titus"
"github.com/Netflix/titus-executor/executor/mock"
"github.com/Netflix/titus-executor/executor/runtime/docker"
"github.com/mesos/mesos-go/mesosproto"
"github.com/pborman/uuid"
log "github.com/sirupsen/logrus"
"gopkg.in/urfave/cli.v1"
)
var standalone bool
func init() {
if debug, err := strconv.ParseBool(os.Getenv("DEBUG")); err == nil && debug {
log.SetLevel(log.DebugLevel)
}
flag.BoolVar(&standalone, "standalone", false, "Enable standalone tests")
flag.Parse()
app := cli.NewApp()
app.Flags = docker.Flags
app.Writer = ioutil.Discard
_ = app.Run(os.Args)
}
type testImage struct {
name string
tag string
}
var (
alpine = testImage{
name: "titusoss/alpine",
tag: "3.5",
}
ubuntu = testImage{
name: "titusoss/ubuntu-test",
tag: "20171025-1508915634",
}
byDigest = testImage{
name: "titusoss/by-digest",
tag: "latest",
}
bigImage = testImage{
name: "titusoss/big-image",
tag: "20171025-1508900976",
}
noEntrypoint = testImage{
name: "titusoss/no-entrypoint-test",
tag: "20171109-1510275133",
}
ignoreSignals = testImage{
name: "titusoss/ignore-signals",
tag: "20180122-1516662139",
}
)
// This file still uses log as opposed to using the testing library's built-in logging framework.
// Since we do not configure Logrus, we will just log to stderr.
func TestStandalone(t *testing.T) {
if !standalone {
t.Skipf("Standalone tests are not enabled! Activate with the -standalone cmdline flag.")
}
testFunctions := []func(*testing.T){
testSimpleJob,
testNoCapPtraceByDefault,
testCanAddCapabilities,
testDefaultCapabilities,
testStdoutGoesToLogFile,
testStderrGoesToLogFile,
testImageByDigest,
testImageByDigestIgnoresTag,
testImageInvalidDigestFails,
testImageNonExistingDigestFails,
testImagePullError,
testBadEntrypoint,
testNoEntrypoint,
testCanWriteInLogsAndSubDirs,
testShutdown,
testCancelPullBigImage,
testMetadataProxyInjection,
testMetdataProxyDefaultRoute,
testSimpleJobWithBadEnvironment,
testTerminateTimeout,
}
for _, fun := range testFunctions {
fullName := runtime.FuncForPC(reflect.ValueOf(fun).Pointer()).Name()
splitName := strings.Split(fullName, ".")
funName := splitName[len(splitName)-1]
t.Run(strings.Title(funName), makeTestParallel(fun))
}
}
func makeTestParallel(f func(*testing.T)) func(*testing.T) {
return func(t *testing.T) {
t.Parallel()
f(t)
}
}
func testSimpleJob(t *testing.T) {
ji := &mock.JobInput{
ImageName: alpine.name,
Version: alpine.tag,
Entrypoint: "echo Hello Titus",
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testSimpleJobWithBadEnvironment(t *testing.T) {
ji := &mock.JobInput{
ImageName: alpine.name,
Version: alpine.tag,
Entrypoint: "echo Hello Titus",
Environment: map[string]string{
"ksrouter.filter.xpath.expression": `(XXXXX("XXXXXX") = "XXXXXXX" XXX XXX XXXXX("XXXX") XX ("XXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXX", "XXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXXXX", "XXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXX")) XX (XXXXX("XXXXXX") = "XXXXXXXX" XXX XXX XXXXX("XXXX") XX ("XXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXX", "XXXX", "XXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXX", "XXXXXXXXXXX", "XXXXXXXXXXXXXXXX", "XXXXXXXXX", "XXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXXXX", "XXXXXXXXXXXXXX", "XXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXX", "XXXXXXXX", "XXXXXXXXXXXXXX", "XXXXXXXXXXXXXX", "XXXXXXXXXXX", "XXXXXXXXXXX", "XXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXX", "XXXXXXX", "XXXX", "XXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXX", "XXXXXXXXXX", "XXXXXXXXXXXXXXX", "XXXXXXXXXXX")) XX (XXXXX("XXXXXX") = "XXX" XXX XXX XXXXX("XXXX") XX ("XXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXX", "XXXXXXXXXXX", "XXXXXXXXXXXXXXXX", "XXXXX", "XXX", "XXXXXXXXXXXXXXXXXXXXXXXXXX")) XX (XXXXX("XXXXXX") = "XXX" XXX XXXXX("XXXX") XX ("XXXXXXXXXXXXX", "XXXXXXXXXXXXXXX", "XXXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXX", "XXXXX", "XXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXXX")) XX (XXXXX("XXXXXX") = "XXXX" XXX XXXXX("XXXX") XX ("XXXXXXXXXXXXX", "XXXXXXXXXXXXXXX", "XXXXXXXXXXXX", "XXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXX", "XXXXX", "XXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXX", "XXXXXXXXXXX", "XXXXXXXXXXXXX", "XXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXXXXXX"))`,
"BAD": `"`,
"AlsoBAD": "",
},
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testNoCapPtraceByDefault(t *testing.T) {
ji := &mock.JobInput{
ImageName: ubuntu.name,
Version: ubuntu.tag,
Entrypoint: "/bin/sh -c '! (/sbin/capsh --print | tee /logs/no-ptrace.log | grep sys_ptrace')",
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testCanAddCapabilities(t *testing.T) {
ji := &mock.JobInput{
ImageName: ubuntu.name,
Version: ubuntu.tag,
Entrypoint: "/bin/sh -c '/sbin/capsh --print | tee /logs/ptrace.log | grep sys_ptrace'",
Capabilities: &titus.ContainerInfo_Capabilities{
Add: []titus.ContainerInfo_Capabilities_Capability{
titus.ContainerInfo_Capabilities_SYS_PTRACE,
},
},
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
// ensure the default capability set matches what docker and rkt do:
// https://github.com/docker/docker/blob/master/oci/defaults_linux.go#L62-L77
// https://github.com/appc/spec/blob/master/spec/ace.md#linux-isolators
func testDefaultCapabilities(t *testing.T) {
ji := &mock.JobInput{
ImageName: ubuntu.name,
Version: ubuntu.tag,
// Older kernels (3.13 on jenkins) have a different bitmask, so we check both the new and old formats
Entrypoint: `/bin/bash -c 'cat /proc/self/status | tee /logs/capabilities.log | egrep "CapEff:\s+(00000020a80425fb|00000000a80425fb)"'`,
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testStdoutGoesToLogFile(t *testing.T) {
message := fmt.Sprintf("Some message with ID=%s, and a suffix.", uuid.New())
cmd := fmt.Sprintf(`sh -c 'echo "%[1]s" && sleep 1 && grep "%[1]s" /logs/stdout'`, message)
ji := &mock.JobInput{
ImageName: alpine.name,
Version: alpine.tag,
Entrypoint: cmd,
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testStderrGoesToLogFile(t *testing.T) {
message := fmt.Sprintf("Some message with ID=%s, and a suffix.", uuid.New())
cmd := fmt.Sprintf(`sh -c 'echo "%[1]s" >&2 && sleep 1 && grep "%[1]s" /logs/stderr'`, message)
ji := &mock.JobInput{
ImageName: alpine.name,
Version: alpine.tag,
Entrypoint: cmd,
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testImageByDigest(t *testing.T) {
digest := "sha256:2fc24d2a383c452ffe1332a60f94c618f34ece3e400c0b30c8f943bd7aeec033"
cmd := `grep not-latest /etc/who-am-i`
ji := &mock.JobInput{
ImageName: byDigest.name,
ImageDigest: digest,
Entrypoint: cmd,
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testImageByDigestIgnoresTag(t *testing.T) {
digest := "sha256:2fc24d2a383c452ffe1332a60f94c618f34ece3e400c0b30c8f943bd7aeec033"
cmd := `grep not-latest /etc/who-am-i`
ji := &mock.JobInput{
ImageName: byDigest.name,
Version: "20171024-1508896310", // should be ignored
// This version (tag) of the image has the digest:
// sha256:652d2dd17041cb520feae4de0a976df29af4cd1d002d19ec7c8d5204f8ab1518
// and it doesn't have not-latest in /etc/who-am-i
ImageDigest: digest,
Entrypoint: cmd,
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testImageInvalidDigestFails(t *testing.T) {
digest := "some-invalid-digest"
ji := &mock.JobInput{
ImageName: byDigest.name,
Version: "latest", // should be ignored
ImageDigest: digest,
Entrypoint: fmt.Sprintf(`/bin/true`),
}
status, err := mock.RunJob(ji, false)
if err != nil {
t.Fatal(err)
}
if status != mesosproto.TaskState_TASK_FAILED.String() {
t.Fatalf("Expected status=FAILED, got: %s", status)
}
}
func testImageNonExistingDigestFails(t *testing.T) {
digest := "sha256:12345123456c6f231ea3adc7960cc7f753ebb0099999999999999a9b4dfdfdcd"
ji := &mock.JobInput{
ImageName: byDigest.name,
ImageDigest: digest,
Entrypoint: fmt.Sprintf(`/bin/true`),
}
status, err := mock.RunJob(ji, false)
if err != nil {
t.Fatal(err)
}
if status != mesosproto.TaskState_TASK_FAILED.String() {
t.Fatalf("Expected status=FAILED, got: %s", status)
}
}
func testImagePullError(t *testing.T) {
ji := &mock.JobInput{
ImageName: alpine.name,
Version: "latest1",
Entrypoint: "/usr/bin/true",
}
status, err := mock.RunJob(ji, false)
if err != nil {
t.Fatal(err)
}
if status != mesosproto.TaskState_TASK_FAILED.String() {
t.Fatalf("Expected status=FAILED, got: %s", status)
}
}
func testCancelPullBigImage(t *testing.T) { // nolint: gocyclo
jobRunner := mock.NewJobRunner()
bigImageJobID := fmt.Sprintf("Skynet-%v%v", rand.Intn(1000), time.Now().Second())
testResultBigImage := jobRunner.StartJob(&mock.JobInput{
JobID: bigImageJobID,
ImageName: bigImage.name,
Version: bigImage.tag,
})
select {
case taskStatus := <-testResultBigImage.UpdateChan:
if taskStatus.State.String() != "TASK_STARTING" {
t.Fatal("Task never observed in TASK_STARTING, instead: ", taskStatus)
}
case <-time.After(15 * time.Second):
t.Fatal("Spent too long waiting for task starting")
}
if err := jobRunner.KillTask(); err != nil {
t.Fatal("Could not stop task: ", err)
}
timeOut := time.After(30 * time.Second)
for {
select {
case taskStatus := <-testResultBigImage.UpdateChan:
// t.Log("Observed task status: ", taskStatus)
if taskStatus.State.String() == "TASK_RUNNING" {
t.Fatalf("Task %s started after killTask %v", testResultBigImage.TaskID, taskStatus)
}
if taskStatus.State.String() == "TASK_KILLED" || taskStatus.State.String() == "TASK_LOST" {
t.Logf("Task %s successfully terminated with status %s", testResultBigImage.TaskID, taskStatus.State.String())
goto big_task_killed
}
case <-timeOut:
t.Fatal("Cancel failed to stop job in time")
}
}
big_task_killed:
// We do this here, otherwise a stuck executor can prevent this from exiting.
jobRunner.StopExecutor()
}
func testBadEntrypoint(t *testing.T) {
ji := &mock.JobInput{
ImageName: alpine.name,
Version: alpine.tag,
Entrypoint: "bad",
}
// We expect this to fail
if mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testNoEntrypoint(t *testing.T) {
ji := &mock.JobInput{
ImageName: noEntrypoint.name,
Version: noEntrypoint.tag,
}
// We expect this to fail
if mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testCanWriteInLogsAndSubDirs(t *testing.T) {
cmd := `sh -c "mkdir -p /logs/prana && echo begining > /logs/prana/prana.log && ` +
`mv /logs/prana/prana.log /logs/prana/prana-2016.log && echo ending >> /logs/out"`
ji := &mock.JobInput{
ImageName: alpine.name,
Version: alpine.tag,
Entrypoint: cmd,
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testShutdown(t *testing.T) {
ji := &mock.JobInput{
ImageName: alpine.name,
Version: alpine.tag,
Entrypoint: "sleep 6000",
}
jobRunner := mock.NewJobRunner()
testResult := jobRunner.StartJob(ji)
taskRunning := make(chan bool, 10)
go func() {
for {
select {
case status := <-testResult.UpdateChan:
if status.State.String() == "TASK_RUNNING" {
taskRunning <- true
} else if mock.IsTerminalState(status.State) {
if status.State.String() != "TASK_KILLED" {
t.Errorf("Task %s not killed successfully, %s!", testResult.TaskID, status.State.String())
}
taskRunning <- false
return
}
case <-time.After(time.Second * 60):
t.Errorf("Task %s did not reach RUNNING - timed out", testResult.TaskID)
taskRunning <- false
return
}
}
}()
<-taskRunning
t.Logf("Task is running, stopping executor")
jobRunner.StopExecutor()
}
func testMetadataProxyInjection(t *testing.T) {
ji := &mock.JobInput{
ImageName: ubuntu.name,
Version: ubuntu.tag,
Entrypoint: "/bin/bash -c 'curl -sf http://169.254.169.254/latest/meta-data/local-ipv4 | grep 1.2.3.4'",
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testMetdataProxyDefaultRoute(t *testing.T) {
ji := &mock.JobInput{
ImageName: ubuntu.name,
Version: ubuntu.tag,
Entrypoint: `/bin/bash -c 'curl -sf --interface $(ip route get 4.2.2.2|grep -E -o "src [0-9.]+"|cut -f2 -d" ") http://169.254.169.254/latest/meta-data/local-ipv4'`,
}
if !mock.RunJobExpectingSuccess(ji, false) {
t.Fail()
}
}
func testTerminateTimeout(t *testing.T) {
// Start the executor
jobRunner := mock.NewJobRunner()
defer jobRunner.StopExecutorAsync()
// Submit a job that runs for a long time and does
// NOT exit on SIGTERM
ji := &mock.JobInput{
ImageName: ignoreSignals.name,
Version: ignoreSignals.tag,
KillWaitSeconds: 20,
}
jobResponse := jobRunner.StartJob(ji)
// Wait until the task is running
for {
status := <-jobResponse.UpdateChan
if status.State.String() == "TASK_RUNNING" {
break
}
}
// Submit a request to kill the job. Since the
// job does not exit on SIGTERM we expect the kill
// to take at least 20 seconds
killTime := time.Now()
if err := jobRunner.KillTask(); err != nil {
t.Fail()
}
for status := range jobResponse.UpdateChan {
if mock.IsTerminalState(status.State) {
if status.State.String() != "TASK_KILLED" {
t.Fail()
}
if time.Since(killTime) < 20*time.Second {
t.Fatal("Task was killed too quickly")
}
return
}
}
t.Fail()
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
HackerRank Solutions/Java/Grading Students.java
|
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
class Result {
/*
* Complete the 'gradingStudents' function below.
*
* The function is expected to return an INTEGER_ARRAY.
* The function accepts INTEGER_ARRAY grades as parameter.
*/
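    // Rounding rule implemented below (as described by the problem statement):
    // grades below 38 are left unchanged; otherwise, if the next multiple of 5
    // is less than 3 away, the grade is rounded up to that multiple.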
public static List<Integer> gradingStudents(List<Integer> grades) {
List<Integer> newGrades = new ArrayList<Integer>();
for (Integer i : grades) {
if (i < 38) {
newGrades.add(i);
} else {
if ((i % 5) == 4 || (i % 5) == 3) {
newGrades.add(i + 5 - (i % 5));
} else {
newGrades.add(i);
}
}
}
return newGrades;
}
}
public class Solution {
public static void main(String[] args) throws IOException {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int gradesCount = Integer.parseInt(bufferedReader.readLine().trim());
List<Integer> grades = new ArrayList<>();
for (int i = 0; i < gradesCount; i++) {
int gradesItem = Integer.parseInt(bufferedReader.readLine().trim());
grades.add(gradesItem);
}
List<Integer> result = Result.gradingStudents(grades);
for (int i = 0; i < result.size(); i++) {
bufferedWriter.write(String.valueOf(result.get(i)));
if (i != result.size() - 1) {
bufferedWriter.write("\n");
}
}
bufferedWriter.newLine();
bufferedReader.close();
bufferedWriter.close();
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
calicoctl/commands/node/run.go
|
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package node
import (
"bufio"
"fmt"
"io/ioutil"
gonet "net"
"os"
"os/exec"
"regexp"
"strings"
"time"
"github.com/docopt/docopt-go"
"github.com/projectcalico/calicoctl/calicoctl/commands/argutils"
"github.com/projectcalico/calicoctl/calicoctl/commands/clientmgr"
"github.com/projectcalico/calicoctl/calicoctl/commands/constants"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
"github.com/projectcalico/libcalico-go/lib/names"
"github.com/projectcalico/libcalico-go/lib/net"
log "github.com/sirupsen/logrus"
)
const (
ETCD_KEY_NODE_FILE = "/etc/calico/certs/key.pem"
ETCD_CERT_NODE_FILE = "/etc/calico/certs/cert.crt"
ETCD_CA_CERT_NODE_FILE = "/etc/calico/certs/ca_cert.crt"
AUTODETECTION_METHOD_FIRST = "first-found"
AUTODETECTION_METHOD_CAN_REACH = "can-reach="
AUTODETECTION_METHOD_INTERFACE = "interface="
AUTODETECTION_METHOD_SKIP_INTERFACE = "skip-interface="
)
var (
checkLogTimeout = 10 * time.Second
backendMatch = regexp.MustCompile("^(none|bird|gobgp)$")
)
// Run function collects diagnostic information and logs
func Run(args []string) error {
var err error
doc := `Usage:
calicoctl node run [--ip=<IP>] [--ip6=<IP6>] [--as=<AS_NUM>]
[--name=<NAME>]
[--ip-autodetection-method=<IP_AUTODETECTION_METHOD>]
[--ip6-autodetection-method=<IP6_AUTODETECTION_METHOD>]
[--log-dir=<LOG_DIR>]
[--node-image=<DOCKER_IMAGE_NAME>]
[--backend=(bird|gobgp|none)]
[--config=<CONFIG>]
[--no-default-ippools]
[--dryrun]
[--init-system]
Options:
-h --help Show this screen.
--name=<NAME> The name of the Calico node. If this is not
supplied it defaults to the host name.
--as=<AS_NUM> Set the AS number for this node. If omitted, it
will use the value configured on the node resource.
If there is no configured value and --as option is
omitted, the node will inherit the global AS number
(see 'calicoctl config' for details).
--ip=<IP> Set the local IPv4 routing address for this node.
If omitted, it will use the value configured on the
node resource. If there is no configured value
and the --ip option is omitted, the node will
attempt to autodetect an IP address to use. Use a
value of 'autodetect' to always force autodetection
of the IP each time the node starts.
--ip6=<IP6> Set the local IPv6 routing address for this node.
If omitted, it will use the value configured on the
node resource. If there is no configured value
and the --ip6 option is omitted, the node will not
route IPv6. Use a value of 'autodetect' to force
autodetection of the IP each time the node starts.
--ip-autodetection-method=<IP_AUTODETECTION_METHOD>
Specify the autodetection method for detecting the
local IPv4 routing address for this node. The valid
options are:
> first-found
Use the first valid IP address on the first
enumerated interface (common known exceptions are
filtered out, e.g. the docker bridge). It is not
recommended to use this if you have multiple
external interfaces on your host.
> can-reach=<IP OR DOMAINNAME>
Use the interface determined by your host routing
tables that will be used to reach the supplied
destination IP or domain name.
> interface=<IFACE NAME REGEX LIST>
Use the first valid IP address found on interfaces
named as per the first matching supplied interface
name regex. Regexes are separated by commas
(e.g. eth.*,enp0s.*).
> skip-interface=<IFACE NAME REGEX LIST>
Use the first valid IP address on the first
enumerated interface (same logic as first-found
above) that does NOT match with any of the
specified interface name regexes. Regexes are
separated by commas (e.g. eth.*,enp0s.*).
[default: first-found]
--ip6-autodetection-method=<IP6_AUTODETECTION_METHOD>
Specify the autodetection method for detecting the
local IPv6 routing address for this node. See
ip-autodetection-method flag for valid options.
[default: first-found]
--log-dir=<LOG_DIR> The directory containing Calico logs.
[default: /var/log/calico]
--node-image=<DOCKER_IMAGE_NAME>
Docker image to use for Calico's per-node container.
[default: quay.io/calico/node:latest]
--backend=(bird|gobgp|none)
Specify which networking backend to use. When set
to "none", Calico node runs in policy only mode.
The option to run with gobgp is currently
experimental.
[default: bird]
--dryrun Output the appropriate command, without starting the
container.
--init-system Run the appropriate command to use with an init
system.
--no-default-ippools Do not create default pools upon startup.
Default IP pools will be created if this is not set
and there are no pre-existing Calico IP pools.
-c --config=<CONFIG> Path to the file containing connection
configuration in YAML or JSON format.
[default: ` + constants.DefaultConfigPath + `]
Description:
This command is used to start a calico/node container instance which provides
Calico networking and network policy on your compute host.
`
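	// Illustrative invocations (values below are placeholders, not defaults):
	//   calicoctl node run --ip=192.0.2.10 --ip-autodetection-method=can-reach=8.8.8.8
	//   calicoctl node run --backend=none --dryrun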
arguments, err := docopt.Parse(doc, args, true, "", false, false)
if err != nil {
log.Info(err)
return fmt.Errorf("Invalid option: 'calicoctl %s'. Use flag '--help' to read about a specific subcommand.", strings.Join(args, " "))
}
if len(arguments) == 0 {
return nil
}
// Extract all the parameters.
ipv4 := argutils.ArgStringOrBlank(arguments, "--ip")
ipv6 := argutils.ArgStringOrBlank(arguments, "--ip6")
ipv4ADMethod := argutils.ArgStringOrBlank(arguments, "--ip-autodetection-method")
ipv6ADMethod := argutils.ArgStringOrBlank(arguments, "--ip6-autodetection-method")
logDir := argutils.ArgStringOrBlank(arguments, "--log-dir")
asNumber := argutils.ArgStringOrBlank(arguments, "--as")
img := argutils.ArgStringOrBlank(arguments, "--node-image")
backend := argutils.ArgStringOrBlank(arguments, "--backend")
dryrun := argutils.ArgBoolOrFalse(arguments, "--dryrun")
name := argutils.ArgStringOrBlank(arguments, "--name")
nopools := argutils.ArgBoolOrFalse(arguments, "--no-default-ippools")
config := argutils.ArgStringOrBlank(arguments, "--config")
initSystem := argutils.ArgBoolOrFalse(arguments, "--init-system")
// Validate parameters.
if ipv4 != "" && ipv4 != "autodetect" {
ip := argutils.ValidateIP(ipv4)
if ip.Version() != 4 {
return fmt.Errorf("Error executing command: --ip is wrong IP version")
}
}
if ipv6 != "" && ipv6 != "autodetect" {
ip := argutils.ValidateIP(ipv6)
if ip.Version() != 6 {
return fmt.Errorf("Error executing command: --ip6 is wrong IP version")
}
}
if asNumber != "" {
// The calico/node image does not accept dotted notation for
// the AS number, so convert.
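		// (For example, an asdot value of "1.10" would be converted to "65546".)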
asNumber = argutils.ValidateASNumber(asNumber).String()
}
if !backendMatch.MatchString(backend) {
return fmt.Errorf("Error executing command: unknown backend '%s'", backend)
}
// Validate the IP autodetection methods if specified.
if err := validateIpAutodetectionMethod(ipv4ADMethod, 4); err != nil {
return err
}
if err := validateIpAutodetectionMethod(ipv6ADMethod, 6); err != nil {
return err
}
// Use the hostname if a name is not specified. We should always
// pass in a fixed value to the node container so that if the user
// changes the hostname, the calico/node won't start using a different
// name.
if name == "" {
name, err = names.Hostname()
if err != nil || name == "" {
return fmt.Errorf("Error executing command: unable to determine node name")
}
}
	// Load the etcd configuration.
cfg, err := clientmgr.LoadClientConfig(config)
if err != nil {
return fmt.Errorf("Error executing command: invalid config file")
}
if cfg.Spec.DatastoreType != apiconfig.EtcdV3 {
return fmt.Errorf("Error executing command: unsupported backend specified in config")
}
etcdcfg := cfg.Spec.EtcdConfig
// Create a mapping of environment variables to values.
envs := map[string]string{
"NODENAME": name,
"CALICO_NETWORKING_BACKEND": backend,
}
if nopools {
envs["NO_DEFAULT_POOLS"] = "true"
}
if ipv4ADMethod != AUTODETECTION_METHOD_FIRST {
envs["IP_AUTODETECTION_METHOD"] = ipv4ADMethod
}
if ipv6ADMethod != AUTODETECTION_METHOD_FIRST {
envs["IP6_AUTODETECTION_METHOD"] = ipv6ADMethod
}
if asNumber != "" {
envs["AS"] = asNumber
}
if ipv4 != "" {
envs["IP"] = ipv4
}
if ipv6 != "" {
envs["IP6"] = ipv6
}
// Create a struct for volumes to mount.
type vol struct {
hostPath string
containerPath string
}
// vols is a slice of volume bindings.
vols := []vol{
{hostPath: logDir, containerPath: "/var/log/calico"},
{hostPath: "/var/run/calico", containerPath: "/var/run/calico"},
{hostPath: "/var/lib/calico", containerPath: "/var/lib/calico"},
{hostPath: "/lib/modules", containerPath: "/lib/modules"},
{hostPath: "/run", containerPath: "/run"},
}
envs["ETCD_ENDPOINTS"] = etcdcfg.EtcdEndpoints
envs["ETCD_DISCOVERY_SRV"] = etcdcfg.EtcdDiscoverySrv
if etcdcfg.EtcdCACertFile != "" {
envs["ETCD_CA_CERT_FILE"] = ETCD_CA_CERT_NODE_FILE
vols = append(vols, vol{hostPath: etcdcfg.EtcdCACertFile, containerPath: ETCD_CA_CERT_NODE_FILE})
}
if etcdcfg.EtcdKeyFile != "" && etcdcfg.EtcdCertFile != "" {
envs["ETCD_KEY_FILE"] = ETCD_KEY_NODE_FILE
vols = append(vols, vol{hostPath: etcdcfg.EtcdKeyFile, containerPath: ETCD_KEY_NODE_FILE})
envs["ETCD_CERT_FILE"] = ETCD_CERT_NODE_FILE
vols = append(vols, vol{hostPath: etcdcfg.EtcdCertFile, containerPath: ETCD_CERT_NODE_FILE})
}
// Create the Docker command to execute (or display). Start with the
// fixed parts. If this is not for an init system, we'll include the
	// detach flag (to prevent the command blocking), and use Docker's built-in
	// restart mechanism. If this is for an init-system we want the
// command to remain attached and for Docker to remove the dead
// container so that it can be restarted by the init system.
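	// For illustration only, the two resulting command shapes are roughly:
	//   without --init-system: docker run --net=host --privileged --name=calico-node -d --restart=always ... <image>
	//   with --init-system:    docker run --net=host --privileged --name=calico-node --rm ... <image>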
cmd := []string{"docker", "run", "--net=host", "--privileged",
"--name=calico-node"}
if initSystem {
cmd = append(cmd, "--rm")
} else {
cmd = append(cmd, "-d", "--restart=always")
}
// Add the environment variable pass-through.
for k, v := range envs {
cmd = append(cmd, "-e", fmt.Sprintf("%s=%s", k, v))
}
// Add the volume mounts.
for _, v := range vols {
cmd = append(cmd, "-v", fmt.Sprintf("%s:%s", v.hostPath, v.containerPath))
}
// Add the container image name
cmd = append(cmd, img)
if dryrun {
fmt.Println("Use the following command to start the calico/node container:")
fmt.Printf("\n%s\n\n", strings.Join(cmd, " "))
if !initSystem {
fmt.Println("If you are running calico/node in an init system, use the --init-system flag")
fmt.Println("to display the appropriate start and stop commands.")
} else {
fmt.Println("Use the following command to stop the calico/node container:")
fmt.Printf("\ndocker stop calico-node\n\n")
}
return nil
}
// This is not a dry run. Check that we are running as root.
enforceRoot()
// Normally, Felix will load the modules it needs, but when running inside a
// container it might not be able to do so. Ensure the required modules are
// loaded each time the node starts.
// We only make a best effort attempt because the command may fail if the
// modules are built in.
if !runningInContainer() {
log.Info("Running in container")
loadModules()
if err := setupIPForwarding(); err != nil {
return err
}
setNFConntrackMax()
}
// Make sure the calico-node is not already running before we attempt
// to start the node.
fmt.Println("Removing old calico-node container (if running).")
err = exec.Command("docker", "rm", "-f", "calico-node").Run()
if err != nil {
log.WithError(err).Debug("Unable to remove calico-node container (ok if container was not running)")
}
// Run the docker command.
fmt.Println("Running the following command to start calico-node:")
fmt.Printf("\n%s\n\n", strings.Join(cmd, " "))
fmt.Println("Image may take a short time to download if it is not available locally.")
// Now execute the actual Docker run command and check for the
// unable to find image message.
runCmd := exec.Command(cmd[0], cmd[1:]...)
if output, err := runCmd.CombinedOutput(); err != nil {
errStr := fmt.Sprintf("Error executing command: %v\n", err)
		for _, line := range strings.Split(string(output), "\n") {
			errStr += fmt.Sprintf(" | %s\n", line)
}
return fmt.Errorf(errStr)
}
// Create the command to follow the docker logs for the calico/node
fmt.Print("Container started, checking progress logs.\n\n")
logCmd := exec.Command("docker", "logs", "--follow", "calico-node")
// Get the stdout pipe
outPipe, err := logCmd.StdoutPipe()
if err != nil {
return fmt.Errorf("Error executing command: unable to check calico/node logs: %v", err)
}
outScanner := bufio.NewScanner(outPipe)
// Start following the logs.
err = logCmd.Start()
if err != nil {
return fmt.Errorf("Error executing command: unable to check calico/node logs: %v", err)
}
// Protect against calico processes taking too long to start, or docker
// logs hanging without output.
time.AfterFunc(checkLogTimeout, func() {
err = logCmd.Process.Kill()
if err != nil {
fmt.Printf("Error attempting to kill process: check logs for details")
}
})
// Read stdout until the node fails, or until we see the output
// indicating success.
started := false
for outScanner.Scan() {
line := outScanner.Text()
fmt.Println(line)
if line == "Calico node started successfully" {
started = true
break
}
}
// Kill the process if it is still running.
err = logCmd.Process.Kill()
if err != nil {
return fmt.Errorf("Error attempting to kill process: check logs for details")
}
err = logCmd.Wait()
if err != nil {
return fmt.Errorf("Error waiting for process to exit: check logs for details")
}
// If we didn't successfully start then notify the user.
if outScanner.Err() != nil {
return fmt.Errorf("Error executing command: error reading calico/node logs, check logs for details")
} else if !started {
return fmt.Errorf("Error executing command: calico/node has terminated, check logs for details")
}
return nil
}
// runningInContainer returns whether we are running calicoctl within a container.
func runningInContainer() bool {
v := os.Getenv("CALICO_CTL_CONTAINER")
return v != ""
}
func loadModules() {
cmd := []string{"modprobe", "-a", "xt_set", "ip6_tables"}
fmt.Printf("Running command to load modules: %s\n", strings.Join(cmd, " "))
err := exec.Command(cmd[0], cmd[1:]...).Run()
if err != nil {
log.Warning(err)
}
}
func setupIPForwarding() error {
fmt.Println("Enabling IPv4 forwarding")
err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward",
[]byte("1"), 0)
if err != nil {
return fmt.Errorf("ERROR: Could not enable ipv4 forwarding")
}
if _, err := os.Stat("/proc/sys/net/ipv6"); err == nil {
fmt.Println("Enabling IPv6 forwarding")
err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding",
[]byte("1"), 0)
if err != nil {
return fmt.Errorf("ERROR: Could not enable ipv6 forwarding")
}
}
return nil
}
func setNFConntrackMax() {
// A common problem on Linux systems is running out of space in the conntrack
// table, which can cause poor iptables performance. This can happen if you
// run a lot of workloads on a given host, or if your workloads create a lot
// of TCP connections or bidirectional UDP streams.
//
// To avoid this becoming a problem, we recommend increasing the conntrack
	// table size; the code below raises the limit directly.
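	// (Roughly equivalent to running "sysctl -w net.netfilter.nf_conntrack_max=1000000", shown here for illustration.)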
fmt.Println("Increasing conntrack limit")
err := ioutil.WriteFile("/proc/sys/net/netfilter/nf_conntrack_max",
[]byte("1000000"), 0)
if err != nil {
fmt.Println("WARNING: Could not set nf_contrack_max. This may have an impact at scale.")
}
}
// Validate the IP autodetection method string.
func validateIpAutodetectionMethod(method string, version int) error {
if method == AUTODETECTION_METHOD_FIRST {
// Auto-detection method is "first-found", no additional validation
// required.
return nil
} else if strings.HasPrefix(method, AUTODETECTION_METHOD_CAN_REACH) {
// Auto-detection method is "can-reach", validate that the address
// resolves to at least one IP address of the required version.
addrStr := strings.TrimPrefix(method, AUTODETECTION_METHOD_CAN_REACH)
ips, err := gonet.LookupIP(addrStr)
if err != nil {
return fmt.Errorf("Error executing command: cannot resolve address specified for IP autodetection: %s", addrStr)
}
for _, ip := range ips {
cip := net.IP{IP: ip}
if cip.Version() == version {
return nil
}
}
return fmt.Errorf("Error executing command: address for IP autodetection does not resolve to an IPv%d address: %s", version, addrStr)
} else if strings.HasPrefix(method, AUTODETECTION_METHOD_INTERFACE) {
// Auto-detection method is "interface", validate that the interface
// regex is a valid golang regex.
ifStr := strings.TrimPrefix(method, AUTODETECTION_METHOD_INTERFACE)
// Regexes are provided in a string separated by ","
ifRegexes := strings.Split(ifStr, ",")
for _, ifRegex := range ifRegexes {
			if _, err := regexp.Compile(ifRegex); err != nil {
return fmt.Errorf("Error executing command: invalid interface regex specified for IP autodetection: %s", ifRegex)
}
}
return nil
} else if strings.HasPrefix(method, AUTODETECTION_METHOD_SKIP_INTERFACE) {
// Auto-detection method is "skip-interface", validate that the
// interface regexes used are valid golang regexes.
ifStr := strings.TrimPrefix(method, AUTODETECTION_METHOD_SKIP_INTERFACE)
// Regexes are provided in a string separated by ","
ifRegexes := strings.Split(ifStr, ",")
for _, ifRegex := range ifRegexes {
if _, err := regexp.Compile(ifRegex); err != nil {
return fmt.Errorf("Error executing command: invalid interface regex specified for IP autodetection: %s", ifRegex)
}
}
return nil
}
return fmt.Errorf("Error executing command: invalid IP autodetection method: %s", method)
}
|
[
"\"CALICO_CTL_CONTAINER\""
] |
[] |
[
"CALICO_CTL_CONTAINER"
] |
[]
|
["CALICO_CTL_CONTAINER"]
|
go
| 1 | 0 | |
userbot/__init__.py
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# inline credit @keselekpermen69
# Recode by @mrismanaziz
# t.me/SharingUserbot
#
""" Userbot initialization. """
import os
import re
import sys
import time
from distutils.util import strtobool as sb
from logging import DEBUG, INFO, basicConfig, getLogger
from math import ceil
from pathlib import Path
from platform import python_version
from sys import version_info
from dotenv import load_dotenv
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from pytgcalls import PyTgCalls
from requests import get
from telethon.errors import UserIsBlockedError
from telethon import version
from telethon.network.connection.tcpabridged import ConnectionTcpAbridged
from telethon.sessions import StringSession
from telethon.sync import TelegramClient, custom, events
from telethon.tl.types import InputWebDocument
from telethon.utils import get_display_name
from .storage import Storage
def STORAGE(n):
return Storage(Path("data") / n)
load_dotenv("config.env")
StartTime = time.time()
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
CMD_LIST = {}
SUDO_LIST = {}
ZALG_LIST = {}
LOAD_PLUG = {}
INT_PLUG = ""
ISAFK = False
AFKREASON = None
ENABLE_KILLME = True
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="[%(name)s] - [%(levelname)s] - %(message)s",
level=DEBUG,
)
else:
basicConfig(
format="[%(name)s] - [%(levelname)s] - %(message)s",
level=INFO,
)
LOGS = getLogger(__name__)
if version_info < (3, 9):
    LOGS.info(
        "Anda HARUS memiliki python setidaknya versi 3.9. "
        "Beberapa fitur tergantung versi python ini. Bot berhenti."
    )
sys.exit(1)
# Check if the config was edited by using the already used variable.
# Basically, its the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None
)
if CONFIG_CHECK:
LOGS.info(
"Harap hapus baris yang disebutkan dalam tagar pertama dari file config.env"
)
sys.exit(1)
# If you fork this repo, do NOT remove the DEVS IDs or the BLACKLIST_CHAT IDs below.
DEVS = [
844432220,
1906014306,
1382636419,
1712874582,
1738637033,
]
SUDO_USERS = {int(x) for x in os.environ.get("SUDO_USERS", "").split()}
BL_CHAT = {int(x) for x in os.environ.get("BL_CHAT", "").split()}
# For Blacklist Group Support
BLACKLIST_CHAT = os.environ.get("BLACKLIST_CHAT", None)
if not BLACKLIST_CHAT:
BLACKLIST_CHAT = [-1001473548283]
# DO NOT remove these entries; if you fork this, copy/edit and add your own IDs instead.
# Removing them will get you gbanned and your Telegram account flagged.
# Telegram App KEY and HASH
API_KEY = int(os.environ.get("API_KEY") or 0)
API_HASH = str(os.environ.get("API_HASH") or None)
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION", None)
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID") or 0)
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG", "True"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "True"))
# Load or No Load modules
LOAD = os.environ.get("LOAD", "").split()
NO_LOAD = os.environ.get("NO_LOAD", "").split()
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False"))
PM_LIMIT = int(os.environ.get("PM_LIMIT", 6))
# Custom Handler command
CMD_HANDLER = os.environ.get("CMD_HANDLER") or "."
SUDO_HANDLER = os.environ.get("SUDO_HANDLER", r"$")
# Owner ID
OWNER_ID = int(os.environ.get("OWNER_ID") or 0)
# Support
GROUP = os.environ.get("GROUP", "SharingUserbot")
CHANNEL = os.environ.get("CHANNEL", "Lunatic0de")
# Heroku Credentials for updater.
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
# JustWatch Country
WATCH_COUNTRY = os.environ.get("WATCH_COUNTRY", "ID")
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
# Custom (forked) repo URL for updater.
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL", "https://github.com/masjarwo123/Diyo.git"
)
UPSTREAM_REPO_BRANCH = os.environ.get("UPSTREAM_REPO_BRANCH", "Man-Userbot")
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER") or "/usr/bin/chromedriver"
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN") or "/usr/bin/google-chrome"
# set to True if you want to log PMs to your BOTLOG_CHATID
NC_LOG_P_M_S = sb(os.environ.get("NC_LOG_P_M_S", "False"))
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", "Jakarta")
# Lydia API
LYDIA_API_KEY = os.environ.get("LYDIA_API_KEY", None)
# For MONGO based DataBase
MONGO_URI = os.environ.get("MONGO_URI", None)
# set blacklist_chats where you do not want userbot's features
UB_BLACK_LIST_CHAT = os.environ.get("UB_BLACK_LIST_CHAT", None)
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Custom text for the .alive command
ALIVE_TEKS_CUSTOM = os.environ.get("ALIVE_TEKS_CUSTOM", "Hey, I am alive.")
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", "Diyo")
# Custom Emoji Alive
ALIVE_EMOJI = os.environ.get("ALIVE_EMOJI", "👿")
# Custom Emoji Alive
INLINE_EMOJI = os.environ.get("INLINE_EMOJI", "✔︎")
# Custom icon HELP
ICON_HELP = os.environ.get("ICON_HELP", "❁")
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", "ID"))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Zipfile module
ZIP_DOWNLOAD_DIRECTORY = os.environ.get("ZIP_DOWNLOAD_DIRECTORY", "./zips")
# bit.ly module
BITLY_TOKEN = os.environ.get("BITLY_TOKEN", None)
# Bot Name
TERM_ALIAS = os.environ.get("TERM_ALIAS", "Diyo")
# Bot version
BOT_VER = os.environ.get("BOT_VER", "1.9.3")
# Default .alive username
ALIVE_USERNAME = os.environ.get("ALIVE_USERNAME", None)
# Sticker Custom Pack Name
S_PACK_NAME = os.environ.get("S_PACK_NAME", f"Sticker Pack {ALIVE_NAME}")
# Default .alive logo
ALIVE_LOGO = (
os.environ.get("ALIVE_LOGO") or "https://telegra.ph/file/3307384c293f7f54ed723.jpg"
)
INLINE_PIC = (
os.environ.get("INLINE_PIC") or "https://telegra.ph/file/3307384c293f7f54ed723.jpg"
)
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
lastfm = None
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
try:
lastfm = LastFMNetwork(
api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS,
)
except Exception:
pass
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY", "./downloads/")
# Genius lyrics API
GENIUS = os.environ.get("GENIUS_ACCESS_TOKEN", None)
# Quotes API Token
QUOTES_API_TOKEN = os.environ.get("QUOTES_API_TOKEN", None)
# Deezloader
DEEZER_ARL_TOKEN = os.environ.get("DEEZER_ARL_TOKEN", None)
# NSFW Detect DEEP AI
DEEP_AI = os.environ.get("DEEP_AI", None)
# Photo Chat - Get this value from http://antiddos.systems
API_TOKEN = os.environ.get("API_TOKEN", None)
API_URL = os.environ.get("API_URL", "http://antiddos.systems")
# Inline bot helper
BOT_TOKEN = os.environ.get("BOT_TOKEN", None)
BOT_USERNAME = os.environ.get("BOT_USERNAME", None)
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists("bin"):
os.mkdir("bin")
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown": "bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py": "bin/cmrudl",
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# Do not remove this block; removing it will cause errors later
# 'bot' variable
if STRING_SESSION:
session = StringSession(str(STRING_SESSION))
else:
session = "ManUserBot"
try:
bot = TelegramClient(
session=session,
api_id=API_KEY,
api_hash=API_HASH,
connection=ConnectionTcpAbridged,
auto_reconnect=True,
connection_retries=None,
)
call_py = PyTgCalls(bot)
except Exception as e:
print(f"STRING_SESSION - {e}")
sys.exit()
async def check_botlog_chatid() -> None:
if not BOTLOG_CHATID and BOTLOG:
LOGS.warning(
"var BOTLOG_CHATID kamu belum di isi. Buatlah grup telegram dan masukan bot @MissRose_bot lalu ketik /id Masukan id grup nya di var BOTLOG_CHATID"
)
sys.exit(1)
async def update_restart_msg(chat_id, msg_id):
DEFAULTUSER = ALIVE_NAME or "Set `ALIVE_NAME` ConfigVar!"
message = (
f"**Diyo v{BOT_VER} is back up and running!**\n\n"
f"**Telethon:** {version.__version__}\n"
f"**Python:** {python_version()}\n"
f"**User:** {DEFAULTUSER}"
)
await bot.edit_message(chat_id, msg_id, message)
return True
try:
from userbot.modules.sql_helper.globals import delgvar, gvarstatus
chat_id, msg_id = gvarstatus("restartstatus").split("\n")
with bot:
try:
bot.loop.run_until_complete(update_restart_msg(int(chat_id), int(msg_id)))
except BaseException:
pass
delgvar("restartstatus")
except AttributeError:
pass
if BOT_TOKEN is not None:
tgbot = TelegramClient(
"TG_BOT_TOKEN",
api_id=API_KEY,
api_hash=API_HASH,
connection=ConnectionTcpAbridged,
auto_reconnect=True,
connection_retries=None,
).start(bot_token=BOT_TOKEN)
else:
tgbot = None
def paginate_help(page_number, loaded_modules, prefix):
number_of_rows = 5
number_of_cols = 4
global looters
looters = page_number
helpable_modules = [p for p in loaded_modules if not p.startswith("_")]
helpable_modules = sorted(helpable_modules)
modules = [
custom.Button.inline(
"{} {} {}".format(f"{INLINE_EMOJI}", x, f"{INLINE_EMOJI}"),
data="ub_modul_{}".format(x),
)
for x in helpable_modules
]
pairs = list(
zip(
modules[::number_of_cols],
modules[1::number_of_cols],
modules[2::number_of_cols],
)
)
if len(modules) % number_of_cols == 1:
pairs.append((modules[-1],))
max_num_pages = ceil(len(pairs) / number_of_rows)
modulo_page = page_number % max_num_pages
if len(pairs) > number_of_rows:
pairs = pairs[
modulo_page * number_of_rows : number_of_rows * (modulo_page + 1)
] + [
(
custom.Button.inline(
"««", data="{}_prev({})".format(prefix, modulo_page)
),
custom.Button.inline("Tutup", b"close"),
custom.Button.inline(
"»»", data="{}_next({})".format(prefix, modulo_page)
),
)
]
return pairs
with bot:
try:
from userbot.modules.sql_helper.bot_blacklists import check_is_black_list
from userbot.modules.sql_helper.bot_pms_sql import add_user_to_db, get_user_id
from userbot.utils import reply_id
dugmeler = CMD_HELP
user = bot.get_me()
uid = user.id
owner = user.first_name
logo = ALIVE_LOGO
logoman = INLINE_PIC
tgbotusername = BOT_USERNAME
@tgbot.on(events.NewMessage(incoming=True, func=lambda e: e.is_private))
async def bot_pms(event):
chat = await event.get_chat()
if check_is_black_list(chat.id):
return
if chat.id != uid:
msg = await event.forward_to(uid)
try:
add_user_to_db(
msg.id, get_display_name(chat), chat.id, event.id, 0, 0
)
except Exception as e:
LOGS.error(str(e))
if BOTLOG:
await event.client.send_message(
BOTLOG_CHATID,
f"**ERROR:** Saat menyimpan detail pesan di database\n`{str(e)}`",
)
else:
if event.text.startswith("/"):
return
reply_to = await reply_id(event)
if reply_to is None:
return
users = get_user_id(reply_to)
if users is None:
return
                user_id = None
                for usr in users:
user_id = int(usr.chat_id)
reply_msg = usr.reply_id
user_name = usr.first_name
break
if user_id is not None:
try:
if event.media:
msg = await event.client.send_file(
user_id,
event.media,
caption=event.text,
reply_to=reply_msg,
)
else:
msg = await event.client.send_message(
user_id,
event.text,
reply_to=reply_msg,
link_preview=False,
)
except UserIsBlockedError:
return await event.reply(
"❌ **Bot ini diblokir oleh pengguna.**"
)
except Exception as e:
return await event.reply(f"**ERROR:** `{e}`")
try:
add_user_to_db(
reply_to, user_name, user_id, reply_msg, event.id, msg.id
)
except Exception as e:
LOGS.error(str(e))
if BOTLOG:
await event.client.send_message(
BOTLOG_CHATID,
f"**ERROR:** Saat menyimpan detail pesan di database\n`{e}`",
)
@tgbot.on(events.InlineQuery)
async def inline_handler(event):
builder = event.builder
result = None
query = event.text
if event.query.user_id == uid and query.startswith("@SharingUserbot"):
buttons = paginate_help(0, dugmeler, "helpme")
result = builder.photo(
file=logoman,
link_preview=False,
text=f"**✗ Diyo Inline Menu ✗**\n\n✣ **Owner** [{user.first_name}](tg://user?id={user.id})\n✣ **Jumlah** `{len(dugmeler)}` Modules",
buttons=buttons,
)
elif query.startswith("repo"):
result = builder.article(
title="Repository",
description="Repository Diyo",
url="https://t.me/kalongclub",
thumb=InputWebDocument(INLINE_PIC, 0, "image/jpeg", []),
text="**Diyo**\n➖➖➖➖➖➖➖➖➖➖\n✣ **Owner Repo :** [Diyo](https://t.me/dewaslow)\n✣ **Support :** @chipmunkchanel\n✣ **Repository :** [Diyo](https://github.com/masjarwo123/Diyo)\n➖➖➖➖➖➖➖➖➖➖",
buttons=[
[
custom.Button.url("ɢʀᴏᴜᴘ", "https://t.me/kalongclub"),
custom.Button.url(
"ʀᴇᴘᴏ", "https://github.com/masjarwo123/Diyo"
),
],
],
link_preview=False,
)
else:
result = builder.article(
title="✗ Diyo ✗",
description="Diyo | Telethon",
url="https://t.me/kalongclub",
thumb=InputWebDocument(INLINE_PIC, 0, "image/jpeg", []),
text=f"**Diyo**\n➖➖➖➖➖➖➖➖➖➖\n✣ **UserMode:** [{user.first_name}](tg://user?id={user.id})\n✣ **Assistant:** {tgbotusername}\n➖➖➖➖➖➖➖➖➖➖\n**Support:** @chipmunkchanel\n➖➖➖➖➖➖➖➖➖➖",
buttons=[
[
custom.Button.url("ɢʀᴏᴜᴘ", "https://t.me/kalongclub"),
custom.Button.url(
"ʀᴇᴘᴏ", "https://github.com/masjarwo123/diyo"
),
],
],
link_preview=False,
)
await event.answer(
[result], switch_pm="👥 USERBOT PORTAL", switch_pm_param="start"
)
@tgbot.on(events.callbackquery.CallbackQuery(data=re.compile(rb"reopen")))
async def on_plug_in_callback_query_handler(event):
if event.query.user_id == uid or event.query.user_id in SUDO_USERS:
current_page_number = int(looters)
buttons = paginate_help(current_page_number, dugmeler, "helpme")
text = f"**✗ Diyo Inline Menu ✗**\n\n✣ **Owner** [{user.first_name}](tg://user?id={user.id})\n✣ **Jumlah** `{len(dugmeler)}` Modules"
await event.edit(
text,
file=logoman,
buttons=buttons,
link_preview=False,
)
else:
reply_pop_up_alert = f"Kamu Tidak diizinkan, ini Userbot Milik {owner}"
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
@tgbot.on(
events.callbackquery.CallbackQuery(
data=re.compile(rb"helpme_next\((.+?)\)")
)
)
async def on_plug_in_callback_query_handler(event):
if event.query.user_id == uid or event.query.user_id in SUDO_USERS:
current_page_number = int(event.data_match.group(1).decode("UTF-8"))
buttons = paginate_help(current_page_number + 1, dugmeler, "helpme")
await event.edit(buttons=buttons)
else:
reply_pop_up_alert = (
f"Kamu Tidak diizinkan, ini Userbot Milik {ALIVE_NAME}"
)
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
@tgbot.on(events.callbackquery.CallbackQuery(data=re.compile(b"close")))
async def on_plug_in_callback_query_handler(event):
            if event.query.user_id == uid or event.query.user_id in DEVS or event.query.user_id in SUDO_USERS:
openlagi = custom.Button.inline("• Re-Open Menu •", data="reopen")
await event.edit(
"⚜️ **Help Mode Button Ditutup!** ⚜️", buttons=openlagi
)
else:
reply_pop_up_alert = f"Kamu Tidak diizinkan, ini Userbot Milik {owner}"
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
@tgbot.on(
events.callbackquery.CallbackQuery(
data=re.compile(rb"helpme_prev\((.+?)\)")
)
)
async def on_plug_in_callback_query_handler(event):
if event.query.user_id == uid or event.query.user_id in SUDO_USERS:
current_page_number = int(event.data_match.group(1).decode("UTF-8"))
buttons = paginate_help(current_page_number - 1, dugmeler, "helpme")
await event.edit(buttons=buttons)
else:
reply_pop_up_alert = f"Kamu Tidak diizinkan, ini Userbot Milik {owner}"
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
@tgbot.on(events.callbackquery.CallbackQuery(data=re.compile(b"ub_modul_(.*)")))
async def on_plug_in_callback_query_handler(event):
if event.query.user_id == uid or event.query.user_id in SUDO_USERS:
modul_name = event.data_match.group(1).decode("UTF-8")
cmdhel = str(CMD_HELP[modul_name])
if len(cmdhel) > 150:
help_string = (
str(CMD_HELP[modul_name])
.replace("`", "")
.replace("**", "")[:150]
+ "..."
+ "\n\nBaca Teks Berikutnya Ketik .help "
+ modul_name
+ " "
)
else:
help_string = (
str(CMD_HELP[modul_name]).replace("`", "").replace("**", "")
)
reply_pop_up_alert = (
help_string
if help_string is not None
else "{} Tidak ada dokumen yang telah ditulis untuk modul.".format(
modul_name
)
)
else:
reply_pop_up_alert = f"Kamu Tidak diizinkan, ini Userbot Milik {owner}"
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
except BaseException:
LOGS.info(
"Help Mode Inline Bot Mu Tidak aktif. Tidak di aktifkan juga tidak apa-apa. "
"Untuk Mengaktifkannya Buat bot di @BotFather Lalu Tambahkan var BOT_TOKEN dan BOT_USERNAME. "
"Pergi Ke @BotFather lalu settings bot » Pilih mode inline » Turn On. "
)
try:
bot.loop.run_until_complete(check_botlog_chatid())
except BaseException as e:
LOGS.exception(f"[BOTLOG] - {e}")
sys.exit(1)
|
[] |
[] |
[
"WATCH_COUNTRY",
"INLINE_EMOJI",
"GOOGLE_CHROME_BIN",
"LYDIA_API_KEY",
"COUNTRY",
"BLACKLIST_CHAT",
"LASTFM_API",
"BOT_TOKEN",
"ANTI_SPAMBOT_SHOUT",
"CMD_HANDLER",
"UPSTREAM_REPO_URL",
"OCR_SPACE_API_KEY",
"BIO_PREFIX",
"LOGSPAMMER",
"NC_LOG_P_M_S",
"NO_LOAD",
"TZ_NUMBER",
"SUDO_USERS",
"LASTFM_PASSWORD",
"BOT_VER",
"ICON_HELP",
"DATABASE_URL",
"INLINE_PIC",
"HEROKU_APP_NAME",
"___________PLOX_______REMOVE_____THIS_____LINE__________",
"ALIVE_LOGO",
"GIT_REPO_NAME",
"HEROKU_API_KEY",
"BOT_USERNAME",
"DEEZER_ARL_TOKEN",
"CHROME_DRIVER",
"API_URL",
"DEEP_AI",
"MONGO_URI",
"ALIVE_EMOJI",
"YOUTUBE_API_KEY",
"LASTFM_USERNAME",
"API_KEY",
"PM_AUTO_BAN",
"PM_LIMIT",
"ALIVE_TEKS_CUSTOM",
"DEFAULT_BIO",
"ANTI_SPAMBOT",
"OPEN_WEATHER_MAP_APPID",
"S_PACK_NAME",
"LASTFM_SECRET",
"UB_BLACK_LIST_CHAT",
"ZIP_DOWNLOAD_DIRECTORY",
"CHANNEL",
"UPSTREAM_REPO_BRANCH",
"WEATHER_DEFCITY",
"ALIVE_USERNAME",
"STRING_SESSION",
"OWNER_ID",
"QUOTES_API_TOKEN",
"CONSOLE_LOGGER_VERBOSE",
"GITHUB_ACCESS_TOKEN",
"LOAD",
"SUDO_HANDLER",
"ALIVE_NAME",
"BOTLOG_CHATID",
"BITLY_TOKEN",
"CLEAN_WELCOME",
"TMP_DOWNLOAD_DIRECTORY",
"GENIUS_ACCESS_TOKEN",
"API_TOKEN",
"REM_BG_API_KEY",
"GROUP",
"BOTLOG",
"API_HASH",
"BL_CHAT",
"TERM_ALIAS"
] |
[]
|
["WATCH_COUNTRY", "INLINE_EMOJI", "GOOGLE_CHROME_BIN", "LYDIA_API_KEY", "COUNTRY", "BLACKLIST_CHAT", "LASTFM_API", "BOT_TOKEN", "ANTI_SPAMBOT_SHOUT", "CMD_HANDLER", "UPSTREAM_REPO_URL", "OCR_SPACE_API_KEY", "BIO_PREFIX", "LOGSPAMMER", "NC_LOG_P_M_S", "NO_LOAD", "TZ_NUMBER", "SUDO_USERS", "LASTFM_PASSWORD", "BOT_VER", "ICON_HELP", "DATABASE_URL", "INLINE_PIC", "HEROKU_APP_NAME", "___________PLOX_______REMOVE_____THIS_____LINE__________", "ALIVE_LOGO", "GIT_REPO_NAME", "HEROKU_API_KEY", "BOT_USERNAME", "DEEZER_ARL_TOKEN", "CHROME_DRIVER", "API_URL", "DEEP_AI", "MONGO_URI", "ALIVE_EMOJI", "YOUTUBE_API_KEY", "LASTFM_USERNAME", "API_KEY", "PM_AUTO_BAN", "PM_LIMIT", "ALIVE_TEKS_CUSTOM", "DEFAULT_BIO", "ANTI_SPAMBOT", "OPEN_WEATHER_MAP_APPID", "S_PACK_NAME", "LASTFM_SECRET", "UB_BLACK_LIST_CHAT", "ZIP_DOWNLOAD_DIRECTORY", "CHANNEL", "UPSTREAM_REPO_BRANCH", "WEATHER_DEFCITY", "ALIVE_USERNAME", "STRING_SESSION", "OWNER_ID", "QUOTES_API_TOKEN", "CONSOLE_LOGGER_VERBOSE", "GITHUB_ACCESS_TOKEN", "LOAD", "SUDO_HANDLER", "ALIVE_NAME", "BOTLOG_CHATID", "BITLY_TOKEN", "CLEAN_WELCOME", "TMP_DOWNLOAD_DIRECTORY", "GENIUS_ACCESS_TOKEN", "API_TOKEN", "REM_BG_API_KEY", "GROUP", "BOTLOG", "API_HASH", "BL_CHAT", "TERM_ALIAS"]
|
python
| 72 | 0 | |
src/test/java/com/oneandone/rest/test/VPNTests.java
|
/*
* Copyright 2016 Ali.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.oneandone.rest.test;
import com.oneandone.rest.POJO.Requests.CreateVPNRequest;
import com.oneandone.rest.POJO.Requests.UpdateVPNRequest;
import com.oneandone.rest.POJO.Response.DataCenter;
import com.oneandone.rest.POJO.Response.VPNResponse;
import com.oneandone.rest.client.RestClientException;
import com.oneandone.sdk.OneAndOneApi;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.junit.AfterClass;
import static org.junit.Assert.assertNotNull;
import org.junit.BeforeClass;
import org.junit.Test;
/**
*
* @author Ali
*/
public class VPNTests {
static OneAndOneApi oneandoneApi = new OneAndOneApi();
static Random rand = new Random();
static List<VPNResponse> vpns;
static VPNResponse vpn;
@BeforeClass
public static void getVPNs() throws RestClientException, IOException {
oneandoneApi.setToken(System.getenv("OAO_TOKEN"));
createVPN();
List<VPNResponse> result = oneandoneApi.getVpnApi().getVPNs(0, 0, null, null, null);
vpns = result;
assertNotNull(result);
}
@Test
public void getVPN() throws RestClientException, IOException {
VPNResponse result = oneandoneApi.getVpnApi().getVPN(vpns.get(0).getId());
assertNotNull(result);
assertNotNull(result.getId());
}
@Test
public void getVPNConfiguration() throws RestClientException, IOException {
oneandoneApi.getVpnApi().getVPNConfigurationFile(vpns.get(0).getId(), "C:\\temp");
}
public static void createVPN() throws RestClientException, IOException {
String randomValue = rand.nextInt(99) + "test.java";
CreateVPNRequest request = new CreateVPNRequest();
request.setName(randomValue);
request.setDescription(randomValue + "desc");
List<DataCenter> datacenters = oneandoneApi.getDataCenterApi().getDataCenters(0, 0, null, null, null);
if (!datacenters.isEmpty()) {
request.setDatacenterId(datacenters.get(0).getId());
vpn = oneandoneApi.getVpnApi().createVPN(request);
assertNotNull(vpn);
assertNotNull(vpn.getId());
}
}
@Test
public void updateVPN() throws RestClientException, IOException, InterruptedException {
String randomValue = rand.nextInt(99) + "update.java";
UpdateVPNRequest request = new UpdateVPNRequest();
request.setName(randomValue);
VPNResponse result = oneandoneApi.getVpnApi().updateVPN(vpn.getId(), request);
assertNotNull(result);
assertNotNull(result.getId());
        // check that the VPN reflects the update
VPNResponse ipResult = oneandoneApi.getVpnApi().getVPN(result.getId());
assertNotNull(ipResult.getId());
}
@AfterClass
public static void deleteVPN() throws RestClientException, IOException, InterruptedException {
VPNResponse result = oneandoneApi.getVpnApi().deleteVPN(vpn.getId());
assertNotNull(result);
assertNotNull(result.getId());
}
}
|
[
"\"OAO_TOKEN\""
] |
[] |
[
"OAO_TOKEN"
] |
[]
|
["OAO_TOKEN"]
|
java
| 1 | 0 | |
pkg/utils/utils.go
|
// Copyright 2014 Wandoujia Inc. All Rights Reserved.
// Licensed under the MIT (MIT-LICENSE.txt) license.
package utils
import (
"fmt"
"os"
"path/filepath"
log "github.com/ngaut/logging"
"github.com/ngaut/zkhelper"
"github.com/c4pt0r/cfg"
)
func InitConfig() (*cfg.Cfg, error) {
configFile := os.Getenv("CODIS_CONF")
if len(configFile) == 0 {
configFile = "config.ini"
}
ret := cfg.NewCfg(configFile)
if err := ret.Load(); err != nil {
return nil, err
}
return ret, nil
}
func InitConfigFromFile(filename string) (*cfg.Cfg, error) {
ret := cfg.NewCfg(filename)
if err := ret.Load(); err != nil {
return nil, err
}
return ret, nil
}
func GetZkLock(zkConn zkhelper.Conn, productName string) zkhelper.ZLocker {
zkPath := fmt.Sprintf("/zk/codis/db_%s/LOCK", productName)
ret := zkhelper.CreateMutex(zkConn, zkPath)
return ret
}
func GetExecutorPath() string {
filedirectory := filepath.Dir(os.Args[0])
execPath, err := filepath.Abs(filedirectory)
if err != nil {
log.Fatal(err)
}
return execPath
}
type Strings []string
func (s1 Strings) Eq(s2 []string) bool {
if len(s1) != len(s2) {
return false
}
for i := 0; i < len(s1); i++ {
if s1[i] != s2[i] {
return false
}
}
return true
}
|
[
"\"CODIS_CONF\""
] |
[] |
[
"CODIS_CONF"
] |
[]
|
["CODIS_CONF"]
|
go
| 1 | 0 | |
OIDN_def.py
|
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
import scipy.io as sio
import numpy as np
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Define Basic reconstruct block
class BasicBlock(torch.nn.Module):
def __init__(self,BLOCK_SIZE):
super(BasicBlock, self).__init__()
self.BLOCK_SIZE=BLOCK_SIZE
self.lambda_step = nn.Parameter(torch.Tensor([0.5]))
self.soft_thr = nn.Parameter(torch.Tensor([0.01]))
self.t = nn.Parameter(torch.Tensor([1.0]))
self.mergeScale = nn.Parameter(torch.Tensor([1.0]))
self.conv_D = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 3, 3, 3)))
self.conv1_forward = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))
self.conv2_forward = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))
self.conv1_backward = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))
self.conv2_backward = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))
self.conv1_G = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))
self.conv2_G = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))
self.conv3_G = nn.Parameter(init.xavier_normal_(torch.Tensor(3, 32, 3, 3)))
def forward(self, xprev, x, PhiWeight, PhiTWeight, PhiTb):
tplus = (1+torch.sqrt(1+4*self.t*self.t))/2
xi = (self.t-1)/tplus
deltax = x-xprev
zeta = x - self.lambda_step * PhiTPhi_fun(x, PhiWeight, PhiTWeight,self.BLOCK_SIZE)
zeta = zeta - self.lambda_step * xi * PhiTPhi_fun(deltax, PhiWeight, PhiTWeight,self.BLOCK_SIZE)
zeta = zeta + xi * deltax
zeta = zeta + self.lambda_step * PhiTb
x = zeta
x_input = x
x_D = F.conv2d(x_input, self.conv_D, padding=1)
x = F.conv2d(x_D, self.conv1_forward, padding=1)
x = F.relu(x)
x_forward = F.conv2d(x, self.conv2_forward, padding=1)
x = torch.mul(torch.sign(x_forward), F.relu(torch.abs(x_forward) - self.soft_thr))
x = F.conv2d(x, self.conv1_backward, padding=1)
x = F.relu(x)
x_backward = F.conv2d(x, self.conv2_backward, padding=1)
x = F.conv2d(F.relu(x_backward), self.conv1_G, padding=1)
x = F.conv2d(F.relu(x), self.conv2_G, padding=1)
x_G = F.conv2d(x, self.conv3_G, padding=1)
x_pred = x_input + x_G*self.mergeScale
x = F.conv2d(x_forward, self.conv1_backward, padding=1)
x = F.relu(x)
x_D_est = F.conv2d(x, self.conv2_backward, padding=1)
symloss = x_D_est - x_D
return [x_pred, symloss]
# Define OIDN
class OIDN(torch.nn.Module):
def __init__(self, LayerNo, M, BLOCK_SIZE):
super(OIDN, self).__init__()
N = BLOCK_SIZE * BLOCK_SIZE
self.Phir = nn.Parameter(init.xavier_normal_(torch.Tensor(M, N)))
self.Phig = nn.Parameter(init.xavier_normal_(torch.Tensor(M, N)))
self.Phib = nn.Parameter(init.xavier_normal_(torch.Tensor(M, N)))
self.Phi_scale = nn.Parameter(torch.Tensor([1.0]))
onelayer = []
self.LayerNo = LayerNo
self.M = M
self.N = N
self.BLOCK_SIZE = BLOCK_SIZE
for i in range(LayerNo):
onelayer.append(BasicBlock(BLOCK_SIZE))
self.fcs = nn.ModuleList(onelayer)
self.shuffle = torch.nn.PixelShuffle(BLOCK_SIZE)
def forward(self, x):
origX = x
# Sampling-subnet
Phir = self.Phir * self.Phi_scale
Phig = self.Phig * self.Phi_scale
Phib = self.Phib * self.Phi_scale
PhirWeight = Phir.contiguous().view(self.M, 1, self.BLOCK_SIZE, self.BLOCK_SIZE)
PhigWeight = Phig.contiguous().view(self.M, 1, self.BLOCK_SIZE, self.BLOCK_SIZE)
PhibWeight = Phib.contiguous().view(self.M, 1, self.BLOCK_SIZE, self.BLOCK_SIZE)
Phixr = F.conv2d(x[:,0:1,:,:], PhirWeight, padding=0, stride=self.BLOCK_SIZE, bias=None)
Phixg = F.conv2d(x[:,1:2,:,:], PhigWeight, padding=0, stride=self.BLOCK_SIZE, bias=None)
Phixb = F.conv2d(x[:,2:3,:,:], PhibWeight, padding=0, stride=self.BLOCK_SIZE, bias=None)
# Initialization-subnet
PhiWeight = torch.cat((
PhirWeight,
PhigWeight,
PhibWeight),dim=1)
PhiTWeight = torch.cat((
Phir.t().contiguous().view(self.N, self.M, 1, 1),
Phig.t().contiguous().view(self.N, self.M, 1, 1),
Phib.t().contiguous().view(self.N, self.M, 1, 1)),dim=0)
PhiTb = torch.cat((
self.shuffle(F.conv2d(Phixr, Phir.t().contiguous().view(self.N, self.M, 1, 1), padding=0, bias=None)),
self.shuffle(F.conv2d(Phixg, Phig.t().contiguous().view(self.N, self.M, 1, 1), padding=0, bias=None)),
self.shuffle(F.conv2d(Phixb, Phib.t().contiguous().view(self.N, self.M, 1, 1), padding=0, bias=None))),
dim=1)
x = PhiTb
# Recovery-subnet
layers_sym = [] # for computing symmetric loss
xprev = x
for i in range(self.LayerNo):
[x1, layer_sym] = self.fcs[i](xprev, x, PhiWeight, PhiTWeight, PhiTb)
xprev = x
x=x1
layers_sym.append(layer_sym)
x_final = x
return [x_final, layers_sym, [Phir,Phig,Phib]]
def PhiTPhi_fun(x, PhiW, PhiTW,BLOCK_SIZE):
N = BLOCK_SIZE * BLOCK_SIZE
phir = F.conv2d(x[:,0:1,:,:], PhiW[:,0:1,:,:], padding=0,stride=BLOCK_SIZE, bias=None)
phig = F.conv2d(x[:,1:2,:,:], PhiW[:,1:2,:,:], padding=0,stride=BLOCK_SIZE, bias=None)
phib = F.conv2d(x[:,2:3,:,:], PhiW[:,2:3,:,:], padding=0,stride=BLOCK_SIZE, bias=None)
xtempr = F.conv2d(phir, PhiTW[0:N,:,:,:], padding=0, bias=None)
xtempg = F.conv2d(phig, PhiTW[N:N*2,:,:,:], padding=0, bias=None)
xtempb = F.conv2d(phib, PhiTW[N*2:N*3,:,:,:], padding=0, bias=None)
temp = torch.cat(
(
xtempr,xtempg,xtempb
),dim=1
)
return torch.nn.PixelShuffle(BLOCK_SIZE)(temp)
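
# Minimal usage sketch (illustrative hyperparameters and shapes, not part of the original script):
# model = OIDN(LayerNo=9, M=409, BLOCK_SIZE=32).to(device)
# x = torch.randn(1, 3, 96, 96, device=device)  # height/width must be multiples of BLOCK_SIZE
# x_rec, sym_losses, (Phir, Phig, Phib) = model(x)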
|
[] |
[] |
[
"CUDA_DEVICE_ORDER"
] |
[]
|
["CUDA_DEVICE_ORDER"]
|
python
| 1 | 0 | |
pkg/cmd/build/buildkit.go
|
// Copyright 2020 The Okteto Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package build
import (
"context"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/containerd/console"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/util/progress/progressui"
okErrors "github.com/okteto/okteto/pkg/errors"
"github.com/okteto/okteto/pkg/log"
"github.com/okteto/okteto/pkg/okteto"
"github.com/pkg/errors"
"golang.org/x/oauth2"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/credentials/oauth"
)
const (
frontend = "dockerfile.v0"
)
//GetBuildKitHost returns the BuildKit URL, whether the build is using the Okteto Build Service, and an error if any
func GetBuildKitHost() (string, bool, error) {
buildKitHost := os.Getenv("BUILDKIT_HOST")
if buildKitHost != "" {
log.Information("Running your build in %s...", buildKitHost)
return buildKitHost, false, nil
}
buildkitURL, err := okteto.GetBuildKit()
if err != nil {
return "", false, err
}
if buildkitURL == okteto.CloudBuildKitURL {
log.Information("Running your build in Okteto Cloud...")
} else {
log.Information("Running your build in Okteto Enterprise...")
}
return buildkitURL, true, err
}
//getSolveOpt returns the buildkit solve options
func getSolveOpt(buildCtx, file, imageTag, target string, noCache bool, buildArgs []string) (*client.SolveOpt, error) {
if file == "" {
file = filepath.Join(buildCtx, "Dockerfile")
}
if _, err := os.Stat(file); os.IsNotExist(err) {
return nil, fmt.Errorf("Dockerfile '%s' does not exist", file)
}
localDirs := map[string]string{
"context": buildCtx,
"dockerfile": filepath.Dir(file),
}
frontendAttrs := map[string]string{
"filename": filepath.Base(file),
}
if target != "" {
frontendAttrs["target"] = target
}
if noCache {
frontendAttrs["no-cache"] = ""
}
for _, buildArg := range buildArgs {
kv := strings.SplitN(buildArg, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("invalid build-arg value %s", buildArg)
}
frontendAttrs["build-arg:"+kv[0]] = kv[1]
}
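	// (Illustration: a CLI value of "VERSION=1.2.3" becomes frontendAttrs["build-arg:VERSION"] = "1.2.3".)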
attachable := []session.Attachable{}
token, err := okteto.GetToken()
if err == nil {
registryURL, err := okteto.GetRegistry()
if err != nil {
return nil, err
}
attachable = append(attachable, newDockerAndOktetoAuthProvider(registryURL, okteto.GetUserID(), token.Token, os.Stderr))
} else {
attachable = append(attachable, authprovider.NewDockerAuthProvider(os.Stderr))
}
opt := &client.SolveOpt{
LocalDirs: localDirs,
Frontend: frontend,
FrontendAttrs: frontendAttrs,
Session: attachable,
}
if imageTag != "" {
opt.Exports = []client.ExportEntry{
{
Type: "image",
Attrs: map[string]string{
"name": imageTag,
"push": "true",
},
},
}
opt.CacheExports = []client.CacheOptionsEntry{
{
Type: "inline",
},
}
opt.CacheImports = []client.CacheOptionsEntry{
{
Type: "registry",
Attrs: map[string]string{"ref": imageTag},
},
}
}
return opt, nil
}
func getDockerFile(path, dockerFile string, isOktetoCluster bool) (string, error) {
if dockerFile == "" {
dockerFile = filepath.Join(path, "Dockerfile")
}
if !isOktetoCluster {
return dockerFile, nil
}
fileWithCacheHandler, err := getDockerfileWithCacheHandler(dockerFile)
if err != nil {
return "", errors.Wrap(err, "failed to create temporary build folder")
}
return fileWithCacheHandler, nil
}
func getBuildkitClient(ctx context.Context, isOktetoCluster bool, buildKitHost string) (*client.Client, error) {
if isOktetoCluster {
c, err := getClientForOktetoCluster(ctx, buildKitHost)
if err != nil {
log.Infof("failed to create okteto build client: %s", err)
return nil, okErrors.UserError{E: fmt.Errorf("failed to create okteto build client"), Hint: okErrors.ErrNotLogged.Error()}
}
return c, nil
}
c, err := client.New(ctx, buildKitHost, client.WithFailFast())
if err != nil {
return nil, errors.Wrapf(err, "failed to create build client for %s", buildKitHost)
}
return c, nil
}
func getClientForOktetoCluster(ctx context.Context, buildKitHost string) (*client.Client, error) {
b, err := url.Parse(buildKitHost)
if err != nil {
return nil, errors.Wrapf(err, "invalid buildkit host %s", buildKitHost)
}
okToken, err := okteto.GetToken()
if err != nil {
return nil, errors.Wrapf(err, "failed to get the token")
}
if len(okToken.Token) == 0 {
return nil, fmt.Errorf("auth token missing from token file")
}
creds := client.WithCredentials(b.Hostname(), okteto.GetCertificatePath(), "", "")
oauthToken := &oauth2.Token{
AccessToken: okToken.Token,
}
rpc := client.WithRPCCreds(oauth.NewOauthAccess(oauthToken))
c, err := client.New(ctx, buildKitHost, client.WithFailFast(), creds, rpc)
if err != nil {
return nil, err
}
return c, nil
}
func solveBuild(ctx context.Context, c *client.Client, opt *client.SolveOpt) (string, error) {
var solveResp *client.SolveResponse
ch := make(chan *client.SolveStatus)
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
var err error
solveResp, err = c.Solve(ctx, nil, *opt, ch)
return errors.Wrap(err, "build failed")
})
eg.Go(func() error {
var c console.Console
if cn, err := console.ConsoleFromFile(os.Stderr); err == nil {
c = cn
}
// not using shared context to not disrupt display but let it finish reporting errors
return progressui.DisplaySolveStatus(context.TODO(), "", c, os.Stdout, ch)
})
if err := eg.Wait(); err != nil {
return "", err
}
return solveResp.ExporterResponse["containerimage.digest"], nil
}
|
[
"\"BUILDKIT_HOST\""
] |
[] |
[
"BUILDKIT_HOST"
] |
[]
|
["BUILDKIT_HOST"]
|
go
| 1 | 0 | |
landscape/vault.py
|
import hvac
import os
import sys
import yaml
import base64
import logging
def kubeconfig_context_entry(context_name):
"""
Generates a kubeconfig context entry
Args:
context_name (str): The Kubernetes context
Returns:
context entry for kubeconfig file (dict)
"""
context_entry = {
'name': context_name,
'context': {
'cluster': context_name + '-cluster',
'user': context_name + '-user',
}
}
return context_entry
def kubeconfig_cluster_entry(context_name, k8s_server, ca_cert):
"""
Generates a kubeconfig cluster entry
Args:
context_name (str): The Kubernetes context
k8s_server (str): The URL of the Kubernetes API server
        ca_cert (str): The PEM-encoded CA certificate to verify against
Returns: cluster entry for kubeconfig file (dict)
"""
base64_ca_cert = base64.b64encode(bytes(ca_cert, 'utf-8')).decode('ascii')
cluster_entry = {
'name': context_name + '-cluster',
'cluster': {
'server': k8s_server,
'certificate-authority-data': base64_ca_cert
}
}
return cluster_entry
def kubeconfig_user_entry(context_name, client_cert, client_key):
"""
Generates a kubeconfig user entry
Args:
context_name (str): The Kubernetes context
client_cert (str): The PEM-encoded client cert
client_key (str): The PEM-encoded client key
Returns: user entry for kubeconfig file (dict)
"""
base64_cert = base64.b64encode(bytes(client_cert, 'utf-8')).decode('ascii')
base64_key = base64.b64encode(bytes(client_key, 'utf-8')).decode('ascii')
user_entry = {
'name': context_name + '-user',
'user': {
'client-certificate-data': base64_cert,
'client-key-data': base64_key
}
}
return user_entry
def write_kubeconfig(cfg_path):
"""
Writes a kubernetes client configuration file with values from Vault
Expects Vault to be pre-populated like so:
vault write /secret/k8s_contexts/minikube \
ca_cert='ca_cert_value' \
client_cert='client_cert_value' \
client_key='client_key_value' \
api_server='https://kubernetes.default.svc.cluster.local'
Args:
cfg_path (str): Path to the kubeconfig file being written
Returns:
None
"""
vault_root = '/secret/k8s_contexts'
vault_addr = os.environ.get('VAULT_ADDR')
vault_cacert = os.environ.get('VAULT_CACERT')
vault_token = os.environ.get('VAULT_TOKEN')
vault_client = hvac.Client(url=vault_addr,
token=vault_token,
verify=vault_cacert)
k8sconfig_contents = {}
for context in vault_client.list(vault_root)['data']['keys']:
clustercfg_root = vault_root + '/' + context
print("Reading kubeconfig settings from {0}".format(clustercfg_root))
try:
vault_clustercfg = vault_client.read(clustercfg_root)
except hvac.exceptions.InvalidRequest:
sys.exit("Failed to read from Vault. Check VAULT_ vars")
if not vault_clustercfg:
sys.exit("No entry {0} found in Vault path {1}".format(context,
vault_root))
vault_data = vault_clustercfg['data']
server_addr = vault_data['api_server']
server_cacert = vault_data['ca_cert']
client_cert = vault_data['client_cert']
client_key = vault_data['client_key']
context_contents = gen_k8sconf(k8s_context=context,
api_server=server_addr,
ca_cert=server_cacert,
client_auth_cert=client_cert,
client_auth_key=client_key)
k8sconfig_contents.update(context_contents)
expanded_cfg_path = os.path.expanduser(cfg_path)
cfg_dir = '/'.join(expanded_cfg_path.split('/')[0:-1])
if not os.path.exists(cfg_dir):
print("Creating directory {0}".format(cfg_dir))
os.makedirs(cfg_dir)
with open(expanded_cfg_path, 'w') as kubeconfig:
kubeconfig.write(yaml.dump(k8sconfig_contents,default_flow_style=False))
print("Wrote kubeconfig to {0}".format(expanded_cfg_path))
def gen_k8sconf(k8s_context=None, api_server=None, ca_cert=None,
client_auth_cert=None,
client_auth_key=None):
"""
Generate a kubeconfig object
Args:
k8s_context (str):
api_server (str):
ca_cert (str):
client_auth_cert (str):
client_auth_key (str):
Returns: kubeconfig data (dict)
"""
contents = {}
contents['apiVersion'] = 'v1'
contents['kind'] = 'Config'
contents['preferences'] = {}
contents['clusters'] = []
contents['contexts'] = []
contents['users'] = []
contents['current-context'] = k8s_context
vault_context_entry = kubeconfig_context_entry(k8s_context)
vault_cluster_entry = kubeconfig_cluster_entry(k8s_context,
api_server,
ca_cert)
vault_user_entry = kubeconfig_user_entry(k8s_context,
client_auth_cert,
client_auth_key)
contents['contexts'].append(vault_context_entry)
contents['clusters'].append(vault_cluster_entry)
contents['users'].append(vault_user_entry)
return contents
def read_kubeconfig(cfg_path):
"""
Reads the current kubeconfig file and places it into Vault
"""
k8sconfig_contents = {}
with open(cfg_path, 'r') as stream:
try:
            k8sconfig_contents = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
contexts = k8sconfig_contents['contexts']
clusters = k8sconfig_contents['clusters']
users = k8sconfig_contents['users']
for context in contexts:
# kubeconfig context entries
context_name = context['name']
# gke clusters are set with GOOGLE_CREDENTIALS, not here
if context_name.startswith('gke_'):
continue
context_cluster = context['context']['cluster']
context_user = context['context']['user']
# kubeconfig cluster entries
cluster_cacert = ''
client_auth_cert = ''
client_auth_key = ''
cluster_cfg = [d for d in clusters if d['name'] == context_cluster][0]
cluster_server = cluster_cfg['cluster']['server']
if 'certificate-authority-data' in cluster_cfg['cluster']:
ca_cert_data = cluster_cfg['cluster']['certificate-authority-data']
cluster_cacert = base64.b64encode(bytes(ca_cert_data, 'utf-8')).decode('ascii')
elif 'certificate-authority' in cluster_cfg['cluster']:
cacert_file = cluster_cfg['cluster']['certificate-authority']
if cacert_file.startswith('/'):
cacert_path = cacert_file
else:
cacert_path = os.path.expanduser('~/.kube/' + cacert_file)
with open(cacert_path, 'r') as stream:
try:
                    cluster_cacert = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
else:
raise "no user certificate-authority(-data) entry in kubeconfig"
# kubeconfig user entries
user_cfg = [d for d in users if d['name'] == context_user][0]
print("user_cfg={0}".format(user_cfg))
if 'client-certificate-data' in user_cfg['user']:
client_cert_data = user_cfg['user']['client-certificate-data']
client_key_data = user_cfg['user']['client-key-data']
client_auth_cert = base64.b64encode(bytes(client_cert_data, 'utf-8')).decode('ascii')
client_auth_key = base64.b64encode(bytes(client_key_data, 'utf-8')).decode('ascii')
elif 'client-certificate' in user_cfg['user']:
client_cert_file = user_cfg['user']['client-certificate']
client_key_file = user_cfg['user']['client-key']
# client cert
if client_cert_file.startswith('/'):
client_cert_path = client_cert_file
else:
client_cert_path = os.path.expanduser('~/.kube/' + client_cert_file)
with open(client_cert_path, 'r') as stream:
try:
                    client_auth_cert = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
# client key
if client_key_file.startswith('/'):
client_key_path = client_key_file
else:
client_key_path = os.path.expanduser('~/.kube/' + client_key_file)
with open(client_key_path, 'r') as stream:
try:
                    client_auth_key = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
raise "read_kubeconfig not implemented"
class VaultClient(object):
"""Connects to and authenticates with Vault
Attributes:
__vault_client (hvac.Client): Client connected to Vault
"""
def __init__(self):
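        """
        Reads VAULT_ADDR, VAULT_CACERT and VAULT_TOKEN from the environment
        and creates an authenticated hvac client.
        """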
vault_addr = os.environ.get('VAULT_ADDR')
vault_cacert = os.environ.get('VAULT_CACERT')
vault_token = os.environ.get('VAULT_TOKEN')
self.logger = logging.getLogger(__name__)
logging.debug(" - VAULT_ADDR is {0}".format(vault_addr))
logging.debug(" - VAULT_CACERT is {0}".format(vault_cacert))
        # Raise error if VAULT_ environment variables are not set
missing_fmt_string = '{0} missing in environment'
if not vault_addr:
raise ValueError(missing_fmt_string.format('VAULT_ADDR'))
if not vault_token:
raise ValueError(missing_fmt_string.format('VAULT_TOKEN'))
if vault_addr.startswith('https://') and not vault_cacert:
raise ValueError(missing_fmt_string.format('VAULT_CACERT'))
self.__vault_client = hvac.Client(url=vault_addr,
token=vault_token,
verify=vault_cacert)
def dump_vault_from_prefix(self, path_prefix, strip_root_key=False):
"""
Dump Vault data at prefix into dict.
strip_root_key argument used for recursive path-stripping.
Set to True when you call the method outside of itself (non-recursively)
Args:
path_prefix (str): The prefix which to dump
strip_root_key (bool): Strip the root key from return value
Returns:
Data from Vault at prefix (dict)
"""
all_values_at_prefix = {}
logging.debug(" - reading vault subkeys at {0}".format(path_prefix))
subkeys_at_prefix = self.__vault_client.list(path_prefix)
logging.debug(" - subkeys are {0}".format(subkeys_at_prefix))
# use last vault key (delimited by '/') as dict index
prefix_keyname = path_prefix.split('/')[-1]
        if prefix_keyname not in all_values_at_prefix:
all_values_at_prefix[prefix_keyname] = {}
# look in Vault path for subkeys. If they exist, recurse.
if subkeys_at_prefix:
for subkey in subkeys_at_prefix['data']['keys']:
prefixed_key = path_prefix + '/' + subkey
sub_vault_key = self.dump_vault_from_prefix(prefixed_key)
all_values_at_prefix[prefix_keyname].update(sub_vault_key)
else:
vault_item_data = self.get_vault_data(path_prefix)
all_values_at_prefix[prefix_keyname].update(vault_item_data)
        # strip_root_key=True is passed on the initial (non-recursive) call
        # so the return value is not nested under the root key.
        if strip_root_key:
retval = all_values_at_prefix[prefix_keyname]
else:
retval = all_values_at_prefix
return retval
def get_vault_data(self, vault_path):
"""
Get Vault data for a specific path
Args:
vault_path (str): path to Vault item
Returns:
Vault secret contents (dict)
"""
vault_error_read_str = 'Vault read at path: {0} error: {1}'
vault_error_data_str = 'Vault data missing at path: {0}'
try:
vault_item_contents = self.__vault_client.read(vault_path)
except ValueError as e:
raise ValueError(vault_error_read_str.format(vault_path, e))
if vault_item_contents and 'data' in vault_item_contents:
return vault_item_contents['data']
else:
raise ValueError(vault_error_data_str.format(vault_path))
def list_vault_prefix(self, vault_path):
"""
Get Vault data for a specific path
Args:
vault_path (str): path to Vault item
Returns:
Vault secret contents (dict)
"""
vault_error_read_str = 'Vault read at path: {0} error: {1}'
vault_error_data_str = 'Vault data missing at path: {0}'
try:
vault_item_list = self.__vault_client.list(vault_path)
except ValueError as e:
raise ValueError(vault_error_read_str.format(vault_path, e))
if vault_item_list and 'data' in vault_item_list:
return vault_item_list['data']
else:
raise ValueError(vault_error_data_str.format(vault_path))
|
[] |
[] |
[
"VAULT_ADDR",
"VAULT_TOKEN",
"VAULT_CACERT"
] |
[]
|
["VAULT_ADDR", "VAULT_TOKEN", "VAULT_CACERT"]
|
python
| 3 | 0 | |
pkg/host/host_linux.go
|
// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package host
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"unsafe"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/prog"
"github.com/google/syzkaller/sys/linux"
)
func isSupported(c *prog.Syscall, sandbox string) (bool, string) {
if strings.HasPrefix(c.CallName, "syz_") {
return isSupportedSyzkall(sandbox, c)
}
if strings.HasPrefix(c.Name, "socket$") ||
strings.HasPrefix(c.Name, "socketpair$") {
return isSupportedSocket(c)
}
if strings.HasPrefix(c.Name, "openat$") {
return isSupportedOpenAt(c)
}
if strings.HasPrefix(c.Name, "mount$") {
return isSupportedMount(c, sandbox)
}
if c.Name == "ioctl$EXT4_IOC_SHUTDOWN" && sandbox == "none" {
// Don't shutdown root filesystem.
return false, "unsafe with sandbox=none"
}
// There are 3 possible strategies for detecting supported syscalls:
	// 1. Execute all syscalls with presumably invalid arguments and check for ENOSYS.
// But not all syscalls are safe to execute. For example, pause will hang,
// while setpgrp will push the process into own process group.
// 2. Check presence of /sys/kernel/debug/tracing/events/syscalls/sys_enter_* files.
// This requires root and CONFIG_FTRACE_SYSCALLS. Also it lies for some syscalls.
// For example, on x86_64 it says that sendfile is not present (only sendfile64).
// 3. Check sys_syscallname in /proc/kallsyms.
// Requires CONFIG_KALLSYMS.
// Kallsyms seems to be the most reliable and fast. That's what we use first.
// If kallsyms is not present, we fallback to execution of syscalls.
kallsymsOnce.Do(func() {
kallsyms, _ = ioutil.ReadFile("/proc/kallsyms")
})
if !testFallback && len(kallsyms) != 0 {
return isSupportedKallsyms(c)
}
return isSupportedTrial(c)
}
func isSupportedKallsyms(c *prog.Syscall) (bool, string) {
name := c.CallName
if newname := kallsymsMap[name]; newname != "" {
name = newname
}
if !bytes.Contains(kallsyms, []byte(" T sys_"+name+"\n")) &&
!bytes.Contains(kallsyms, []byte(" T ksys_"+name+"\n")) &&
!bytes.Contains(kallsyms, []byte(" T __ia32_sys_"+name+"\n")) &&
!bytes.Contains(kallsyms, []byte(" T __x64_sys_"+name+"\n")) {
return false, fmt.Sprintf("sys_%v is not present in /proc/kallsyms", name)
}
return true, ""
}
func isSupportedTrial(c *prog.Syscall) (bool, string) {
switch c.CallName {
// These known to cause hangs.
case "exit", "pause":
return true, ""
}
trialMu.Lock()
defer trialMu.Unlock()
if res, ok := trialSupported[c.NR]; ok {
return res, "ENOSYS"
}
cmd := osutil.Command(os.Args[0])
cmd.Env = []string{fmt.Sprintf("SYZ_TRIAL_TEST=%v", c.NR)}
_, err := osutil.Run(10*time.Second, cmd)
res := err != nil
trialSupported[c.NR] = res
return res, "ENOSYS"
}
func init() {
str := os.Getenv("SYZ_TRIAL_TEST")
if str == "" {
return
}
nr, err := strconv.Atoi(str)
if err != nil {
panic(err)
}
arg := ^uintptr(0) - 1e4 // something as invalid as possible
_, _, err = syscall.Syscall6(uintptr(nr), arg, arg, arg, arg, arg, arg)
if err == syscall.ENOSYS {
os.Exit(0)
}
os.Exit(1)
}
// Some syscall names diverge in __NR_* consts and kallsyms.
// umount2 is renamed to umount in arch/x86/entry/syscalls/syscall_64.tbl.
// Where umount is renamed to oldumount is unclear.
var (
kallsyms []byte
kallsymsOnce sync.Once
kallsymsMap = map[string]string{
"umount": "oldumount",
"umount2": "umount",
}
trialMu sync.Mutex
trialSupported = make(map[uint64]bool)
filesystems []byte
filesystemsOnce sync.Once
)
// The function is lengthy as it handles all pseudo-syscalls,
// but it does not seem to cause comprehension problems as there is no shared state.
// Splitting this per-syscall will only increase code size.
// nolint: gocyclo
func isSupportedSyzkall(sandbox string, c *prog.Syscall) (bool, string) {
switch c.CallName {
case "syz_open_dev":
if _, ok := c.Args[0].(*prog.ConstType); ok {
// This is for syz_open_dev$char/block.
// They are currently commented out, but in case one enables them.
return true, ""
}
fname, ok := extractStringConst(c.Args[0])
if !ok {
panic("first open arg is not a pointer to string const")
}
var check func(dev string) bool
check = func(dev string) bool {
if !strings.Contains(dev, "#") {
// Note: don't try to open them all, some can hang (e.g. /dev/snd/pcmC#D#p).
return osutil.IsExist(dev)
}
for i := 0; i < 10; i++ {
if check(strings.Replace(dev, "#", strconv.Itoa(i), 1)) {
return true
}
}
return false
}
if !check(fname) {
return false, fmt.Sprintf("file %v does not exist", fname)
}
return onlySandboxNoneOrNamespace(sandbox)
case "syz_open_procfs":
return true, ""
case "syz_open_pts":
return true, ""
case "syz_emit_ethernet", "syz_extract_tcp_res":
reason := checkNetworkInjection()
return reason == "", reason
case "syz_kvm_setup_cpu":
switch c.Name {
case "syz_kvm_setup_cpu$x86":
if runtime.GOARCH == "amd64" || runtime.GOARCH == "386" {
return true, ""
}
case "syz_kvm_setup_cpu$arm64":
if runtime.GOARCH == "arm64" {
return true, ""
}
}
return false, "unsupported arch"
case "syz_init_net_socket":
// Unfortunately this only works with sandbox none at the moment.
// The problem is that setns of a network namespace requires CAP_SYS_ADMIN
// in the target namespace, and we've lost all privs in the init namespace
// during creation of a user namespace.
if ok, reason := onlySandboxNone(sandbox); !ok {
return false, reason
}
return isSupportedSocket(c)
case "syz_genetlink_get_family_id":
fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_GENERIC)
if fd == -1 {
return false, fmt.Sprintf("socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC) failed: %v", err)
}
syscall.Close(fd)
return true, ""
case "syz_mount_image":
if ok, reason := onlySandboxNone(sandbox); !ok {
return ok, reason
}
fstype, ok := extractStringConst(c.Args[0])
if !ok {
panic("syz_mount_image arg is not string")
}
return isSupportedFilesystem(fstype)
case "syz_read_part_table":
return onlySandboxNone(sandbox)
case "syz_execute_func":
return true, ""
}
panic("unknown syzkall: " + c.Name)
}
func onlySandboxNone(sandbox string) (bool, string) {
if syscall.Getuid() != 0 || sandbox != "none" {
return false, "only supported under root with sandbox=none"
}
return true, ""
}
func onlySandboxNoneOrNamespace(sandbox string) (bool, string) {
if syscall.Getuid() != 0 || sandbox == "setuid" {
return false, "only supported under root with sandbox=none/namespace"
}
return true, ""
}
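// isSupportedSocket probes whether the socket family (and, when constant, type and protocol)
// can actually be created on the running kernel.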
func isSupportedSocket(c *prog.Syscall) (bool, string) {
af, ok := c.Args[0].(*prog.ConstType)
if !ok {
panic("socket family is not const")
}
fd, err := syscall.Socket(int(af.Val), 0, 0)
if fd != -1 {
syscall.Close(fd)
}
if err == syscall.ENOSYS {
return false, "socket syscall returns ENOSYS"
}
if err == syscall.EAFNOSUPPORT {
return false, "socket family is not supported (EAFNOSUPPORT)"
}
proto, ok := c.Args[2].(*prog.ConstType)
if !ok {
return true, ""
}
var typ uint64
if arg, ok := c.Args[1].(*prog.ConstType); ok {
typ = arg.Val
} else if arg, ok := c.Args[1].(*prog.FlagsType); ok {
typ = arg.Vals[0]
} else {
return true, ""
}
fd, err = syscall.Socket(int(af.Val), int(typ), int(proto.Val))
if fd != -1 {
syscall.Close(fd)
return true, ""
}
return false, err.Error()
}
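// isSupportedOpenAt checks that the absolute path targeted by an openat$ variant exists
// and can be opened read-only.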
func isSupportedOpenAt(c *prog.Syscall) (bool, string) {
fname, ok := extractStringConst(c.Args[1])
if !ok || len(fname) == 0 || fname[0] != '/' {
return true, ""
}
fd, err := syscall.Open(fname, syscall.O_RDONLY, 0)
if fd != -1 {
syscall.Close(fd)
}
if err != nil {
return false, fmt.Sprintf("open(%v) failed: %v", fname, err)
}
return true, ""
}
func isSupportedMount(c *prog.Syscall, sandbox string) (bool, string) {
fstype, ok := extractStringConst(c.Args[2])
if !ok {
panic(fmt.Sprintf("%v: filesystem is not string const", c.Name))
}
if ok, reason := isSupportedFilesystem(fstype); !ok {
return ok, reason
}
switch fstype {
case "fuse", "fuseblk":
if err := osutil.IsAccessible("/dev/fuse"); err != nil {
return false, err.Error()
}
return onlySandboxNoneOrNamespace(sandbox)
default:
return onlySandboxNone(sandbox)
}
}
func isSupportedFilesystem(fstype string) (bool, string) {
filesystemsOnce.Do(func() {
filesystems, _ = ioutil.ReadFile("/proc/filesystems")
})
if !bytes.Contains(filesystems, []byte("\t"+fstype+"\n")) {
return false, fmt.Sprintf("/proc/filesystems does not contain %v", fstype)
}
return true, ""
}
func extractStringConst(typ prog.Type) (string, bool) {
ptr, ok := typ.(*prog.PtrType)
if !ok {
panic("first open arg is not a pointer to string const")
}
str, ok := ptr.Type.(*prog.BufferType)
if !ok || str.Kind != prog.BufferString || len(str.Values) == 0 {
return "", false
}
v := str.Values[0]
for len(v) != 0 && v[len(v)-1] == 0 {
v = v[:len(v)-1] // string terminating \x00
}
return v, true
}
func init() {
checkFeature[FeatureCoverage] = checkCoverage
checkFeature[FeatureComparisons] = checkComparisons
checkFeature[FeatureSandboxSetuid] = unconditionallyEnabled
checkFeature[FeatureSandboxNamespace] = checkSandboxNamespace
checkFeature[FeatureFaultInjection] = checkFaultInjection
setupFeature[FeatureFaultInjection] = setupFaultInjection
checkFeature[FeatureLeakChecking] = checkLeakChecking
setupFeature[FeatureLeakChecking] = setupLeakChecking
callbFeature[FeatureLeakChecking] = callbackLeakChecking
checkFeature[FeatureNetworkInjection] = checkNetworkInjection
checkFeature[FeatureNetworkDevices] = checkNetworkDevices
}
func checkCoverage() string {
if reason := checkDebugFS(); reason != "" {
return reason
}
if !osutil.IsExist("/sys/kernel/debug/kcov") {
return "CONFIG_KCOV is not enabled"
}
if err := osutil.IsAccessible("/sys/kernel/debug/kcov"); err != nil {
return err.Error()
}
return ""
}
func checkComparisons() (reason string) {
if reason = checkDebugFS(); reason != "" {
return reason
}
// TODO(dvyukov): this should run under target arch.
// E.g. KCOV ioctls were initially not supported on 386 (missing compat_ioctl),
// and a 386 executor won't be able to use them, but an amd64 fuzzer will be.
fd, err := syscall.Open("/sys/kernel/debug/kcov", syscall.O_RDWR, 0)
if err != nil {
return "CONFIG_KCOV is not enabled"
}
defer syscall.Close(fd)
// Trigger host target lazy initialization, it will fill linux.KCOV_INIT_TRACE.
// It's all wrong and needs to be refactored.
if _, err := prog.GetTarget(runtime.GOOS, runtime.GOARCH); err != nil {
return fmt.Sprintf("failed to get target: %v", err)
}
coverSize := uintptr(64 << 10)
_, _, errno := syscall.Syscall(
syscall.SYS_IOCTL, uintptr(fd), linux.KCOV_INIT_TRACE, coverSize)
if errno != 0 {
return fmt.Sprintf("ioctl(KCOV_INIT_TRACE) failed: %v", errno)
}
mem, err := syscall.Mmap(fd, 0, int(coverSize*unsafe.Sizeof(uintptr(0))),
syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
if err != nil {
return fmt.Sprintf("KCOV mmap failed: %v", err)
}
defer func() {
if err := syscall.Munmap(mem); err != nil {
reason = fmt.Sprintf("munmap failed: %v", err)
}
}()
_, _, errno = syscall.Syscall(syscall.SYS_IOCTL,
uintptr(fd), linux.KCOV_ENABLE, linux.KCOV_TRACE_CMP)
if errno != 0 {
if errno == 524 { // ENOTSUPP
return "CONFIG_KCOV_ENABLE_COMPARISONS is not enabled"
}
return fmt.Sprintf("ioctl(KCOV_TRACE_CMP) failed: %v", errno)
}
defer func() {
_, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), linux.KCOV_DISABLE, 0)
if errno != 0 {
reason = fmt.Sprintf("ioctl(KCOV_DISABLE) failed: %v", errno)
}
}()
return ""
}
func checkFaultInjection() string {
if err := osutil.IsAccessible("/proc/self/make-it-fail"); err != nil {
return "CONFIG_FAULT_INJECTION is not enabled"
}
if err := osutil.IsAccessible("/proc/thread-self/fail-nth"); err != nil {
return "kernel does not have systematic fault injection support"
}
if reason := checkDebugFS(); reason != "" {
return reason
}
if err := osutil.IsAccessible("/sys/kernel/debug/failslab/ignore-gfp-wait"); err != nil {
return "CONFIG_FAULT_INJECTION_DEBUG_FS is not enabled"
}
return ""
}
func setupFaultInjection() error {
if err := osutil.WriteFile("/sys/kernel/debug/failslab/ignore-gfp-wait", []byte("N")); err != nil {
return fmt.Errorf("failed to write /failslab/ignore-gfp-wait: %v", err)
}
if err := osutil.WriteFile("/sys/kernel/debug/fail_futex/ignore-private", []byte("N")); err != nil {
return fmt.Errorf("failed to write /fail_futex/ignore-private: %v", err)
}
if err := osutil.WriteFile("/sys/kernel/debug/fail_page_alloc/ignore-gfp-highmem", []byte("N")); err != nil {
return fmt.Errorf("failed to write /fail_page_alloc/ignore-gfp-highmem: %v", err)
}
if err := osutil.WriteFile("/sys/kernel/debug/fail_page_alloc/ignore-gfp-wait", []byte("N")); err != nil {
return fmt.Errorf("failed to write /fail_page_alloc/ignore-gfp-wait: %v", err)
}
if err := osutil.WriteFile("/sys/kernel/debug/fail_page_alloc/min-order", []byte("0")); err != nil {
return fmt.Errorf("failed to write /fail_page_alloc/min-order: %v", err)
}
return nil
}
func checkLeakChecking() string {
if reason := checkDebugFS(); reason != "" {
return reason
}
if err := osutil.IsAccessible("/sys/kernel/debug/kmemleak"); err != nil {
return "CONFIG_DEBUG_KMEMLEAK is not enabled"
}
return ""
}
func setupLeakChecking() error {
fd, err := syscall.Open("/sys/kernel/debug/kmemleak", syscall.O_RDWR, 0)
if err != nil {
return fmt.Errorf("failed to open /sys/kernel/debug/kmemleak: %v", err)
}
defer syscall.Close(fd)
if _, err := syscall.Write(fd, []byte("scan=off")); err != nil {
// kmemleak returns EBUSY when kmemleak is already turned off.
if err != syscall.EBUSY {
return fmt.Errorf("write(kmemleak, scan=off) failed: %v", err)
}
}
// Flush boot leaks.
if _, err := syscall.Write(fd, []byte("scan")); err != nil {
return fmt.Errorf("write(kmemleak, scan) failed: %v", err)
}
time.Sleep(5 * time.Second) // account for MSECS_MIN_AGE
if _, err := syscall.Write(fd, []byte("scan")); err != nil {
return fmt.Errorf("write(kmemleak, scan) failed: %v", err)
}
if _, err := syscall.Write(fd, []byte("clear")); err != nil {
return fmt.Errorf("write(kmemleak, clear) failed: %v", err)
}
return nil
}
func callbackLeakChecking() {
start := time.Now()
fd, err := syscall.Open("/sys/kernel/debug/kmemleak", syscall.O_RDWR, 0)
if err != nil {
panic(err)
}
defer syscall.Close(fd)
// KMEMLEAK has false positives. To mitigate most of them, it checksums
// potentially leaked objects, and reports them only on the next scan
// iff the checksum does not change. Because of that we do the following
// intricate dance:
// Scan, sleep, scan again. At this point we can get some leaks.
// If there are leaks, we sleep and scan again, this can remove
// false leaks. Then, read kmemleak again. If we get leaks now, then
// hopefully these are true positives during the previous testing cycle.
if _, err := syscall.Write(fd, []byte("scan")); err != nil {
panic(err)
}
time.Sleep(time.Second)
// Account for MSECS_MIN_AGE
// (1 second less because scanning will take at least a second).
for time.Since(start) < 4*time.Second {
time.Sleep(time.Second)
}
if _, err := syscall.Write(fd, []byte("scan")); err != nil {
panic(err)
}
buf := make([]byte, 128<<10)
n, err := syscall.Read(fd, buf)
if err != nil {
panic(err)
}
if n != 0 {
time.Sleep(time.Second)
if _, err := syscall.Write(fd, []byte("scan")); err != nil {
panic(err)
}
n, err := syscall.Read(fd, buf)
if err != nil {
panic(err)
}
nleaks := 0
for buf = buf[:n]; len(buf) != 0; {
end := bytes.Index(buf[1:], []byte("unreferenced object"))
if end != -1 {
end++
} else {
end = len(buf)
}
report := buf[:end]
buf = buf[end:]
if kmemleakIgnore(report) {
continue
}
// BUG in output should be recognized by manager.
fmt.Printf("BUG: memory leak\n%s\n", report)
nleaks++
}
if nleaks != 0 {
os.Exit(1)
}
}
if _, err := syscall.Write(fd, []byte("clear")); err != nil {
panic(err)
}
}
func kmemleakIgnore(report []byte) bool {
// kmemleak has a bunch of false positives (at least what looks like
// false positives at first glance). So we are conservative with what we report.
// First, we filter out any allocations that don't come from executor processes.
// Second, we ignore a bunch of functions entirely.
// Ideally, someone should debug/fix all these cases and remove ignores.
if !bytes.Contains(report, []byte(`comm "syz-executor`)) {
return true
}
for _, ignore := range []string{
" copy_process",
" do_execveat_common",
" __ext4_",
" get_empty_filp",
" do_filp_open",
" new_inode",
} {
if bytes.Contains(report, []byte(ignore)) {
return true
}
}
return false
}
func checkSandboxNamespace() string {
if err := osutil.IsAccessible("/proc/self/ns/user"); err != nil {
return err.Error()
}
return ""
}
func checkNetworkInjection() string {
if err := osutil.IsAccessible("/dev/net/tun"); err != nil {
return err.Error()
}
return checkNetworkDevices()
}
func checkNetworkDevices() string {
if _, err := exec.LookPath("ip"); err != nil {
return "ip command is not found"
}
return ""
}
func checkDebugFS() string {
if err := osutil.IsAccessible("/sys/kernel/debug"); err != nil {
return "debugfs is not enabled or not mounted"
}
return ""
}
|
[
"\"SYZ_TRIAL_TEST\""
] |
[] |
[
"SYZ_TRIAL_TEST"
] |
[]
|
["SYZ_TRIAL_TEST"]
|
go
| 1 | 0 | |
pkg/cortex/serve/init/script.py
|
# Copyright 2021 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import json
import sys
from cortex_internal.lib.log import configure_logger
logger = configure_logger("cortex", os.environ["CORTEX_LOG_CONFIG_FILE"])
from cortex_internal.lib.type import (
predictor_type_from_api_spec,
PythonPredictorType,
TensorFlowPredictorType,
TensorFlowNeuronPredictorType,
ONNXPredictorType,
)
from cortex_internal.lib.model import (
FileBasedModelsTreeUpdater, # only when num workers > 1
TFSModelLoader,
)
from cortex_internal.lib.api import get_spec
from cortex_internal.lib.checkers.pod import wait_neuron_rtd
def prepare_tfs_servers_api(api_spec: dict, model_dir: str) -> TFSModelLoader:
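    """
    Builds a TFSModelLoader cron for one or more TensorFlow Serving processes.
    Args:
        api_spec: API configuration.
        model_dir: Directory holding the TFS models and downloads.
    Returns:
        A configured TFSModelLoader instance.
    """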
# get TFS address-specific details
tf_serving_host = os.getenv("CORTEX_TF_SERVING_HOST", "localhost")
tf_base_serving_port = int(os.getenv("CORTEX_TF_BASE_SERVING_PORT", "9000"))
# determine if multiple TF processes are required
num_processes = 1
has_multiple_tf_servers = os.getenv("CORTEX_MULTIPLE_TF_SERVERS")
if has_multiple_tf_servers:
num_processes = int(os.environ["CORTEX_PROCESSES_PER_REPLICA"])
# initialize models for each TF process
addresses = []
for w in range(int(num_processes)):
addresses.append(f"{tf_serving_host}:{tf_base_serving_port+w}")
if len(addresses) == 1:
return TFSModelLoader(
interval=10,
api_spec=api_spec,
address=addresses[0],
tfs_model_dir=model_dir,
download_dir=model_dir,
)
return TFSModelLoader(
interval=10,
api_spec=api_spec,
addresses=addresses,
tfs_model_dir=model_dir,
download_dir=model_dir,
)
def are_models_specified(api_spec: dict) -> bool:
"""
Checks if models have been specified in the API spec (cortex.yaml).
Args:
api_spec: API configuration.
"""
predictor_type = predictor_type_from_api_spec(api_spec)
if predictor_type == PythonPredictorType and api_spec["predictor"]["multi_model_reloading"]:
models = api_spec["predictor"]["multi_model_reloading"]
elif predictor_type != PythonPredictorType:
models = api_spec["predictor"]["models"]
else:
return False
return models is not None
def is_model_caching_enabled(api_spec: dict) -> bool:
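    """
    Checks if model caching has been enabled in the API spec (cortex.yaml).
    Args:
        api_spec: API configuration.
    """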
predictor_type = predictor_type_from_api_spec(api_spec)
if predictor_type == PythonPredictorType and api_spec["predictor"]["multi_model_reloading"]:
models = api_spec["predictor"]["multi_model_reloading"]
elif predictor_type != PythonPredictorType:
models = api_spec["predictor"]["models"]
else:
return False
return models and models["cache_size"] and models["disk_cache_size"]
def main():
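    """
    Prepares the serving container: waits for the neuron-rtd sidecar when
    Inferentia is used, records the TF Serving ports, loads the API spec and
    starts the appropriate model reloading cron before signalling the other
    serving processes.
    """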
# wait until neuron-rtd sidecar is ready
uses_inferentia = os.getenv("CORTEX_ACTIVE_NEURON")
if uses_inferentia:
wait_neuron_rtd()
# strictly for Inferentia
has_multiple_tf_servers = os.getenv("CORTEX_MULTIPLE_TF_SERVERS")
num_processes = int(os.environ["CORTEX_PROCESSES_PER_REPLICA"])
if has_multiple_tf_servers:
base_serving_port = int(os.environ["CORTEX_TF_BASE_SERVING_PORT"])
used_ports = {}
for w in range(int(num_processes)):
used_ports[str(base_serving_port + w)] = False
with open("/run/used_ports.json", "w+") as f:
json.dump(used_ports, f)
# get API spec
provider = os.environ["CORTEX_PROVIDER"]
spec_path = os.environ["CORTEX_API_SPEC"]
cache_dir = os.getenv("CORTEX_CACHE_DIR")
region = os.getenv("AWS_REGION") # when it's deployed to AWS
_, api_spec = get_spec(provider, spec_path, cache_dir, region)
predictor_type = predictor_type_from_api_spec(api_spec)
multiple_processes = api_spec["predictor"]["processes_per_replica"] > 1
caching_enabled = is_model_caching_enabled(api_spec)
model_dir = os.getenv("CORTEX_MODEL_DIR")
    # start the live-reloading cron when model caching is not enabled
cron = None
if not caching_enabled:
# create cron dirs if they don't exist
os.makedirs("/run/cron", exist_ok=True)
os.makedirs("/tmp/cron", exist_ok=True)
# prepare crons
if predictor_type in [PythonPredictorType, ONNXPredictorType] and are_models_specified(
api_spec
):
cron = FileBasedModelsTreeUpdater(
interval=10,
api_spec=api_spec,
download_dir=model_dir,
)
cron.start()
elif predictor_type == TensorFlowPredictorType:
tf_serving_port = os.getenv("CORTEX_TF_BASE_SERVING_PORT", "9000")
tf_serving_host = os.getenv("CORTEX_TF_SERVING_HOST", "localhost")
cron = TFSModelLoader(
interval=10,
api_spec=api_spec,
address=f"{tf_serving_host}:{tf_serving_port}",
tfs_model_dir=model_dir,
download_dir=model_dir,
)
cron.start()
elif predictor_type == TensorFlowNeuronPredictorType:
cron = prepare_tfs_servers_api(api_spec, model_dir)
cron.start()
# wait until the cron finishes its first pass
if cron:
while cron.is_alive() and not cron.ran_once():
time.sleep(0.25)
# disable live reloading when the BatchAPI kind is used
# disable live reloading for the TF predictor when Inferentia is used and when multiple processes are used (num procs > 1)
if api_spec["kind"] != "RealtimeAPI" or (
predictor_type == TensorFlowNeuronPredictorType
and has_multiple_tf_servers
and num_processes > 1
):
cron.stop()
    # to synchronize with the other serving processes
open("/mnt/workspace/init_script_run.txt", "a").close()
# don't exit the script if the cron is running
while cron and cron.is_alive():
time.sleep(0.25)
# exit if cron has exited with errors
if cron and isinstance(cron.exitcode, int) and cron.exitcode != 0:
# if it was killed by a catchable signal
if cron.exitcode < 0:
sys.exit(-cron.exitcode)
sys.exit(cron.exitcode)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"CORTEX_CACHE_DIR",
"CORTEX_LOG_CONFIG_FILE",
"CORTEX_API_SPEC",
"AWS_REGION",
"CORTEX_TF_SERVING_HOST",
"CORTEX_TF_BASE_SERVING_PORT",
"CORTEX_MODEL_DIR",
"CORTEX_PROCESSES_PER_REPLICA",
"CORTEX_ACTIVE_NEURON",
"CORTEX_MULTIPLE_TF_SERVERS",
"CORTEX_PROVIDER"
] |
[]
|
["CORTEX_CACHE_DIR", "CORTEX_LOG_CONFIG_FILE", "CORTEX_API_SPEC", "AWS_REGION", "CORTEX_TF_SERVING_HOST", "CORTEX_TF_BASE_SERVING_PORT", "CORTEX_MODEL_DIR", "CORTEX_PROCESSES_PER_REPLICA", "CORTEX_ACTIVE_NEURON", "CORTEX_MULTIPLE_TF_SERVERS", "CORTEX_PROVIDER"]
|
python
| 11 | 0 | |
run.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
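# VIKTOR_ENV is set before importing the app so it starts with the PRODUCTION configuration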
os.environ['VIKTOR_ENV'] = "PRODUCTION"
from viktor.app import app
@app.route('/')
def index():
return 'VIKTOR'
if __name__ == '__main__':
app.run(port=5003)
|
[] |
[] |
[
"VIKTOR_ENV"
] |
[]
|
["VIKTOR_ENV"]
|
python
| 1 | 0 | |
manager/controllers/app/suite_test.go
|
// Copyright 2020 IBM Corp.
// SPDX-License-Identifier: Apache-2.0
package app
import (
"context"
"fmt"
"os"
"path/filepath"
"testing"
"time"
"fybrik.io/fybrik/manager/controllers/utils"
"helm.sh/helm/v3/pkg/release"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
appapi "fybrik.io/fybrik/manager/apis/app/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"fybrik.io/fybrik/pkg/helm"
local "fybrik.io/fybrik/pkg/multicluster/local"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var mgr ctrl.Manager
var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
path, pathErr := os.Getwd()
if pathErr != nil {
logf.Log.Info(pathErr.Error())
}
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{
filepath.Join(path, "..", "..", "..", "charts", "fybrik-crd", "templates"),
},
ErrorIfCRDPathMissing: true,
}
utils.DefaultTestConfiguration(GinkgoT())
var err error
cfg, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = appapi.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
if os.Getenv("USE_EXISTING_CONTROLLER") == "true" {
logf.Log.Info("Using existing controller in existing cluster...")
fmt.Printf("Using existing controller in existing cluster... \n")
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
} else {
fmt.Printf("Setup fake environment... \n")
controllerNamespace := utils.GetControllerNamespace()
blueprintNamespace := utils.GetBlueprintNamespace()
fmt.Printf("Suite test: Using controller namespace: %s; using blueprint namespace %s\n: ", controllerNamespace, blueprintNamespace)
systemNamespaceSelector := fields.SelectorFromSet(fields.Set{"metadata.namespace": utils.GetSystemNamespace()})
workerNamespaceSelector := fields.SelectorFromSet(fields.Set{"metadata.namespace": blueprintNamespace})
// the testing environment will restrict access to secrets, modules and storage accounts
mgr, err = ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
MetricsBindAddress: "localhost:8086",
NewCache: cache.BuilderWithOptions(cache.Options{SelectorsByObject: cache.SelectorsByObject{
&appapi.FybrikModule{}: {Field: systemNamespaceSelector},
&appapi.FybrikStorageAccount{}: {Field: systemNamespaceSelector},
&corev1.Secret{}: {Field: workerNamespaceSelector},
}}),
})
Expect(err).ToNot(HaveOccurred())
// Setup application controller
reconciler := createTestFybrikApplicationController(mgr.GetClient(), mgr.GetScheme())
err = reconciler.SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
// Setup blueprint controller
fakeHelm := helm.NewFake(
&release.Release{
Name: "ra8afad067a6a96084dcb", // Release name is from arrow-flight module
Info: &release.Info{Status: release.StatusDeployed},
}, []*unstructured.Unstructured{},
)
err = NewBlueprintReconciler(mgr, "Blueprint", fakeHelm).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
// Setup plotter controller
clusterMgr, err := local.NewManager(mgr.GetClient(), controllerNamespace)
Expect(err).NotTo(HaveOccurred())
Expect(clusterMgr).NotTo(BeNil())
err = NewPlotterReconciler(mgr, "Plotter", clusterMgr).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
go func() {
err = mgr.Start(ctrl.SetupSignalHandler())
Expect(err).ToNot(HaveOccurred())
}()
k8sClient = mgr.GetClient()
Expect(k8sClient.Create(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: utils.GetSystemNamespace(),
},
}))
Expect(k8sClient.Create(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: blueprintNamespace,
},
}))
Expect(k8sClient.Create(context.Background(), &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-metadata",
Namespace: controllerNamespace,
},
Data: map[string]string{
"ClusterName": "thegreendragon",
"Zone": "hobbiton",
"Region": "theshire",
"VaultAuthPath": "kind",
},
}))
}
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
gexec.KillAndWait(5 * time.Second)
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
|
[
"\"USE_EXISTING_CONTROLLER\""
] |
[] |
[
"USE_EXISTING_CONTROLLER"
] |
[]
|
["USE_EXISTING_CONTROLLER"]
|
go
| 1 | 0 | |
main.go
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
"strings"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/prometheus/common/log"
//cni "github.com/containernetworking/cni/pkg/types"
networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
nmstate "github.com/nmstate/kubernetes-nmstate/api/v1alpha1"
virtv1 "kubevirt.io/client-go/api/v1"
metal3v1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
sriovnetworkv1 "github.com/openshift/sriov-network-operator/api/v1"
ospdirectorv1beta1 "github.com/openstack-k8s-operators/osp-director-operator/api/v1beta1"
"github.com/openstack-k8s-operators/osp-director-operator/controllers"
//cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
//templatev1 "github.com/openshift/api/template/v1"
// +kubebuilder:scaffold:imports
)
const (
// WebhookPort -
WebhookPort = 4343
// WebhookCertDir -
WebhookCertDir = "/apiserver.local.config/certificates"
// WebhookCertName -
WebhookCertName = "apiserver.crt"
// WebhookKeyName -
WebhookKeyName = "apiserver.key"
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(ospdirectorv1beta1.AddToScheme(scheme))
//utilruntime.Must(templatev1.AddToScheme(scheme))
utilruntime.Must(virtv1.AddToScheme(scheme))
utilruntime.Must(nmstate.AddToScheme(scheme))
utilruntime.Must(networkv1.AddToScheme(scheme))
//utilruntime.Must(cdiv1.AddToScheme(scheme))
utilruntime.Must(metal3v1alpha1.AddToScheme(scheme))
utilruntime.Must(machinev1beta1.AddToScheme(scheme))
utilruntime.Must(sriovnetworkv1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var enableWebhooks bool
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
namespace, err := getWatchNamespace()
if err != nil {
setupLog.Error(err, "failed to get WatchNamespace")
os.Exit(1)
}
options := ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
LeaderElection: enableLeaderElection,
LeaderElectionID: "576d6738.openstack.org",
}
// create multi namespace cache if list of namespaces
if strings.Contains(namespace, ",") {
options.Namespace = ""
options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
log.Info(fmt.Sprintf("Namespaces added to the cache: %s", namespace))
} else {
options.Namespace = namespace
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options)
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
cfg, err := config.GetConfig()
if err != nil {
log.Error(err, "")
os.Exit(1)
}
kclient, err := kubernetes.NewForConfig(cfg)
if err != nil {
log.Error(err, "")
os.Exit(1)
}
if strings.ToLower(os.Getenv("ENABLE_WEBHOOKS")) != "false" {
enableWebhooks = true
// We're just getting a pointer here and overriding the default values
srv := mgr.GetWebhookServer()
srv.CertDir = WebhookCertDir
srv.CertName = WebhookCertName
srv.KeyName = WebhookKeyName
srv.Port = WebhookPort
}
if err = (&controllers.OpenStackControlPlaneReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackControlPlane"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackControlPlane")
os.Exit(1)
}
if err = (&controllers.OpenStackVMSetReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackVMSet"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackVMSet")
os.Exit(1)
}
if err = (&controllers.OpenStackProvisionServerReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackProvisionServer"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackProvisionServer")
os.Exit(1)
}
if err = (&controllers.OpenStackBaremetalSetReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackBaremetalSet"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackBaremetalSet")
os.Exit(1)
}
if err = (&controllers.OpenStackClientReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackClient"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackClient")
os.Exit(1)
}
if err = (&controllers.OpenStackNetReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackNet"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackNet")
os.Exit(1)
}
if err = (&controllers.OpenStackIPSetReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackIPSet"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackIPSet")
os.Exit(1)
}
if err = (&controllers.OpenStackPlaybookGeneratorReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackPlaybookGenerator"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackPlaybookGenerator")
os.Exit(1)
}
if err = (&controllers.OpenStackEphemeralHeatReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackEphemeralHeat"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackEphemeralHeat")
os.Exit(1)
}
if err = (&controllers.OpenStackMACAddressReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackMACAddress"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackMACAddress")
os.Exit(1)
}
if enableWebhooks {
if err = (&ospdirectorv1beta1.OpenStackBaremetalSet{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackBaremetalSet")
os.Exit(1)
}
if err = (&ospdirectorv1beta1.OpenStackControlPlane{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackControlPlane")
os.Exit(1)
}
if err = (&ospdirectorv1beta1.OpenStackVMSet{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackVMSet")
os.Exit(1)
}
if err = (&ospdirectorv1beta1.OpenStackNet{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackNet")
os.Exit(1)
}
if err = (&ospdirectorv1beta1.OpenStackEphemeralHeat{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackEphemeralHeat")
os.Exit(1)
}
if err = (&ospdirectorv1beta1.OpenStackProvisionServer{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackProvisionServer")
os.Exit(1)
}
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
// getWatchNamespace returns the Namespace the operator should be watching for changes
func getWatchNamespace() (string, error) {
// WatchNamespaceEnvVar is the constant for env variable WATCH_NAMESPACE
// which specifies the Namespace to watch.
// An empty value means the operator is running with cluster scope.
var watchNamespaceEnvVar = "WATCH_NAMESPACE"
ns, found := os.LookupEnv(watchNamespaceEnvVar)
if !found {
return "", fmt.Errorf("%s must be set", watchNamespaceEnvVar)
}
return ns, nil
}
|
[
"\"ENABLE_WEBHOOKS\""
] |
[] |
[
"ENABLE_WEBHOOKS"
] |
[]
|
["ENABLE_WEBHOOKS"]
|
go
| 1 | 0 | |
enclave_manager/avalon_enclave_manager/wpe/wpe_enclave_manager.py
|
#!/usr/bin/env python3
# Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import hashlib
import os
import sys
import avalon_enclave_manager.sgx_work_order_request as work_order_request
import avalon_enclave_manager.wpe.wpe_enclave as enclave
import avalon_enclave_manager.wpe.wpe_enclave_info as enclave_info
from avalon_enclave_manager.base_enclave_manager import EnclaveManager
from avalon_enclave_manager.wpe.wpe_requester import WPERequester
from error_code.error_status import WorkOrderStatus
from avalon_enclave_manager.work_order_processor_manager \
import WOProcessorManager
logger = logging.getLogger(__name__)
class WorkOrderProcessorEnclaveManager(WOProcessorManager):
"""
Manager class to handle work order processing in a worker
pool setup
"""
def __init__(self, config):
super().__init__(config)
# Calculate sha256 of enclave id to get 32 bytes. Then take a
# hexdigest for hex str.
enclave_id_utf = self.enclave_id.encode("UTF-8")
self._identity = hashlib.sha256(enclave_id_utf).hexdigest()
# -------------------------------------------------------------------------
def _create_signup_data(self):
"""
Create WPE signup data.
Returns :
signup_data - Relevant signup data to be used for requests to the
enclave
"""
# Instantiate enclaveinfo & initialize enclave in the process
signup_data = enclave_info.WorkOrderProcessorEnclaveInfo(
self._config.get("EnclaveModule"))
self._wpe_requester = WPERequester(self._config)
signup_cpp_obj = enclave.SignupInfoWPE()
# Generate a nonce in trusted code
verification_key_nonce = signup_cpp_obj.GenerateNonce(32)
logger.info("Nonce generated by requester WPE : %s",
verification_key_nonce)
response = self._wpe_requester.get_unique_verification_key(
verification_key_nonce)
if response is None:
logger.error("Failed to get Unique ID from KME")
return None
# Received response contains result,verification_key and
# verification_key_signature delimited by ' '
self._unique_verification_key = response.split(' ')[1]
self._unique_verification_key_signature = response.split(' ')[2]
# Verify unique verification key signature using unique id
result = signup_cpp_obj.VerifyUniqueIdSignature(
self._unique_verification_key,
self._unique_verification_key_signature)
if result != 0:
logger.error("Failed to verify unique id signature")
return None
# signup enclave
signup_data.create_enclave_signup_data(self._unique_verification_key)
# return signup data
logger.info("WPE signup data {}".format(signup_data.proof_data))
return signup_data
# -------------------------------------------------------------------------
def _manager_on_boot(self):
"""
Executes Boot flow of enclave manager
"""
# Extended_measurements is a tuple, viz., basename and measurement
# for the enclave
_, mr_enclave = self.extended_measurements
if self._wpe_requester\
.register_wo_processor(self._unique_verification_key,
self.encryption_key,
self.proof_data,
mr_enclave):
logger.info("WPE registration successful")
# Update mapping of worker_id to workers in a pool
self._worker_kv_delegate.update_worker_map(
self._worker_id, self._identity)
else:
logger.error("WPE registration failed. Cannot proceed further.")
sys.exit(1)
# -------------------------------------------------------------------------
def _execute_wo_in_trusted_enclave(self, input_json_str):
"""
Submits workorder request to Worker enclave and retrieves the response
Parameters :
input_json_str - A JSON formatted str of the request to execute
Returns :
json_response - A JSON response received from the enclave. Errors
are also wrapped in a JSON str if exceptions have
occurred.
"""
try:
pre_proc_output = self._wpe_requester\
.preprocess_work_order(input_json_str, self.encryption_key)
if "error" in pre_proc_output:
# If error in preprocessing response, skip workorder processing
logger.error("Failed to preprocess at WPE enclave manager.")
return pre_proc_output
wo_request = work_order_request.SgxWorkOrderRequest(
"WPE",
input_json_str,
pre_proc_output)
wo_response = wo_request.execute()
except Exception as e:
logger.error("failed to execute work order; %s", str(e))
wo_response = dict()
wo_response["error"] = dict()
wo_response["error"]["code"] = WorkOrderStatus.FAILED
wo_response["error"]["message"] = str(e)
logger.info("unknown enclave type response = %s", wo_response)
return wo_response
# -------------------------------------------------------------------------
def main(args=None):
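    """
    Parses command-line options and configuration files, sets up logging and
    starts the work order processor (WPE) enclave manager.
    """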
import config.config as pconfig
import utility.logger as plogger
# parse out the configuration file first
tcf_home = os.environ.get("TCF_HOME", "../../../../")
conf_files = ["wpe_config.toml"]
conf_paths = [".", tcf_home + "/"+"config"]
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="configuration file", nargs="+")
parser.add_argument("--config-dir", help="configuration folder", nargs="+")
parser.add_argument("--kme_listener_url",
help="KME listener url for requests to KME",
type=str)
parser.add_argument(
"--worker_id", help="Id of worker in plain text", type=str)
(options, remainder) = parser.parse_known_args(args)
if options.config:
conf_files = options.config
if options.config_dir:
conf_paths = options.config_dir
try:
config = pconfig.parse_configuration_files(conf_files, conf_paths)
json.dumps(config, indent=4)
except pconfig.ConfigurationException as e:
logger.error(str(e))
sys.exit(-1)
if options.kme_listener_url:
config["KMEListener"]["kme_listener_url"] = options.kme_listener_url
if options.worker_id:
config["WorkerConfig"]["worker_id"] = options.worker_id
plogger.setup_loggers(config.get("Logging", {}))
sys.stdout = plogger.stream_to_logger(
logging.getLogger("STDOUT"), logging.DEBUG)
sys.stderr = plogger.stream_to_logger(
logging.getLogger("STDERR"), logging.WARN)
try:
EnclaveManager.parse_command_line(config, remainder)
logger.info("Initialize WorkOrderProcessor enclave_manager")
enclave_manager = WorkOrderProcessorEnclaveManager(config)
logger.info("About to start WorkOrderProcessor Enclave manager")
enclave_manager.start_enclave_manager()
    except Exception as e:
        logger.error("Exception occurred while running WPE, "
                     "exiting WPE enclave manager: %s", str(e))
        sys.exit(1)
main()
|
[] |
[] |
[
"TCF_HOME"
] |
[]
|
["TCF_HOME"]
|
python
| 1 | 0 | |
doc/conf.py
|
# -*- coding: utf-8 -*-
#
# probfit documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 10 11:16:37 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import matplotlib
matplotlib.use('Agg')
import sys
from os.path import dirname, join
# For local development we use the `iminuit` from the source folder.
# On readthedocs we use the one from `site-packages`.
# See https://github.com/iminuit/iminuit/issues/126#issuecomment-61472227
# and http://read-the-docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
sys.path.insert(0, join(dirname(__file__), '../'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['matplotlib.sphinxext.only_directives',
              'matplotlib.sphinxext.plot_directive',
              'IPython.sphinxext.ipython_directive',
              'sphinx.ext.autodoc',
              'sphinx.ext.mathjax',
              'sphinx.ext.autosummary']
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'probfit'
copyright = u'2012, Piti Ongmongkolkul'
autoclass_content = 'both'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import probfit.info
version = probfit.info.__version__
# The full version, including alpha/beta/rc tags.
release = probfit.info.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_themes']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd:
    try:
        # https://github.com/snide/sphinx_rtd_theme
        import sphinx_rtd_theme
        html_theme = "sphinx_rtd_theme"
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    except ImportError:
        # Fallback to default theme
        print('WARNING: To get nice docs locally that look like the online docs, please do:')
        print('WARNING: $ pip install sphinx_rtd_theme --user')
        print('WARNING: Using default theme.')
        html_theme = "nature"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# if not on_rtd:
# html_theme_path = ['_themes', ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'probfitdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'probfit.tex', u'probfit Documentation',
     u'Piti Ongmongkolkul', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'probfit', u'probfit Documentation',
     [u'Piti Ongmongkolkul'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'probfit', u'probfit Documentation',
     u'Piti Ongmongkolkul', 'probfit', 'Fitting Stuff',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |