filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, ⌀ = null) | variableargcount (float64, 0-0, ⌀ = null) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
src/greentest/greentest/sysinfo.py | # Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
import gevent.core
from gevent import _compat as gsysinfo
PYPY = gsysinfo.PYPY
CPYTHON = not PYPY
VERBOSE = sys.argv.count('-v') > 1
WIN = gsysinfo.WIN
LINUX = gsysinfo.LINUX
OSX = gsysinfo.OSX
PURE_PYTHON = gsysinfo.PURE_PYTHON
# XXX: Formalize this better
LIBUV = 'libuv' in gevent.core.loop.__module__ # pylint:disable=no-member
CFFI_BACKEND = PYPY or LIBUV or 'cffi' in os.getenv('GEVENT_LOOP', '')
if '--debug-greentest' in sys.argv:
sys.argv.remove('--debug-greentest')
DEBUG = True
else:
DEBUG = False
RUN_LEAKCHECKS = os.getenv('GEVENTTEST_LEAKCHECK')
RUN_COVERAGE = os.getenv("COVERAGE_PROCESS_START") or os.getenv("GEVENTTEST_COVERAGE")
# Generally, ignore the portions that are only implemented
# on particular platforms; they generally contain partial
# implementations completed in different modules.
PLATFORM_SPECIFIC_SUFFIXES = ['2', '279', '3']
if WIN:
PLATFORM_SPECIFIC_SUFFIXES.append('posix')
PY2 = None
PY3 = None
PY34 = None
PY35 = None
PY36 = None
PY37 = None
NON_APPLICABLE_SUFFIXES = []
if sys.version_info[0] == 3:
# Python 3
NON_APPLICABLE_SUFFIXES.extend(('2', '279'))
PY2 = False
PY3 = True
if sys.version_info[1] >= 4:
PY34 = True
if sys.version_info[1] >= 5:
PY35 = True
if sys.version_info[1] >= 6:
PY36 = True
if sys.version_info[1] >= 7:
PY37 = True
elif sys.version_info[0] == 2:
# Any python 2
PY3 = False
PY2 = True
NON_APPLICABLE_SUFFIXES.append('3')
if (sys.version_info[1] < 7
or (sys.version_info[1] == 7 and sys.version_info[2] < 9)):
# Python 2, < 2.7.9
NON_APPLICABLE_SUFFIXES.append('279')
PYPY3 = PYPY and PY3
PY27_ONLY = sys.version_info[0] == 2 and sys.version_info[1] == 7
PYGTE279 = (
sys.version_info[0] == 2
and sys.version_info[1] >= 7
and sys.version_info[2] >= 9
)
if WIN:
NON_APPLICABLE_SUFFIXES.append("posix")
# This is intimately tied to FileObjectPosix
NON_APPLICABLE_SUFFIXES.append("fileobject2")
SHARED_OBJECT_EXTENSION = ".pyd"
else:
SHARED_OBJECT_EXTENSION = ".so"
RUNNING_ON_TRAVIS = os.environ.get('TRAVIS')
RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR')
RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
if RUNNING_ON_APPVEYOR:
# We can't exec corecext on appveyor if we haven't run setup.py in
# 'develop' mode (i.e., we install)
NON_APPLICABLE_SUFFIXES.append('corecext')
EXPECT_POOR_TIMER_RESOLUTION = (PYPY3
or RUNNING_ON_APPVEYOR
or (LIBUV and PYPY)
or RUN_COVERAGE)
CONN_ABORTED_ERRORS = []
try:
from errno import WSAECONNABORTED
CONN_ABORTED_ERRORS.append(WSAECONNABORTED)
except ImportError:
pass
from errno import ECONNRESET
CONN_ABORTED_ERRORS.append(ECONNRESET)
CONN_ABORTED_ERRORS = frozenset(CONN_ABORTED_ERRORS)
RESOLVER_ARES = os.getenv('GEVENT_RESOLVER') == 'ares'
RESOLVER_DNSPYTHON = os.getenv('GEVENT_RESOLVER') == 'dnspython'
RESOLVER_NOT_SYSTEM = RESOLVER_ARES or RESOLVER_DNSPYTHON
| [] | [] | ["APPVEYOR", "GEVENTTEST_COVERAGE", "COVERAGE_PROCESS_START", "TRAVIS", "GEVENTTEST_LEAKCHECK", "GEVENT_LOOP", "GEVENT_RESOLVER"] | [] | ["APPVEYOR", "GEVENTTEST_COVERAGE", "COVERAGE_PROCESS_START", "TRAVIS", "GEVENTTEST_LEAKCHECK", "GEVENT_LOOP", "GEVENT_RESOLVER"] | python | 7 | 0 | |
run_meson_command_tests.py | #!/usr/bin/env python3
# Copyright 2018 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import subprocess
import zipapp
from pathlib import Path
from mesonbuild.mesonlib import windows_proof_rmtree, python_command, is_windows
from mesonbuild.coredata import version as meson_version
def get_pypath():
import sysconfig
pypath = sysconfig.get_path('purelib', vars={'base': ''})
# Ensure that / is the path separator and not \, then strip /
return Path(pypath).as_posix().strip('/')
def get_pybindir():
import sysconfig
# 'Scripts' on Windows and 'bin' on other platforms including MSYS
return sysconfig.get_path('scripts', vars={'base': ''}).strip('\\/')
class CommandTests(unittest.TestCase):
'''
Test that running meson in various ways works as expected by checking the
value of mesonlib.meson_command that was set during configuration.
'''
def setUp(self):
super().setUp()
self.orig_env = os.environ.copy()
self.orig_dir = os.getcwd()
os.environ['MESON_COMMAND_TESTS'] = '1'
self.tmpdir = Path(tempfile.mkdtemp()).resolve()
self.src_root = Path(__file__).resolve().parent
self.testdir = str(self.src_root / 'test cases/common/1 trivial')
self.meson_args = ['--backend=ninja']
def tearDown(self):
try:
windows_proof_rmtree(str(self.tmpdir))
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
os.chdir(str(self.orig_dir))
super().tearDown()
def _run(self, command, workdir=None):
'''
Run a command while printing the stdout, and also return a copy of it
'''
# If this call hangs CI will just abort. It is very hard to distinguish
# between CI issue and test bug in that case. Set timeout and fail loud
# instead.
p = subprocess.run(command, stdout=subprocess.PIPE,
env=os.environ.copy(), universal_newlines=True,
cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, command)
return p.stdout
def assertMesonCommandIs(self, line, cmd):
self.assertTrue(line.startswith('meson_command '), msg=line)
self.assertEqual(line, 'meson_command is {!r}'.format(cmd))
def test_meson_uninstalled(self):
# This is what the meson command must be for all these cases
resolved_meson_command = python_command + [str(self.src_root / 'meson.py')]
# Absolute path to meson.py
os.chdir('/')
builddir = str(self.tmpdir / 'build1')
meson_py = str(self.src_root / 'meson.py')
meson_setup = [meson_py, 'setup']
meson_command = python_command + meson_setup + self.meson_args
stdo = self._run(meson_command + [self.testdir, builddir])
self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
# ./meson.py
os.chdir(str(self.src_root))
builddir = str(self.tmpdir / 'build2')
meson_py = './meson.py'
meson_setup = [meson_py, 'setup']
meson_command = python_command + meson_setup + self.meson_args
stdo = self._run(meson_command + [self.testdir, builddir])
self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
# Symlink to meson.py
if is_windows():
# Symlinks require admin perms
return
os.chdir(str(self.src_root))
builddir = str(self.tmpdir / 'build3')
# Create a symlink to meson.py in bindir, and add it to PATH
bindir = (self.tmpdir / 'bin')
bindir.mkdir()
(bindir / 'meson').symlink_to(self.src_root / 'meson.py')
os.environ['PATH'] = str(bindir) + os.pathsep + os.environ['PATH']
# See if it works!
meson_py = 'meson'
meson_setup = [meson_py, 'setup']
meson_command = meson_setup + self.meson_args
stdo = self._run(meson_command + [self.testdir, builddir])
self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
def test_meson_installed(self):
# Install meson
prefix = self.tmpdir / 'prefix'
pylibdir = prefix / get_pypath()
bindir = prefix / get_pybindir()
pylibdir.mkdir(parents=True)
# XXX: join with empty name so it always ends with os.sep otherwise
# distutils complains that prefix isn't contained in PYTHONPATH
os.environ['PYTHONPATH'] = os.path.join(str(pylibdir), '')
os.environ['PATH'] = str(bindir) + os.pathsep + os.environ['PATH']
self._run(python_command + ['setup.py', 'install', '--prefix', str(prefix)])
# Fix importlib-metadata by appending all dirs in pylibdir
PYTHONPATHS = [pylibdir] + [x for x in pylibdir.iterdir()]
PYTHONPATHS = [os.path.join(str(x), '') for x in PYTHONPATHS]
os.environ['PYTHONPATH'] = os.pathsep.join(PYTHONPATHS)
# Check that all the files were installed correctly
self.assertTrue(bindir.is_dir())
self.assertTrue(pylibdir.is_dir())
from setup import packages
# Extract list of expected python module files
expect = set()
for pkg in packages:
expect.update([p.as_posix() for p in Path(pkg.replace('.', '/')).glob('*.py')])
# Check what was installed, only count files that are inside 'mesonbuild'
have = set()
for p in Path(pylibdir).glob('**/*.py'):
s = p.as_posix()
if 'mesonbuild' not in s:
continue
if '/data/' in s:
continue
have.add(s[s.rfind('mesonbuild'):])
self.assertEqual(have, expect)
# Run `meson`
os.chdir('/')
resolved_meson_command = [str(bindir / 'meson')]
builddir = str(self.tmpdir / 'build1')
meson_setup = ['meson', 'setup']
meson_command = meson_setup + self.meson_args
stdo = self._run(meson_command + [self.testdir, builddir])
self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
# Run `/path/to/meson`
builddir = str(self.tmpdir / 'build2')
meson_setup = [str(bindir / 'meson'), 'setup']
meson_command = meson_setup + self.meson_args
stdo = self._run(meson_command + [self.testdir, builddir])
self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
# Run `python3 -m mesonbuild.mesonmain`
resolved_meson_command = python_command + ['-m', 'mesonbuild.mesonmain']
builddir = str(self.tmpdir / 'build3')
meson_setup = ['-m', 'mesonbuild.mesonmain', 'setup']
meson_command = python_command + meson_setup + self.meson_args
stdo = self._run(meson_command + [self.testdir, builddir])
self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
if is_windows():
# Next part requires a shell
return
# `meson` is a wrapper to `meson.real`
resolved_meson_command = [str(bindir / 'meson.real')]
builddir = str(self.tmpdir / 'build4')
(bindir / 'meson').rename(bindir / 'meson.real')
wrapper = (bindir / 'meson')
wrapper.open('w').write('#!/bin/sh\n\nmeson.real "$@"')
wrapper.chmod(0o755)
meson_setup = [str(wrapper), 'setup']
meson_command = meson_setup + self.meson_args
stdo = self._run(meson_command + [self.testdir, builddir])
self.assertMesonCommandIs(stdo.split('\n')[0], resolved_meson_command)
def test_meson_exe_windows(self):
raise unittest.SkipTest('NOT IMPLEMENTED')
def test_meson_zipapp(self):
if is_windows():
raise unittest.SkipTest('NOT IMPLEMENTED')
source = Path(__file__).resolve().parent
target = self.tmpdir / 'meson.pyz'
script = source / 'packaging' / 'create_zipapp.py'
self._run([script.as_posix(), source, '--outfile', target, '--interpreter', python_command[0]])
self._run([target.as_posix(), '--help'])
if __name__ == '__main__':
print('Meson build system', meson_version, 'Command Tests')
raise SystemExit(unittest.main(buffer=True))
| [] | [] | ["PYTHONPATH", "PATH", "MESON_COMMAND_TESTS"] | [] | ["PYTHONPATH", "PATH", "MESON_COMMAND_TESTS"] | python | 3 | 0 | |
nomurapp/asgi.py | """
ASGI config for nomurapp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nomurapp.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
SparkClassNew/SparkRunSomeCommandTests.py | ########
#
# Spark Run Some Command Tests
#
# Program to send commands to Positive Grid Spark
#
# See https://github.com/paulhamsh/Spark-Parser
from AllPresets import *
from SparkClass import *
import socket
import time
import struct
SERVER_PORT = 2
MY_SPARK = "08:EB:ED:4E:47:07" # Change to address of YOUR Spark
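# Helpers for talking to the amp over the RFCOMM socket: send_receive and send_preset wait for the Spark's reply after each block, just_send does not.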
def send_receive(b):
cs.send(b)
a=cs.recv(100)
def send_preset(pres):
for i in pres:
cs.send(i)
a=cs.recv(100)
cs.send(change_user_preset[0])
a=cs.recv(100)
def just_send(b):
cs.send(b)
try:
cs = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
cs.connect((MY_SPARK, SERVER_PORT))
print ("Connected successfully")
msg = SparkMessage()
# Run some basic tests
change_user_preset = msg.change_hardware_preset(0x7f)
print("Change to hardware preset 0")
b = msg.change_hardware_preset(0)
send_receive(b[0])
time.sleep(3)
print ("Sweep up gain")
for v in range (0, 100):
val = v*0.01
b = msg.change_effect_parameter ("Twin", 0, val)
just_send(b[0])
time.sleep(0.02)
print ("Change amp from Twin to SLO 100")
b = msg.change_effect ("Twin", "SLO100")
send_receive(b[0])
time.sleep(3)
print ("Change amp from SLO 100 to Twin")
b = msg.change_effect ( "SLO100", "Twin")
send_receive(b[0])
time.sleep(3)
print ("Turn on the Booster pedal")
b = msg.turn_effect_onoff ( "Booster", "On")
send_receive(b[0])
time.sleep(3)
print ("Booster gain to 9")
b = msg.change_effect_parameter ("Booster", 0, 0.9)
just_send(b[0])
time.sleep(3)
print ("Booster gain to 1")
b = msg.change_effect_parameter ("Booster", 0, 0.1)
just_send(b[0])
time.sleep(3)
print ("Turn off Booster")
b = msg.turn_effect_onoff ( "Booster", "Off")
send_receive(b[0])
time.sleep(3)
print ("Turn on the Booster pedal")
b = msg.turn_effect_onoff ( "Booster", "On")
send_receive(b[0])
time.sleep(3)
for i in range (len(preset_list)):
print ("\t", preset_list[i]["Name"])
b = msg.create_preset(preset_list[i])
send_preset(b)
time.sleep(5)
print("Preset ", preset_list[1]["Name"])
b = msg.create_preset(preset_list[8])
send_preset(b)
time.sleep(5)
print ("Finished")
except OSError as e:
print(e)
finally:
if cs is not None:
cs.close()
| [] | [] | [] | [] | [] | python | null | null | null |
sftp.go | package desync
import (
"bytes"
"context"
"io"
"io/ioutil"
"math/rand"
"net/url"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"path"
"github.com/pkg/errors"
"github.com/pkg/sftp"
)
var _ WriteStore = &SFTPStore{}
// SFTPStoreBase is the base object for SFTP chunk and index stores.
type SFTPStoreBase struct {
location *url.URL
path string
client *sftp.Client
cancel context.CancelFunc
opt StoreOptions
}
// SFTPStore is a chunk store that uses SFTP over SSH.
type SFTPStore struct {
pool chan *SFTPStoreBase
location *url.URL
n int
}
// Creates a base sftp client
func newSFTPStoreBase(location *url.URL, opt StoreOptions) (*SFTPStoreBase, error) {
sshCmd := os.Getenv("CASYNC_SSH_PATH")
if sshCmd == "" {
sshCmd = "ssh"
}
host := location.Host
path := location.Path
if !strings.HasSuffix(path, "/") {
path += "/"
}
// If a username was given in the URL, prefix the host
if location.User != nil {
host = location.User.Username() + "@" + location.Host
}
ctx, cancel := context.WithCancel(context.Background())
c := exec.CommandContext(ctx, sshCmd, host, "-s", "sftp")
c.Stderr = os.Stderr
r, err := c.StdoutPipe()
if err != nil {
cancel()
return nil, err
}
w, err := c.StdinPipe()
if err != nil {
cancel()
return nil, err
}
if err = c.Start(); err != nil {
cancel()
return nil, err
}
client, err := sftp.NewClientPipe(r, w)
if err != nil {
cancel()
return nil, err
}
return &SFTPStoreBase{location, path, client, cancel, opt}, nil
}
// StoreObject adds a new object to a writable index or chunk store.
func (s *SFTPStoreBase) StoreObject(name string, r io.Reader) error {
// Write to a tempfile on the remote server. This is not 100% guaranteed to not
// conflict between goroutines, there's no tempfile() function for remote servers.
// Use a large enough random number instead to build a tempfile
tmpfile := name + strconv.Itoa(rand.Int())
d := path.Dir(name)
var errCount int
retry:
f, err := s.client.Create(tmpfile)
if err != nil {
// It's possible the parent dir doesn't yet exist. Create it while ignoring
// errors since that could be racy and fail if another goroutine does the
// same.
if errCount < 1 {
s.client.Mkdir(d)
errCount++
goto retry
}
return errors.Wrap(err, "sftp:create "+tmpfile)
}
if _, err := io.Copy(f, r); err != nil {
s.client.Remove(tmpfile)
return errors.Wrap(err, "sftp:copying chunk data to "+tmpfile)
}
if err = f.Close(); err != nil {
return errors.Wrap(err, "sftp:closing "+tmpfile)
}
return errors.Wrap(s.client.PosixRename(tmpfile, name), "sftp:renaming "+tmpfile+" to "+name)
}
// Close terminates all client connections
func (s *SFTPStoreBase) Close() error {
if s.cancel != nil {
defer s.cancel()
}
return s.client.Close()
}
func (s *SFTPStoreBase) String() string {
return s.location.String()
}
// Returns the path for a chunk
func (s *SFTPStoreBase) nameFromID(id ChunkID) string {
sID := id.String()
name := s.path + sID[0:4] + "/" + sID
if s.opt.Uncompressed {
name += UncompressedChunkExt
} else {
name += CompressedChunkExt
}
return name
}
// NewSFTPStore initializes a chunk store using SFTP over SSH.
func NewSFTPStore(location *url.URL, opt StoreOptions) (*SFTPStore, error) {
s := &SFTPStore{make(chan *SFTPStoreBase, opt.N), location, opt.N}
for i := 0; i < opt.N; i++ {
c, err := newSFTPStoreBase(location, opt)
if err != nil {
return nil, err
}
s.pool <- c
}
return s, nil
}
// GetChunk returns a chunk from an SFTP store, returns ChunkMissing if the file does not exist
func (s *SFTPStore) GetChunk(id ChunkID) (*Chunk, error) {
c := <-s.pool
defer func() { s.pool <- c }()
name := c.nameFromID(id)
f, err := c.client.Open(name)
if err != nil {
if os.IsNotExist(err) {
err = ChunkMissing{id}
}
return nil, err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return nil, errors.Wrapf(err, "unable to read from %s", name)
}
if c.opt.Uncompressed {
return NewChunkWithID(id, b, nil, c.opt.SkipVerify)
}
return NewChunkWithID(id, nil, b, c.opt.SkipVerify)
}
// RemoveChunk deletes a chunk, typically an invalid one, from the filesystem.
// Used when verifying and repairing caches.
func (s *SFTPStore) RemoveChunk(id ChunkID) error {
c := <-s.pool
defer func() { s.pool <- c }()
name := c.nameFromID(id)
if _, err := c.client.Stat(name); err != nil {
return ChunkMissing{id}
}
return c.client.Remove(name)
}
// StoreChunk adds a new chunk to the store
func (s *SFTPStore) StoreChunk(chunk *Chunk) error {
c := <-s.pool
defer func() { s.pool <- c }()
name := c.nameFromID(chunk.ID())
var (
b []byte
err error
)
if c.opt.Uncompressed {
b, err = chunk.Uncompressed()
} else {
b, err = chunk.Compressed()
}
if err != nil {
return err
}
return c.StoreObject(name, bytes.NewReader(b))
}
// HasChunk returns true if the chunk is in the store
func (s *SFTPStore) HasChunk(id ChunkID) (bool, error) {
c := <-s.pool
defer func() { s.pool <- c }()
name := c.nameFromID(id)
_, err := c.client.Stat(name)
return err == nil, nil
}
// Prune removes any chunks from the store that are not contained in a list
// of chunks
func (s *SFTPStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error {
c := <-s.pool
defer func() { s.pool <- c }()
walker := c.client.Walk(c.path)
for walker.Step() {
// See if we're meant to stop
select {
case <-ctx.Done():
return Interrupted{}
default:
}
if err := walker.Err(); err != nil {
return err
}
info := walker.Stat()
if info.IsDir() { // Skip dirs
continue
}
path := walker.Path()
if !strings.HasSuffix(path, CompressedChunkExt) { // Skip files without chunk extension
continue
}
// Skip compressed chunks if this is running in uncompressed mode and vice-versa
var sID string
if c.opt.Uncompressed {
if !strings.HasSuffix(path, UncompressedChunkExt) {
return nil
}
sID = strings.TrimSuffix(filepath.Base(path), UncompressedChunkExt)
} else {
if !strings.HasSuffix(path, CompressedChunkExt) {
return nil
}
sID = strings.TrimSuffix(filepath.Base(path), CompressedChunkExt)
}
// Convert the name into a checksum, if that fails we're probably not looking
// at a chunk file and should skip it.
id, err := ChunkIDFromString(sID)
if err != nil {
continue
}
// See if the chunk we're looking at is in the list we want to keep, if not
// remove it.
if _, ok := ids[id]; !ok {
if err = s.RemoveChunk(id); err != nil {
return err
}
}
}
return nil
}
// Close terminates all client connections
func (s *SFTPStore) Close() error {
var err error
for i := 0; i < s.n; i++ {
c := <-s.pool
err = c.Close()
}
return err
}
func (s *SFTPStore) String() string {
return s.location.String()
}
| ["\"CASYNC_SSH_PATH\""] | [] | ["CASYNC_SSH_PATH"] | [] | ["CASYNC_SSH_PATH"] | go | 1 | 0 | |
vendor/code.cloudfoundry.org/cli/command/v2/space_quotas_command.go | package v2
import (
"os"
"code.cloudfoundry.org/cli/cf/cmd"
"code.cloudfoundry.org/cli/command"
)
type SpaceQuotasCommand struct {
usage interface{} `usage:"CF_NAME space-quotas"`
relatedCommands interface{} `related_commands:"set-space-quota"`
}
func (SpaceQuotasCommand) Setup(config command.Config, ui command.UI) error {
return nil
}
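// Execute hands the invocation off to the legacy cf CLI code path, passing the CF_TRACE environment variable through for request tracing.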
func (SpaceQuotasCommand) Execute(args []string) error {
cmd.Main(os.Getenv("CF_TRACE"), os.Args)
return nil
}
| ["\"CF_TRACE\""] | [] | ["CF_TRACE"] | [] | ["CF_TRACE"] | go | 1 | 0 | |
selfdrive/locationd/calibrationd.py | #!/usr/bin/env python3
import os
import copy
import json
import numpy as np
import cereal.messaging as messaging
from selfdrive.locationd.calibration_helpers import Calibration
from selfdrive.swaglog import cloudlog
from common.params import Params, put_nonblocking
from common.transformations.model import model_height
from common.transformations.camera import view_frame_from_device_frame, get_view_frame_from_road_frame, \
get_calib_from_vp, vp_from_rpy, H, W, FOCAL
MPH_TO_MS = 0.44704
MIN_SPEED_FILTER = 15 * MPH_TO_MS
MAX_VEL_ANGLE_STD = np.radians(0.25)
MAX_YAW_RATE_FILTER = np.radians(2) # per second
# This is all 20Hz, blocks needed for efficiency
BLOCK_SIZE = 100
INPUTS_NEEDED = 5 # allow to update VP every so many frames
INPUTS_WANTED = 50 # We want a little bit more than we need for stability
WRITE_CYCLES = 10 # write every 1000 cycles
VP_INIT = np.array([W/2., H/2.])
# These validity corners were chosen by looking at 1000
# and taking most extreme cases with some margin.
VP_VALIDITY_CORNERS = np.array([[W//2 - 120, 300], [W//2 + 120, 520]])
DEBUG = os.getenv("DEBUG") is not None
def is_calibration_valid(vp):
return vp[0] > VP_VALIDITY_CORNERS[0,0] and vp[0] < VP_VALIDITY_CORNERS[1,0] and \
vp[1] > VP_VALIDITY_CORNERS[0,1] and vp[1] < VP_VALIDITY_CORNERS[1,1]
def sanity_clip(vp):
if np.isnan(vp).any():
vp = VP_INIT
return np.array([np.clip(vp[0], VP_VALIDITY_CORNERS[0,0] - 20, VP_VALIDITY_CORNERS[1,0] + 20),
np.clip(vp[1], VP_VALIDITY_CORNERS[0,1] - 20, VP_VALIDITY_CORNERS[1,1] + 20)])
def intrinsics_from_vp(vp):
return np.array([
[FOCAL, 0., vp[0]],
[ 0., FOCAL, vp[1]],
[ 0., 0., 1.]])
class Calibrator():
def __init__(self, param_put=False):
self.param_put = param_put
self.vp = copy.copy(VP_INIT)
self.vps = np.zeros((INPUTS_WANTED, 2))
self.idx = 0
self.block_idx = 0
self.valid_blocks = 0
self.cal_status = Calibration.UNCALIBRATED
self.just_calibrated = False
self.v_ego = 0
# Read calibration
if param_put:
calibration_params = Params().get("CalibrationParams")
else:
calibration_params = None
if calibration_params:
try:
calibration_params = json.loads(calibration_params)
if 'calib_radians' in calibration_params:
self.vp = vp_from_rpy(calibration_params["calib_radians"])
else:
self.vp = np.array(calibration_params["vanishing_point"])
if not np.isfinite(self.vp).all():
self.vp = copy.copy(VP_INIT)
self.vps = np.tile(self.vp, (INPUTS_WANTED, 1))
self.valid_blocks = calibration_params['valid_blocks']
if not np.isfinite(self.valid_blocks) or self.valid_blocks < 0:
self.valid_blocks = 0
self.update_status()
except Exception:
cloudlog.exception("CalibrationParams file found but error encountered")
def update_status(self):
start_status = self.cal_status
if self.valid_blocks < INPUTS_NEEDED:
self.cal_status = Calibration.UNCALIBRATED
else:
self.cal_status = Calibration.CALIBRATED if is_calibration_valid(self.vp) else Calibration.INVALID
end_status = self.cal_status
self.just_calibrated = False
if start_status == Calibration.UNCALIBRATED and end_status == Calibration.CALIBRATED:
self.just_calibrated = True
def handle_v_ego(self, v_ego):
self.v_ego = v_ego
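# Update the running vanishing-point estimate from camera odometry; samples are only accepted when driving straight and fast enough to be reliable.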
def handle_cam_odom(self, trans, rot, trans_std, rot_std):
straight_and_fast = ((self.v_ego > MIN_SPEED_FILTER) and (trans[0] > MIN_SPEED_FILTER) and (abs(rot[2]) < MAX_YAW_RATE_FILTER))
certain_if_calib = ((np.arctan2(trans_std[1], trans[0]) < MAX_VEL_ANGLE_STD) or
(self.valid_blocks < INPUTS_NEEDED))
if straight_and_fast and certain_if_calib:
# intrinsics are not eon intrinsics, since this is calibrated frame
intrinsics = intrinsics_from_vp(self.vp)
new_vp = intrinsics.dot(view_frame_from_device_frame.dot(trans))
new_vp = new_vp[:2]/new_vp[2]
new_vp = sanity_clip(new_vp)
self.vps[self.block_idx] = (self.idx*self.vps[self.block_idx] + (BLOCK_SIZE - self.idx) * new_vp) / float(BLOCK_SIZE)
self.idx = (self.idx + 1) % BLOCK_SIZE
if self.idx == 0:
self.block_idx += 1
self.valid_blocks = max(self.block_idx, self.valid_blocks)
self.block_idx = self.block_idx % INPUTS_WANTED
if self.valid_blocks > 0:
self.vp = np.mean(self.vps[:self.valid_blocks], axis=0)
self.update_status()
if self.param_put and ((self.idx == 0 and self.block_idx == 0) or self.just_calibrated):
calib = get_calib_from_vp(self.vp)
cal_params = {"calib_radians": list(calib),
"valid_blocks": self.valid_blocks}
put_nonblocking("CalibrationParams", json.dumps(cal_params).encode('utf8'))
return new_vp
else:
return None
def send_data(self, pm):
calib = get_calib_from_vp(self.vp)
extrinsic_matrix = get_view_frame_from_road_frame(0, calib[1], calib[2], model_height)
cal_send = messaging.new_message('liveCalibration')
cal_send.liveCalibration.calStatus = self.cal_status
cal_send.liveCalibration.calPerc = min(100 * (self.valid_blocks * BLOCK_SIZE + self.idx) // (INPUTS_NEEDED * BLOCK_SIZE), 100)
cal_send.liveCalibration.extrinsicMatrix = [float(x) for x in extrinsic_matrix.flatten()]
cal_send.liveCalibration.rpyCalib = [float(x) for x in calib]
pm.send('liveCalibration', cal_send)
def calibrationd_thread(sm=None, pm=None):
if sm is None:
sm = messaging.SubMaster(['cameraOdometry', 'carState'])
if pm is None:
pm = messaging.PubMaster(['liveCalibration'])
calibrator = Calibrator(param_put=True)
send_counter = 0
while 1:
sm.update()
# if no inputs still publish calibration
if not sm.updated['carState'] and not sm.updated['cameraOdometry']:
calibrator.send_data(pm)
continue
if sm.updated['carState']:
calibrator.handle_v_ego(sm['carState'].vEgo)
if send_counter % 25 == 0:
calibrator.send_data(pm)
send_counter += 1
if sm.updated['cameraOdometry']:
new_vp = calibrator.handle_cam_odom(sm['cameraOdometry'].trans,
sm['cameraOdometry'].rot,
sm['cameraOdometry'].transStd,
sm['cameraOdometry'].rotStd)
if DEBUG and new_vp is not None:
print('got new vp', new_vp)
# decimate outputs for efficiency
def main(sm=None, pm=None):
calibrationd_thread(sm, pm)
if __name__ == "__main__":
main()
| [] | [] | ["DEBUG"] | [] | ["DEBUG"] | python | 1 | 0 | |
kolibri/deployment/default/settings/base.py | # -*- coding: utf-8 -*-
"""
Django settings for kolibri project.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import pytz
from django.conf import locale
from morango.constants import settings as morango_settings
from six.moves.urllib.parse import urljoin
from tzlocal import get_localzone
import kolibri
from kolibri.deployment.default.cache import CACHES
from kolibri.deployment.default.sqlite_db_names import ADDITIONAL_SQLITE_DATABASES
from kolibri.plugins.utils.settings import apply_settings
from kolibri.utils import conf
from kolibri.utils import i18n
from kolibri.utils.logger import get_logging_config
try:
isolation_level = None
import psycopg2 # noqa
isolation_level = psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE
except ImportError:
pass
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# import kolibri, so we can get the path to the module.
# we load other utilities related to i18n
# This is essential! We load the kolibri conf INSIDE the Django conf
KOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)
BASE_DIR = os.path.abspath(os.path.dirname(__name__))
LOCALE_PATHS = [os.path.join(KOLIBRI_MODULE_PATH, "locale")]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = conf.OPTIONS["Server"]["DEBUG"]
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"kolibri.core",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django_filters",
"kolibri.core.auth.apps.KolibriAuthConfig",
"kolibri.core.bookmarks",
"kolibri.core.content",
"kolibri.core.logger",
"kolibri.core.notifications.apps.KolibriNotificationsConfig",
"kolibri.core.tasks.apps.KolibriTasksConfig",
"kolibri.core.deviceadmin",
"kolibri.core.webpack",
"kolibri.core.exams",
"kolibri.core.device",
"kolibri.core.discovery",
"kolibri.core.lessons",
"kolibri.core.analytics",
"rest_framework",
"django_js_reverse",
"jsonfield",
"morango",
]
MIDDLEWARE = [
"kolibri.core.analytics.middleware.cherrypy_access_log_middleware",
"kolibri.core.device.middleware.ProvisioningErrorHandler",
"kolibri.core.device.middleware.DatabaseBusyErrorHandler",
"django.middleware.cache.UpdateCacheMiddleware",
"kolibri.core.analytics.middleware.MetricsMiddleware",
"kolibri.core.auth.middleware.KolibriSessionMiddleware",
"kolibri.core.device.middleware.KolibriLocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"kolibri.core.auth.middleware.CustomAuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.middleware.cache.FetchFromCacheMiddleware",
]
# By default don't cache anything unless it explicitly requests it to!
CACHE_MIDDLEWARE_SECONDS = 0
CACHE_MIDDLEWARE_KEY_PREFIX = "pages"
CACHES = CACHES
ROOT_URLCONF = "kolibri.deployment.default.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"kolibri.core.context_processors.custom_context_processor.developer_mode",
]
},
}
]
WSGI_APPLICATION = "kolibri.deployment.default.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if conf.OPTIONS["Database"]["DATABASE_ENGINE"] == "sqlite":
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(
conf.KOLIBRI_HOME,
conf.OPTIONS["Database"]["DATABASE_NAME"] or "db.sqlite3",
),
"OPTIONS": {"timeout": 100},
},
}
for additional_db in ADDITIONAL_SQLITE_DATABASES:
DATABASES[additional_db] = {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(conf.KOLIBRI_HOME, "{}.sqlite3".format(additional_db)),
"OPTIONS": {"timeout": 100},
}
DATABASE_ROUTERS = (
"kolibri.core.notifications.models.NotificationsRouter",
"kolibri.core.device.models.SyncQueueRouter",
"kolibri.core.discovery.models.NetworkLocationRouter",
)
elif conf.OPTIONS["Database"]["DATABASE_ENGINE"] == "postgres":
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": conf.OPTIONS["Database"]["DATABASE_NAME"],
"PASSWORD": conf.OPTIONS["Database"]["DATABASE_PASSWORD"],
"USER": conf.OPTIONS["Database"]["DATABASE_USER"],
"HOST": conf.OPTIONS["Database"]["DATABASE_HOST"],
"PORT": conf.OPTIONS["Database"]["DATABASE_PORT"],
"TEST": {"NAME": "test"},
},
"default-serializable": {
"ENGINE": "django.db.backends.postgresql",
"NAME": conf.OPTIONS["Database"]["DATABASE_NAME"],
"PASSWORD": conf.OPTIONS["Database"]["DATABASE_PASSWORD"],
"USER": conf.OPTIONS["Database"]["DATABASE_USER"],
"HOST": conf.OPTIONS["Database"]["DATABASE_HOST"],
"PORT": conf.OPTIONS["Database"]["DATABASE_PORT"],
"OPTIONS": {"isolation_level": isolation_level},
"TEST": {"MIRROR": "default"},
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# For language names, see:
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# http://helpsharepointvision.nevron.com/Culture_Table.html
# django-specific format, e.g.: [ ('bn-bd', 'বাংলা'), ('en', 'English'), ...]
LANGUAGES = [
(
i18n.KOLIBRI_LANGUAGE_INFO[lang_code]["intl_code"],
i18n.KOLIBRI_LANGUAGE_INFO[lang_code]["language_name"],
)
for lang_code in conf.OPTIONS["Deployment"]["LANGUAGES"]
if lang_code in i18n.KOLIBRI_LANGUAGE_INFO
]
# Some languages are not supported out-of-the-box by Django
# Here, we use the language code in Intl.js
EXTRA_LANG_INFO = {
"ff-cm": {
"bidi": False,
"code": "ff-cm",
"name": "Fulfulde (Cameroon)",
"name_local": "Fulfulde Mbororoore",
},
"el": {
"bidi": False,
"code": "el",
"name": "Greek",
"name_local": "Ελληνικά",
},
"es-419": {
"bidi": False,
"code": "es-419",
"name": "Spanish (Latin America)",
"name_local": "Español",
},
"es-es": {
"bidi": False,
"code": "es-es",
"name": "Spanish (Spain)",
"name_local": "Español (España)",
},
"fr-ht": {
"bidi": False,
"code": "fr-ht",
"name": "Haitian Creole",
"name_local": "Kreyòl ayisyen",
},
"gu-in": {
"bidi": False,
"code": "gu-in",
"name": "Gujarati",
"name_local": "ગુજરાતી",
},
"ha": {
"bidi": False,
"code": "ha",
"name": "Hausa",
"name_local": "Hausa",
},
"id": {
"bidi": False,
"code": "id",
"name": "Indonesian",
"name_local": "Bahasa Indonesia",
},
"ka": {
"bidi": False,
"code": "ka",
"name": "Georgian",
"name_local": "ქართული",
},
"km": {"bidi": False, "code": "km", "name": "Khmer", "name_local": "ភាសាខ្មែរ"},
"nyn": {
"bidi": False,
"code": "nyn",
"name": "Chichewa, Chewa, Nyanja",
"name_local": "Chinyanja",
},
"pt-mz": {
"bidi": False,
"code": "pt-mz",
"name": "Portuguese (Mozambique)",
"name_local": "Português (Moçambique)",
},
"zh": {
"bidi": False,
"code": "zh-hans",
"name": "Simplified Chinese",
"name_local": "简体中文",
},
"yo": {"bidi": False, "code": "yo", "name": "Yoruba", "name_local": "Yorùbá"},
"zu": {"bidi": False, "code": "zu", "name": "Zulu", "name_local": "isiZulu"},
}
locale.LANG_INFO.update(EXTRA_LANG_INFO)
LANGUAGE_CODE = (
"en"
if "en" in conf.OPTIONS["Deployment"]["LANGUAGES"]
else conf.OPTIONS["Deployment"]["LANGUAGES"][0]
)
try:
TIME_ZONE = get_localzone().zone
except (pytz.UnknownTimeZoneError, ValueError):
# Do not fail at this point because a timezone was not
# detected.
TIME_ZONE = pytz.utc.zone
# Fixes https://github.com/regebro/tzlocal/issues/44
# tzlocal 1.4 returns 'local' if unable to detect the timezone,
# and this TZ id is invalid
if TIME_ZONE == "local":
TIME_ZONE = pytz.utc.zone
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
path_prefix = conf.OPTIONS["Deployment"]["URL_PATH_PREFIX"]
if path_prefix != "/":
path_prefix = "/" + path_prefix
STATIC_URL = urljoin(path_prefix, "static/")
STATIC_ROOT = os.path.join(conf.KOLIBRI_HOME, "static")
MEDIA_URL = urljoin(path_prefix, "media/")
MEDIA_ROOT = os.path.join(conf.KOLIBRI_HOME, "media")
# https://docs.djangoproject.com/en/1.11/ref/settings/#csrf-cookie-path
# Ensure that our CSRF cookie does not collide with other CSRF cookies
# set by other Django apps served from the same domain.
CSRF_COOKIE_PATH = path_prefix
CSRF_COOKIE_NAME = "kolibri_csrftoken"
# https://docs.djangoproject.com/en/1.11/ref/settings/#session-cookie-path
# Ensure that our session cookie does not collidge with other session cookies
# set by other Django apps served from the same domain.
SESSION_COOKIE_PATH = path_prefix
# https://docs.djangoproject.com/en/1.11/ref/settings/#std:setting-LOGGING
# https://docs.djangoproject.com/en/1.11/topics/logging/
LOGGING = get_logging_config(
conf.LOG_ROOT,
debug=DEBUG,
debug_database=conf.OPTIONS["Server"]["DEBUG_LOG_DATABASE"],
)
# Customizing Django auth system
# https://docs.djangoproject.com/en/1.11/topics/auth/customizing/
AUTH_USER_MODEL = "kolibriauth.FacilityUser"
# Our own custom setting to override the anonymous user model
AUTH_ANONYMOUS_USER_MODEL = "kolibriauth.KolibriAnonymousUser"
AUTHENTICATION_BACKENDS = ["kolibri.core.auth.backends.FacilityUserBackend"]
# Django REST Framework
# http://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"UNAUTHENTICATED_USER": "kolibri.core.auth.models.KolibriAnonymousUser",
"DEFAULT_AUTHENTICATION_CLASSES": [
"rest_framework.authentication.SessionAuthentication"
],
"DEFAULT_CONTENT_NEGOTIATION_CLASS": "kolibri.core.negotiation.LimitContentNegotiation",
"EXCEPTION_HANDLER": "kolibri.core.utils.exception_handler.custom_exception_handler",
}
# System warnings to disable
# see https://docs.djangoproject.com/en/1.11/ref/settings/#silenced-system-checks
SILENCED_SYSTEM_CHECKS = ["auth.W004"]
# Configuration for Django JS Reverse
# https://github.com/ierror/django-js-reverse#options
JS_REVERSE_EXCLUDE_NAMESPACES = ["admin"]
ENABLE_DATA_BOOTSTRAPPING = True
# Session configuration
SESSION_ENGINE = "django.contrib.sessions.backends.file"
SESSION_FILE_PATH = os.path.join(conf.KOLIBRI_HOME, "sessions")
SECURE_CONTENT_TYPE_NOSNIFF = True
if not os.path.exists(SESSION_FILE_PATH):
if not os.path.exists(conf.KOLIBRI_HOME):
raise RuntimeError("The KOLIBRI_HOME dir does not exist")
os.mkdir(SESSION_FILE_PATH)
SESSION_COOKIE_NAME = "kolibri"
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_AGE = 1200
apply_settings(sys.modules[__name__])
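# Morango sync settings: the instance-info source can be overridden through the MORANGO_INSTANCE_INFO environment variable.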
MORANGO_INSTANCE_INFO = os.environ.get(
"MORANGO_INSTANCE_INFO",
"kolibri.core.auth.constants.morango_sync:CUSTOM_INSTANCE_INFO",
)
# prepend our own Morango Operation to handle custom behaviors during sync
SYNC_OPERATIONS = ("kolibri.core.auth.sync_operations:KolibriSyncOperations",)
MORANGO_INITIALIZE_OPERATIONS = (
SYNC_OPERATIONS + morango_settings.MORANGO_INITIALIZE_OPERATIONS
)
MORANGO_SERIALIZE_OPERATIONS = (
SYNC_OPERATIONS + morango_settings.MORANGO_SERIALIZE_OPERATIONS
)
MORANGO_QUEUE_OPERATIONS = SYNC_OPERATIONS + morango_settings.MORANGO_QUEUE_OPERATIONS
MORANGO_TRANSFERRING_OPERATIONS = (
SYNC_OPERATIONS + morango_settings.MORANGO_TRANSFERRING_OPERATIONS
)
MORANGO_DEQUEUE_OPERATIONS = (
SYNC_OPERATIONS + morango_settings.MORANGO_DEQUEUE_OPERATIONS
)
MORANGO_DESERIALIZE_OPERATIONS = (
SYNC_OPERATIONS + morango_settings.MORANGO_DESERIALIZE_OPERATIONS
)
MORANGO_CLEANUP_OPERATIONS = (
SYNC_OPERATIONS + morango_settings.MORANGO_CLEANUP_OPERATIONS
)
| [] | [] | ["MORANGO_INSTANCE_INFO"] | [] | ["MORANGO_INSTANCE_INFO"] | python | 1 | 0 | |
cmd/mark_price/main.go | package main
import (
"context"
"encoding/json"
"log"
"os"
"strconv"
"strings"
"github.com/buger/jsonparser"
"github.com/soulmachine/coinsignal/config"
"github.com/soulmachine/coinsignal/pojo"
"github.com/soulmachine/coinsignal/pubsub"
"github.com/soulmachine/coinsignal/utils"
)
type MarkPriceRaw struct {
Symbol string `json:"s"`
Price string `json:"p"`
}
func main() {
ctx := context.Background()
redis_url := os.Getenv("REDIS_URL")
if len(redis_url) == 0 {
log.Fatal("The REDIS_URL environment variable is empty")
}
utils.WaitRedis(ctx, redis_url)
rdb := utils.NewRedisClient(redis_url)
publisher := pubsub.NewPublisher(ctx, redis_url)
pubsub := rdb.Subscribe(ctx,
config.REDIS_TOPIC_FUNDING_RATE,
)
// Consume messages.
for msg := range pubsub.Channel() {
raw_msg := pojo.CarbonbotMessage{}
json.Unmarshal([]byte(msg.Payload), &raw_msg)
if raw_msg.Exchange != "binance" {
continue
}
raw_json := []byte(raw_msg.Json)
data, _, _, _ := jsonparser.Get(raw_json, "data")
var mark_prices_raw []MarkPriceRaw
if err := json.Unmarshal(data, &mark_prices_raw); err != nil {
panic(err)
}
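// Derive the base currency from the symbol: COIN-margined perpetuals end in "USD_PERP", linear contracts end in "USDT" or "BUSD"; anything else is skipped.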
for _, mark_price_raw := range mark_prices_raw {
var currency string
if strings.HasSuffix(mark_price_raw.Symbol, "USD_PERP") {
currency = mark_price_raw.Symbol[:len(mark_price_raw.Symbol)-8]
} else if strings.HasSuffix(mark_price_raw.Symbol, "USDT") || strings.HasSuffix(mark_price_raw.Symbol, "BUSD") {
currency = mark_price_raw.Symbol[:len(mark_price_raw.Symbol)-4]
} else {
continue
}
price, _ := strconv.ParseFloat(mark_price_raw.Price, 64)
currency_price := pojo.CurrencyPrice{
Currency: currency,
Price: price,
}
json_bytes, _ := json.Marshal(currency_price)
publisher.Publish(config.REDIS_TOPIC_CURRENCY_PRICE_CHANNEL, string(json_bytes))
}
}
pubsub.Close()
publisher.Close()
}
| ["\"REDIS_URL\""] | [] | ["REDIS_URL"] | [] | ["REDIS_URL"] | go | 1 | 0 | |
novaclient/tests/utils.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
import requests
from requests_mock.contrib import fixture as requests_mock_fixture
import six
import testscenarios
import testtools
from novaclient.openstack.common import jsonutils
AUTH_URL = "http://localhost:5002/auth_url"
AUTH_URL_V1 = "http://localhost:5002/auth_url/v1.0"
AUTH_URL_V2 = "http://localhost:5002/auth_url/v2.0"
class TestCase(testtools.TestCase):
TEST_REQUEST_BASE = {
'verify': True,
}
def setUp(self):
super(TestCase, self).setUp()
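# Optionally capture stdout/stderr, controlled by the OS_STDOUT_CAPTURE and OS_STDERR_CAPTURE environment variables.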
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
class FixturedTestCase(testscenarios.TestWithScenarios, TestCase):
client_fixture_class = None
data_fixture_class = None
def setUp(self):
super(FixturedTestCase, self).setUp()
self.requests = self.useFixture(requests_mock_fixture.Fixture())
self.data_fixture = None
self.client_fixture = None
self.cs = None
if self.client_fixture_class:
fix = self.client_fixture_class(self.requests)
self.client_fixture = self.useFixture(fix)
self.cs = self.client_fixture.client
if self.data_fixture_class:
fix = self.data_fixture_class(self.requests)
self.data_fixture = self.useFixture(fix)
def assert_called(self, method, path, body=None):
self.assertEqual(self.requests.last_request.method, method)
self.assertEqual(self.requests.last_request.path_url, path)
if body:
req_data = self.requests.last_request.body
if isinstance(req_data, six.binary_type):
req_data = req_data.decode('utf-8')
if not isinstance(body, six.string_types):
# json load if the input body to match against is not a string
req_data = jsonutils.loads(req_data)
self.assertEqual(req_data, body)
class TestResponse(requests.Response):
"""
Class used to wrap requests.Response and provide some
convenience to initialize with a dict
"""
def __init__(self, data):
super(TestResponse, self).__init__()
self._text = None
if isinstance(data, dict):
self.status_code = data.get('status_code')
self.headers = data.get('headers')
# Fake the text attribute to streamline Response creation
self._text = data.get('text')
else:
self.status_code = data
def __eq__(self, other):
return self.__dict__ == other.__dict__
@property
def text(self):
return self._text
| [] | [] | ["OS_STDOUT_CAPTURE", "OS_STDERR_CAPTURE"] | [] | ["OS_STDOUT_CAPTURE", "OS_STDERR_CAPTURE"] | python | 2 | 0 | |
docs/conf.py | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import builtins
import inspect
import importlib
os.environ['XONSH_DEBUG'] = '1'
from xonsh import __version__ as XONSH_VERSION
from xonsh.environ import DEFAULT_DOCS, Env
from xonsh.xontribs import xontrib_metadata
from xonsh import main
from xonsh.commands_cache import CommandsCache
spec = importlib.util.find_spec('prompt_toolkit')
if spec is not None:
# hacky runaround to import PTK-specific events
builtins.__xonsh_env__ = Env()
from xonsh.ptk.shell import events
else:
from xonsh.events import events
sys.path.insert(0, os.path.dirname(__file__))
def setup(sphinx):
from xonsh.pyghooks import XonshConsoleLexer
sphinx.add_lexer("xonshcon", XonshConsoleLexer())
# -- General configuration -----------------------------------------------------
# Documentation is being built on readthedocs, this will be true.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.imgmath',
'sphinx.ext.inheritance_diagram', 'sphinx.ext.viewcode',
#'sphinx.ext.autosummary',
'numpydoc', 'cmdhelp',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'sidebar'
# General information about the project.
project = u'xonsh'
copyright = u'2015, Anthony Scopatz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = XONSH_VERSION.rsplit('.',1)[0]
# The full version, including alpha/beta/rc tags.
release = XONSH_VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
exclude_patterns = ['api/blank.rst']
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
#pygments_style = 'friendly'
#pygments_style = 'bw'
#pygments_style = 'fruity'
#pygments_style = 'manni'
#pygments_style = 'tango'
#pygments_style = 'pastie'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['xonsh.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
#html_theme = 'altered_nature'
#html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
if not on_rtd:
import cloud_sptheme as csp
html_theme = 'cloud'
html_theme_options = {
'max_width': '1250px',
'minimal_width': '700px',
'relbarbgcolor': '#000000',
'footerbgcolor': '#FFFFE7',
'sidebarwidth': '322px',
'sidebarbgcolor': '#e7e7ff',
#'googleanalytics_id': 'UA-41934829-1',
'stickysidebar': False,
'highlighttoc': False,
'externalrefs': False,
'collapsiblesidebar': True,
'default_layout_text_size': "100%", # prevents division by zero error
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme", csp.get_theme_dir()]
templates_path = ["_templates_overwrite"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/ascii_conch_part_transparent_tight.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/magic_conch.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = "numpy_friendly.css"
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'xonshdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'xonsh.tex', u'xonsh documentation',
u'Anthony Scopatz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
#Autodocumentation Flags
autodoc_member_order = "groupwise"
autoclass_content = "both"
autosummary_generate = []
# Prevent numpy from making silly tables
numpydoc_show_class_members = False
#
# Auto-generate some docs
#
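# make_envvars renders the environment-variable cross-reference table and per-variable listing into the 'envvarsbody' include file.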
def make_envvars():
env = Env()
vars = sorted(DEFAULT_DOCS.keys())
s = ('.. list-table::\n'
' :header-rows: 0\n\n')
table = []
ncol = 3
row = ' {0} - :ref:`${1} <{2}>`'
for i, var in enumerate(vars):
star = '*' if i%ncol == 0 else ' '
table.append(row.format(star, var, var.lower()))
table.extend([' -']*((ncol - len(vars)%ncol)%ncol))
s += '\n'.join(table) + '\n\n'
s += ('Listing\n'
'-------\n\n')
sec = ('.. _{low}:\n\n'
'{title}\n'
'{under}\n'
'{docstr}\n\n'
'**configurable:** {configurable}\n\n'
'**default:** {default}\n\n'
'**store_as_str:** {store_as_str}\n\n'
'-------\n\n')
for var in vars:
title = '$' + var
under = '.' * len(title)
vd = env.get_docs(var)
s += sec.format(low=var.lower(), title=title, under=under,
docstr=vd.docstr, configurable=vd.configurable,
default=vd.default, store_as_str=vd.store_as_str)
s = s[:-9]
fname = os.path.join(os.path.dirname(__file__), 'envvarsbody')
with open(fname, 'w') as f:
f.write(s)
def make_xontribs():
md = xontrib_metadata()
names = sorted(d['name'] for d in md['xontribs'] if 'name' in d)
s = ('.. list-table::\n'
' :header-rows: 0\n\n')
table = []
ncol = 5
row = ' {0} - :ref:`{1} <{2}>`'
for i, name in enumerate(names):
star = '*' if i%ncol == 0 else ' '
table.append(row.format(star, name, name.lower()))
table.extend([' -']*((ncol - len(names)%ncol)%ncol))
s += '\n'.join(table) + '\n\n'
s += ('Information\n'
'-----------\n\n')
sec = ('.. _{low}:\n\n'
'{title}\n'
'{under}\n'
':Website: {url}\n'
':Package: {pkg}\n\n'
'{desc}\n\n'
'{inst}'
'-------\n\n')
for name in names:
for d in md['xontribs']:
if d.get('name', None) == name:
break
title = name
under = '.' * len(title)
desc = d.get('description', '')
if not isinstance(desc, str):
desc = ''.join(desc)
pkgname = d.get('package', None)
if pkgname is None:
pkg = 'unknown'
inst = ''
else:
pd = md['packages'].get(pkgname, {})
pkg = pkgname
if 'url' in pd:
pkg = '`{0} website <{1}>`_'.format(pkg, pd['url'])
if 'license' in pd:
pkg = pkg + ', ' + pd['license']
inst = ''
installd = pd.get('install', {})
if len(installd) > 0:
inst = ('**Installation:**\n\n'
'.. code-block:: xonsh\n\n')
for k, v in sorted(pd.get('install', {}).items()):
cmd = "\n ".join(v.split('\n'))
inst += (' # install with {k}\n'
' {cmd}\n\n').format(k=k, cmd=cmd)
s += sec.format(low=name.lower(), title=title, under=under,
url=d.get('url', 'unknown'), desc=desc,
pkg=pkg, inst=inst)
s = s[:-9]
fname = os.path.join(os.path.dirname(__file__), 'xontribsbody')
with open(fname, 'w') as f:
f.write(s)
def make_events():
names = sorted(vars(events).keys())
s = ('.. list-table::\n'
' :header-rows: 0\n\n')
table = []
ncol = 3
row = ' {0} - :ref:`{1} <{2}>`'
for i, var in enumerate(names):
star = '*' if i%ncol == 0 else ' '
table.append(row.format(star, var, var.lower()))
table.extend([' -']*((ncol - len(names)%ncol)%ncol))
s += '\n'.join(table) + '\n\n'
s += ('Listing\n'
'-------\n\n')
sec = ('.. _{low}:\n\n'
'``{title}``\n'
'{under}\n'
'{docstr}\n\n'
'-------\n\n')
for name in names:
event = getattr(events, name)
title = name
docstr = inspect.getdoc(event)
if docstr.startswith(name):
# Assume the first line is a signature
title, docstr = docstr.split('\n', 1)
docstr = docstr.strip()
under = '.' * (len(title) + 4)
s += sec.format(low=name.lower(), title=title, under=under,
docstr=docstr)
s = s[:-9]
fname = os.path.join(os.path.dirname(__file__), 'eventsbody')
with open(fname, 'w') as f:
f.write(s)
make_envvars()
make_xontribs()
make_events()
builtins.__xonsh_history__ = None
builtins.__xonsh_env__ = {}
builtins.__xonsh_commands_cache__ = CommandsCache()
| []
| []
| [
"READTHEDOCS",
"XONSH_DEBUG"
]
| [] | ["READTHEDOCS", "XONSH_DEBUG"] | python | 2 | 0 | |
config.go | // Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package beego
import (
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"github.com/Ruzung/beego/config"
"github.com/Ruzung/beego/context"
"github.com/Ruzung/beego/logs"
"github.com/Ruzung/beego/session"
"github.com/Ruzung/beego/utils"
)
// Config is the main struct for BConfig
type Config struct {
AppName string //Application name
RunMode string //Running Mode: dev | prod
RouterCaseSensitive bool
ServerName string
RecoverPanic bool
RecoverFunc func(*context.Context)
CopyRequestBody bool
EnableGzip bool
MaxMemory int64
EnableErrorsShow bool
EnableErrorsRender bool
Listen Listen
WebConfig WebConfig
Log LogConfig
}
// Listen holds for http and https related config
type Listen struct {
Graceful bool // Graceful means use graceful module to start the server
ServerTimeOut int64
ListenTCP4 bool
EnableHTTP bool
HTTPAddr string
HTTPPort int
AutoTLS bool
Domains []string
TLSCacheDir string
EnableHTTPS bool
EnableMutualHTTPS bool
HTTPSAddr string
HTTPSPort int
HTTPSCertFile string
HTTPSKeyFile string
TrustCaFile string
EnableAdmin bool
AdminAddr string
AdminPort int
EnableFcgi bool
EnableStdIo bool // EnableStdIo works with EnableFcgi Use FCGI via standard I/O
}
// WebConfig holds web related config
type WebConfig struct {
AutoRender bool
EnableDocs bool
FlashName string
FlashSeparator string
DirectoryIndex bool
StaticDir map[string]string
StaticExtensionsToGzip []string
TemplateLeft string
TemplateRight string
ViewsPath string
EnableXSRF bool
XSRFKey string
XSRFExpire int
Session SessionConfig
}
// SessionConfig holds session related config
type SessionConfig struct {
SessionOn bool
SessionProvider string
SessionName string
SessionGCMaxLifetime int64
SessionProviderConfig string
SessionCookieLifeTime int
SessionAutoSetCookie bool
SessionDomain string
SessionDisableHTTPOnly bool // used to allow for cross domain cookies/javascript cookies.
SessionEnableSidInHTTPHeader bool // enable store/get the sessionId into/from http headers
SessionNameInHTTPHeader string
SessionEnableSidInURLQuery bool // enable get the sessionId from Url Query params
}
// LogConfig holds Log related config
type LogConfig struct {
AccessLogs bool
EnableStaticLogs bool //log static files requests default: false
AccessLogsFormat string //access log format: JSON_FORMAT, APACHE_FORMAT or empty string
FileLineNum bool
Outputs map[string]string // Store Adaptor : config
}
var (
// BConfig is the default config for Application
BConfig *Config
// AppConfig is the instance of Config, store the config information from file
AppConfig *beegoAppConfig
// AppPath is the absolute path to the app
AppPath string
// GlobalSessions is the instance for the session manager
GlobalSessions *session.Manager
// appConfigPath is the path to the config files
appConfigPath string
// appConfigProvider is the provider for the config, default is ini
appConfigProvider = "ini"
)
func init() {
BConfig = newBConfig()
var err error
if AppPath, err = filepath.Abs(filepath.Dir(os.Args[0])); err != nil {
panic(err)
}
workPath, err := os.Getwd()
if err != nil {
panic(err)
}
var filename = "app.conf"
if os.Getenv("BEEGO_RUNMODE") != "" {
filename = os.Getenv("BEEGO_RUNMODE") + ".app.conf"
}
appConfigPath = filepath.Join(workPath, "conf", filename)
if !utils.FileExists(appConfigPath) {
appConfigPath = filepath.Join(AppPath, "conf", filename)
if !utils.FileExists(appConfigPath) {
AppConfig = &beegoAppConfig{innerConfig: config.NewFakeConfig()}
return
}
}
if err = parseConfig(appConfigPath); err != nil {
panic(err)
}
}
func recoverPanic(ctx *context.Context) {
if err := recover(); err != nil {
if err == ErrAbort {
return
}
if !BConfig.RecoverPanic {
panic(err)
}
if BConfig.EnableErrorsShow {
if _, ok := ErrorMaps[fmt.Sprint(err)]; ok {
exception(fmt.Sprint(err), ctx)
return
}
}
var stack string
logs.Critical("the request url is ", ctx.Input.URL())
logs.Critical("Handler crashed with error", err)
for i := 1; ; i++ {
_, file, line, ok := runtime.Caller(i)
if !ok {
break
}
logs.Critical(fmt.Sprintf("%s:%d", file, line))
stack = stack + fmt.Sprintln(fmt.Sprintf("%s:%d", file, line))
}
if BConfig.RunMode == DEV && BConfig.EnableErrorsRender {
showErr(err, ctx, stack)
}
if ctx.Output.Status != 0 {
ctx.ResponseWriter.WriteHeader(ctx.Output.Status)
} else {
ctx.ResponseWriter.WriteHeader(500)
}
}
}
func newBConfig() *Config {
return &Config{
AppName: "beego",
RunMode: PROD,
RouterCaseSensitive: true,
ServerName: "beegoServer:" + VERSION,
RecoverPanic: true,
RecoverFunc: recoverPanic,
CopyRequestBody: false,
EnableGzip: false,
MaxMemory: 1 << 26, //64MB
EnableErrorsShow: true,
EnableErrorsRender: true,
Listen: Listen{
Graceful: false,
ServerTimeOut: 0,
ListenTCP4: false,
EnableHTTP: true,
AutoTLS: false,
Domains: []string{},
TLSCacheDir: ".",
HTTPAddr: "",
HTTPPort: 8080,
EnableHTTPS: false,
HTTPSAddr: "",
HTTPSPort: 10443,
HTTPSCertFile: "",
HTTPSKeyFile: "",
EnableAdmin: false,
AdminAddr: "",
AdminPort: 8088,
EnableFcgi: false,
EnableStdIo: false,
},
WebConfig: WebConfig{
AutoRender: true,
EnableDocs: false,
FlashName: "BEEGO_FLASH",
FlashSeparator: "BEEGOFLASH",
DirectoryIndex: false,
StaticDir: map[string]string{"/static": "static"},
StaticExtensionsToGzip: []string{".css", ".js"},
TemplateLeft: "{{",
TemplateRight: "}}",
ViewsPath: "views",
EnableXSRF: false,
XSRFKey: "beegoxsrf",
XSRFExpire: 0,
Session: SessionConfig{
SessionOn: false,
SessionProvider: "memory",
SessionName: "beegosessionID",
SessionGCMaxLifetime: 3600,
SessionProviderConfig: "",
SessionDisableHTTPOnly: false,
SessionCookieLifeTime: 0, //set cookie default is the browser life
SessionAutoSetCookie: true,
SessionDomain: "",
SessionEnableSidInHTTPHeader: false, // enable store/get the sessionId into/from http headers
SessionNameInHTTPHeader: "Beegosessionid",
SessionEnableSidInURLQuery: false, // enable get the sessionId from Url Query params
},
},
Log: LogConfig{
AccessLogs: false,
EnableStaticLogs: false,
AccessLogsFormat: "APACHE_FORMAT",
FileLineNum: true,
Outputs: map[string]string{"console": ""},
},
}
}
// currently only ini is supported; json will be supported next.
func parseConfig(appConfigPath string) (err error) {
AppConfig, err = newAppConfig(appConfigProvider, appConfigPath)
if err != nil {
return err
}
return assignConfig(AppConfig)
}
func assignConfig(ac config.Configer) error {
for _, i := range []interface{}{BConfig, &BConfig.Listen, &BConfig.WebConfig, &BConfig.Log, &BConfig.WebConfig.Session} {
assignSingleConfig(i, ac)
}
// set the run mode first
if envRunMode := os.Getenv("BEEGO_RUNMODE"); envRunMode != "" {
BConfig.RunMode = envRunMode
} else if runMode := ac.String("RunMode"); runMode != "" {
BConfig.RunMode = runMode
}
if sd := ac.String("StaticDir"); sd != "" {
BConfig.WebConfig.StaticDir = map[string]string{}
sds := strings.Fields(sd)
for _, v := range sds {
if url2fsmap := strings.SplitN(v, ":", 2); len(url2fsmap) == 2 {
BConfig.WebConfig.StaticDir["/"+strings.Trim(url2fsmap[0], "/")] = url2fsmap[1]
} else {
BConfig.WebConfig.StaticDir["/"+strings.Trim(url2fsmap[0], "/")] = url2fsmap[0]
}
}
}
if sgz := ac.String("StaticExtensionsToGzip"); sgz != "" {
extensions := strings.Split(sgz, ",")
fileExts := []string{}
for _, ext := range extensions {
ext = strings.TrimSpace(ext)
if ext == "" {
continue
}
if !strings.HasPrefix(ext, ".") {
ext = "." + ext
}
fileExts = append(fileExts, ext)
}
if len(fileExts) > 0 {
BConfig.WebConfig.StaticExtensionsToGzip = fileExts
}
}
if lo := ac.String("LogOutputs"); lo != "" {
		// if lo is not empty, the user has configured their own LogOutputs,
		// so clear the default setting of BConfig.Log.Outputs
BConfig.Log.Outputs = make(map[string]string)
los := strings.Split(lo, ";")
for _, v := range los {
if logType2Config := strings.SplitN(v, ",", 2); len(logType2Config) == 2 {
BConfig.Log.Outputs[logType2Config[0]] = logType2Config[1]
} else {
continue
}
}
}
//init log
logs.Reset()
for adaptor, config := range BConfig.Log.Outputs {
err := logs.SetLogger(adaptor, config)
if err != nil {
fmt.Fprintln(os.Stderr, fmt.Sprintf("%s with the config %q got err:%s", adaptor, config, err.Error()))
}
}
logs.SetLogFuncCall(BConfig.Log.FileLineNum)
return nil
}
func assignSingleConfig(p interface{}, ac config.Configer) {
pt := reflect.TypeOf(p)
if pt.Kind() != reflect.Ptr {
return
}
pt = pt.Elem()
if pt.Kind() != reflect.Struct {
return
}
pv := reflect.ValueOf(p).Elem()
for i := 0; i < pt.NumField(); i++ {
pf := pv.Field(i)
if !pf.CanSet() {
continue
}
name := pt.Field(i).Name
switch pf.Kind() {
case reflect.String:
pf.SetString(ac.DefaultString(name, pf.String()))
case reflect.Int, reflect.Int64:
pf.SetInt(ac.DefaultInt64(name, pf.Int()))
case reflect.Bool:
pf.SetBool(ac.DefaultBool(name, pf.Bool()))
case reflect.Struct:
default:
//do nothing here
}
}
}
// LoadAppConfig allows a developer to apply a config file
func LoadAppConfig(adapterName, configPath string) error {
absConfigPath, err := filepath.Abs(configPath)
if err != nil {
return err
}
if !utils.FileExists(absConfigPath) {
return fmt.Errorf("the target config file: %s don't exist", configPath)
}
appConfigPath = absConfigPath
appConfigProvider = adapterName
return parseConfig(appConfigPath)
}
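
// exampleLoadIniConfig is an illustrative sketch, not part of the framework:
// it shows how an application might swap in its own configuration file before
// serving. The adapter name and file path below are assumptions chosen for
// the example.
func exampleLoadIniConfig() error {
	// "ini" is the default provider; the path is resolved to an absolute
	// path and re-parsed into BConfig by LoadAppConfig.
	return LoadAppConfig("ini", "conf/custom.app.conf")
}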
type beegoAppConfig struct {
innerConfig config.Configer
}
func newAppConfig(appConfigProvider, appConfigPath string) (*beegoAppConfig, error) {
ac, err := config.NewConfig(appConfigProvider, appConfigPath)
if err != nil {
return nil, err
}
return &beegoAppConfig{ac}, nil
}
func (b *beegoAppConfig) Set(key, val string) error {
if err := b.innerConfig.Set(BConfig.RunMode+"::"+key, val); err != nil {
return err
}
return b.innerConfig.Set(key, val)
}
func (b *beegoAppConfig) String(key string) string {
if v := b.innerConfig.String(BConfig.RunMode + "::" + key); v != "" {
return v
}
return b.innerConfig.String(key)
}
func (b *beegoAppConfig) Strings(key string) []string {
if v := b.innerConfig.Strings(BConfig.RunMode + "::" + key); len(v) > 0 {
return v
}
return b.innerConfig.Strings(key)
}
func (b *beegoAppConfig) Int(key string) (int, error) {
if v, err := b.innerConfig.Int(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Int(key)
}
func (b *beegoAppConfig) Int64(key string) (int64, error) {
if v, err := b.innerConfig.Int64(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Int64(key)
}
func (b *beegoAppConfig) Bool(key string) (bool, error) {
if v, err := b.innerConfig.Bool(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Bool(key)
}
func (b *beegoAppConfig) Float(key string) (float64, error) {
if v, err := b.innerConfig.Float(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Float(key)
}
func (b *beegoAppConfig) DefaultString(key string, defaultVal string) string {
if v := b.String(key); v != "" {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultStrings(key string, defaultVal []string) []string {
if v := b.Strings(key); len(v) != 0 {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultInt(key string, defaultVal int) int {
if v, err := b.Int(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultInt64(key string, defaultVal int64) int64 {
if v, err := b.Int64(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultBool(key string, defaultVal bool) bool {
if v, err := b.Bool(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultFloat(key string, defaultVal float64) float64 {
if v, err := b.Float(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DIY(key string) (interface{}, error) {
return b.innerConfig.DIY(key)
}
func (b *beegoAppConfig) GetSection(section string) (map[string]string, error) {
return b.innerConfig.GetSection(section)
}
func (b *beegoAppConfig) SaveConfigFile(filename string) error {
return b.innerConfig.SaveConfigFile(filename)
}
| [
"\"BEEGO_RUNMODE\"",
"\"BEEGO_RUNMODE\"",
"\"BEEGO_RUNMODE\""
]
| []
| [
"BEEGO_RUNMODE"
]
| [] | ["BEEGO_RUNMODE"] | go | 1 | 0 | |
vendor/github.com/minio/minio-go/v6/transport.go | // +build go1.7 go1.8
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017-2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net"
"net/http"
"os"
"time"
)
// mustGetSystemCertPool - return system CAs or empty pool in case of error (or windows)
func mustGetSystemCertPool() *x509.CertPool {
pool, err := x509.SystemCertPool()
if err != nil {
return x509.NewCertPool()
}
return pool
}
// DefaultTransport - this default transport is similar to
// http.DefaultTransport but with additional param DisableCompression
// is set to true to avoid decompressing content with 'gzip' encoding.
var DefaultTransport = func(secure bool) (http.RoundTripper, error) {
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 256,
MaxIdleConnsPerHost: 16,
ResponseHeaderTimeout: time.Minute,
IdleConnTimeout: time.Minute,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 10 * time.Second,
// Set this value so that the underlying transport round-tripper
// doesn't try to auto decode the body of objects with
// content-encoding set to `gzip`.
//
// Refer:
// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
DisableCompression: true,
}
if secure {
tr.TLSClientConfig = &tls.Config{
// Can't use SSLv3 because of POODLE and BEAST
// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
// Can't use TLSv1.1 because of RC4 cipher usage
MinVersion: tls.VersionTLS12,
}
if f := os.Getenv("SSL_CERT_FILE"); f != "" {
rootCAs := mustGetSystemCertPool()
data, err := ioutil.ReadFile(f)
if err == nil {
rootCAs.AppendCertsFromPEM(data)
}
tr.TLSClientConfig.RootCAs = rootCAs
}
}
return tr, nil
}
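
// exampleSecureClient is an illustrative sketch, not part of the upstream
// library: it shows how DefaultTransport might be wired into an *http.Client.
// The 30 second overall timeout is an assumption chosen for the example.
func exampleSecureClient() (*http.Client, error) {
	tr, err := DefaultTransport(true)
	if err != nil {
		return nil, err
	}
	// The returned RoundTripper already handles proxies, TLS 1.2+ and
	// connection pooling; callers only add request-level settings here.
	return &http.Client{Transport: tr, Timeout: 30 * time.Second}, nil
}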
| [
"\"SSL_CERT_FILE\""
]
| []
| [
"SSL_CERT_FILE"
]
| [] | ["SSL_CERT_FILE"] | go | 1 | 0 | |
pydoc-markdown/src/pydoc_markdown/__init__.py | # -*- coding: utf8 -*-
# Copyright (c) 2019 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Pydoc-markdown is an extensible framework for generating API documentation,
with a focus on Python source code and the Markdown output format.
"""
from nr.databind.core import Collect, Field, FieldName, ObjectMapper, Struct, UnionType
from nr.databind.json import JsonModule
from nr.stream import concat
from pydoc_markdown.interfaces import Context, Loader, Processor, Renderer, Resolver, Builder
from pydoc_markdown.contrib.loaders.python import PythonLoader
from pydoc_markdown.contrib.processors.filter import FilterProcessor
from pydoc_markdown.contrib.processors.crossref import CrossrefProcessor
from pydoc_markdown.contrib.processors.smart import SmartProcessor
from pydoc_markdown.contrib.renderers.markdown import MarkdownRenderer
from pydoc_markdown.util import ytemplate
from typing import List, Union
import docspec
import logging
import os
import subprocess
import yaml
__author__ = 'Niklas Rosenstein <[email protected]>'
__version__ = '3.5.0'
mapper = ObjectMapper(JsonModule())
logger = logging.getLogger(__name__)
class PydocMarkdown(Struct):
"""
This object represents the main configuration for Pydoc-Markdown.
"""
#: A list of loader implementations that load #docspec.Module#s.
#: Defaults to #PythonLoader.
loaders = Field([Loader], default=lambda: [PythonLoader()])
#: A list of processor implementations that modify #docspec.Module#s. Defaults
#: to #FilterProcessor, #SmartProcessor and #CrossrefProcessor.
processors = Field([Processor], default=lambda: [
FilterProcessor(), SmartProcessor(), CrossrefProcessor()])
#: A renderer for #docspec.Module#s. Defaults to #MarkdownRenderer.
renderer = Field(Renderer, default=MarkdownRenderer)
#: Hooks that can be executed at certain points in the pipeline. The commands
#: are executed with the current `SHELL`.
hooks = Field({
'pre_render': Field([str], FieldName('pre-render'), default=list),
'post_render': Field([str], FieldName('post-render'), default=list),
}, default=Field.DEFAULT_CONSTRUCT)
# Hidden fields are filled at a later point in time and are not (de-) serialized.
unknown_fields = Field([str], default=list, hidden=True)
resolver = Field(Resolver, default=None, hidden=True)
def __init__(self, *args, **kwargs) -> None:
super(PydocMarkdown, self).__init__(*args, **kwargs)
self.resolver = None
self._context = None
def load_config(self, data: Union[str, dict]) -> None:
"""
Loads a YAML configuration from *data*.
:param data: Nested structure or the path to a YAML configuration file.
"""
filename = None
if isinstance(data, str):
filename = data
logger.info('Loading configuration file "%s".', filename)
data = ytemplate.load(filename, {'env': ytemplate.Attributor(os.environ)})
collector = Collect()
result = mapper.deserialize(data, type(self), filename=filename, decorations=[collector])
vars(self).update(vars(result))
self.unknown_fields = list(concat((str(n.locator.append(u)) for u in n.unknowns)
for n in collector.nodes))
def init(self, context: Context) -> None:
"""
Initialize all plugins with the specified *context*. Cannot be called multiple times.
If this method is not called explicitly, the plugins will be initialized with a default context before the load,
process or render phase.
"""
if self._context:
raise RuntimeError('already initialized')
self._context = context
logger.debug('Initializing plugins with context %r', context)
for loader in self.loaders:
loader.init(context)
for processor in self.processors:
processor.init(context)
self.renderer.init(context)
def ensure_initialized(self) -> None:
if not self._context:
self.init(Context(directory='.'))
def load_modules(self) -> List[docspec.Module]:
"""
Loads modules via the #loaders.
"""
logger.info('Loading modules.')
self.ensure_initialized()
modules = []
for loader in self.loaders:
modules.extend(loader.load())
return modules
def process(self, modules: List[docspec.Module]) -> None:
"""
Process modules via the #processors.
"""
self.ensure_initialized()
if self.resolver is None:
self.resolver = self.renderer.get_resolver(modules)
for processor in self.processors:
processor.process(modules, self.resolver)
def render(self, modules: List[docspec.Module], run_hooks: bool = True) -> None:
"""
Render modules via the #renderer.
"""
self.ensure_initialized()
if run_hooks:
self.run_hooks('pre-render')
if self.resolver is None:
self.resolver = self.renderer.get_resolver(modules)
self.renderer.process(modules, self.resolver)
self.renderer.render(modules)
if run_hooks:
self.run_hooks('post-render')
def build(self, site_dir: str=None) -> None:
if not Builder.provided_by(self.renderer):
name = type(self.renderer).__name__
raise NotImplementedError('Renderer "{}" does not support building'.format(name))
self.ensure_initialized()
self.renderer.build(site_dir)
def run_hooks(self, hook_name: str) -> None:
for command in getattr(self.hooks, hook_name.replace('-', '_')):
subprocess.check_call(command, shell=True, cwd=self._context.directory)
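
# The function below is an illustrative sketch, not part of the original module:
# it shows how the PydocMarkdown pipeline is typically driven end-to-end. The
# configuration file name is an assumption chosen for the example.
def _example_pipeline(config_file: str = 'pydoc-markdown.yml') -> None:
  pipeline = PydocMarkdown()
  pipeline.load_config(config_file)   # read YAML config into loaders/processors/renderer
  modules = pipeline.load_modules()   # run the configured loaders
  pipeline.process(modules)           # apply filter/smart/crossref processors
  pipeline.render(modules)            # render via the configured renderer (runs hooks)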
| []
| []
| []
| [] | [] | python | 0 | 0 | |
jolokia-harness.go | package main
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
)
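// StartJolokia attaches the Jolokia JVM agent to the given Java process by
// running `java -jar <jolokiaPath> <jolokiaOption> <javaProcess>` with the JVM
// found under JAVA_HOME, and returns the command's output.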
func StartJolokia(jolokiaPath string, jolokiaOption string, javaProcess string) (string, error) {
var params = "-jar " + jolokiaPath + " " + jolokiaOption + " " + javaProcess
java := filepath.Join(getJavaPath(), "/bin/java.exe")
jolokiaCommand := exec.Command(java, strings.Split(params, " ")...)
out, err := jolokiaCommand.Output()
if err != nil {
return "", err
}
outstr := fmt.Sprintf("%s", out)
return outstr, nil
}
func getJavaPath() string {
return os.Getenv("JAVA_HOME")
}
func getJavaVersion() string {
java := filepath.Join(getJavaPath(), "/bin/java.exe")
fmt.Println(java)
javaVersion := exec.Command(java, strings.Split("-version", " ")...)
out, err := javaVersion.CombinedOutput()
if err != nil {
log.Fatal(err)
}
outstr := fmt.Sprintf("%s", out)
return outstr
}
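
// The function below is an illustrative sketch, not part of the original
// harness: it shows how the helpers above might be combined. The agent jar
// path and target PID are placeholders invented for the example.
func exampleAttachAgent() {
	fmt.Println(getJavaVersion())
	out, err := StartJolokia(`C:\tools\jolokia-jvm-agent.jar`, "start", "1234")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}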
| [
"\"JAVA_HOME\""
]
| []
| [
"JAVA_HOME"
]
| [] | ["JAVA_HOME"] | go | 1 | 0 | |
cekit/builders/docker_builder.py | import json
import logging
import os
import re
import sys
import traceback
from collections import OrderedDict
from cekit.builder import Builder
from cekit.errors import CekitError
LOGGER = logging.getLogger('cekit')
# Ignore any failure on non-core modules, we will catch it later
# and suggest a solution
try:
# Squash library
from docker_squash.squash import Squash
except ImportError:
pass
try:
# Docker Python library, the new one
import docker
from docker.api.client import APIClient as APIClientClass
except ImportError:
pass
try:
# The requests library is an indirect dependency, we need to put it here
# so that the dependency mechanism can kick in and require the docker library
# first which will pull requests
import requests
except ImportError:
pass
try:
# Docker Python library, the old one
import docker # pylint: disable=ungrouped-imports
from docker.client import Client as APIClientClass # pylint: disable=ungrouped-imports
except ImportError:
pass
ANSI_ESCAPE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
class DockerBuilder(Builder):
"""This class wraps docker build command to build and image"""
def __init__(self, params):
super(DockerBuilder, self).__init__('docker', params)
@staticmethod
def dependencies(params=None):
deps = {}
deps['python-docker'] = {
'library': 'docker',
'package': 'python-docker-py',
'fedora': {
'package': 'python3-docker'}
}
if params is not None and not params.no_squash:
deps['docker-squash'] = {
'library': 'docker_squash',
'fedora': {
'package': 'python3-docker-squash'
}
}
return deps
def _build_with_docker(self, docker_client):
docker_args = {}
docker_args['decode'] = True
docker_args['path'] = os.path.join(self.target, 'image')
docker_args['pull'] = self.params.pull
docker_args['rm'] = True
if self.params.platform:
docker_args['platform'] = self.params.platform
build_log = []
docker_layer_ids = []
LOGGER.debug("Running Docker build: {}".format(str(docker_args)))
try:
stream = docker_client.build(**docker_args)
for part in stream:
# In case an error is returned, log the message and fail the build
if 'errorDetail' in part:
error_message = part.get('errorDetail', {}).get('message', '')
raise CekitError("Image build failed: '{}'".format(error_message))
elif 'stream' in part:
messages = part['stream']
else:
# We actually expect only 'stream' here.
# If there is something different, we ignore it.
# It's safe to do so because if it would be an error, we would catch it
# earlier. Ignored logs are related to fetching/pulling/extracting
# of container images.
continue
# This prevents polluting CEKit log with downloading/extracting messages
messages = ANSI_ESCAPE.sub('', messages).strip()
# Python 2 compatibility
if sys.version_info[0] == 2:
messages = messages.encode("utf-8", errors="ignore")
for message in messages.split('\n'):
LOGGER.info('Docker: {}'.format(message))
build_log.append(messages)
layer_id_match = re.search(r'^---> ([\w]{12})$', messages)
if layer_id_match:
docker_layer_ids.append(layer_id_match.group(1))
except requests.ConnectionError as ex:
exception_chain = traceback.format_exc()
LOGGER.debug("Caught ConnectionError attempting to communicate with Docker ", exc_info=1)
if 'PermissionError' in exception_chain:
message = "Unable to contact docker daemon. Is it correctly setup?\n" \
"See https://developer.fedoraproject.org/tools/docker/docker-installation.html and " \
"http://www.projectatomic.io/blog/2015/08/why-we-dont-let-non-root-users-run-docker-in-centos-fedora-or-rhel"
elif 'FileNotFoundError' in exception_chain:
message = "Unable to contact docker daemon. Is it started?"
else:
message = "Unknown ConnectionError from docker ; is the daemon started and correctly setup?"
if sys.version_info.major == 3:
# Work-around for python 2 / 3 code - replicate exception(...) from None
cekit_exception = CekitError(message, ex)
cekit_exception.__cause__ = None
raise cekit_exception
else:
raise CekitError(message, ex)
except Exception as ex:
msg = "Image build failed, see logs above."
if len(docker_layer_ids) >= 2:
LOGGER.error("You can look inside the failed image by running "
"'docker run --rm -ti {} bash'".format(docker_layer_ids[-1]))
if "To enable Red Hat Subscription Management repositories:" in ' '.join(build_log) and \
not os.path.exists(os.path.join(self.target, 'image', 'repos')):
msg = "Image build failed with a yum error and you don't " \
"have any yum repository configured, please check " \
"your image/module descriptor for proper repository " \
"definitions."
raise CekitError(msg, ex)
return docker_layer_ids[-1]
def _squash(self, docker_client, image_id):
LOGGER.info("Squashing image {}...".format(image_id))
squash = Squash(docker=docker_client,
log=LOGGER,
from_layer=self.generator.image['from'],
image=image_id,
cleanup=True)
return squash.run()
def _tag(self, docker_client, image_id, tags):
for tag in tags:
if ':' in tag:
img_repo, img_tag = tag.rsplit(":", 1)
docker_client.tag(image_id, img_repo, tag=img_tag)
else:
docker_client.tag(image_id, tag)
def _docker_client(self):
LOGGER.debug("Preparing Docker client...")
# Default Docker daemon connection timeout 10 minutes
# It needs to be high enough to allow Docker daemon to export the
# image for squashing.
try:
timeout = int(os.getenv('DOCKER_TIMEOUT', '600'))
except ValueError:
raise CekitError("Provided timeout value: '{}' cannot be parsed as integer, exiting.".format(
os.getenv('DOCKER_TIMEOUT')))
if timeout <= 0:
raise CekitError(
"Provided timeout value needs to be greater than zero, currently: '{}', exiting.".format(timeout))
params = {"version": "1.22"}
params.update(docker.utils.kwargs_from_env())
params["timeout"] = timeout
try:
client = APIClientClass(**params)
except docker.errors.DockerException as e:
LOGGER.error("Could not create Docker client, please make sure that you "
"specified valid parameters in the 'DOCKER_HOST' environment variable, "
"examples: 'unix:///var/run/docker.sock', 'tcp://192.168.22.33:1234'")
raise CekitError("Error while creating the Docker client", e)
if client and self._valid_docker_connection(client):
LOGGER.debug("Docker client ready and working")
LOGGER.debug(client.version())
return client
LOGGER.error(
"Could not connect to the Docker daemon at '{}', please make sure the Docker "
"daemon is running.".format(client.base_url))
if client.base_url.startswith('unix'):
LOGGER.error(
"Please make sure the Docker socket has correct permissions.")
if os.environ.get('DOCKER_HOST'):
LOGGER.error("If Docker daemon is running, please make sure that you specified valid "
"parameters in the 'DOCKER_HOST' environment variable, examples: "
"'unix:///var/run/docker.sock', 'tcp://192.168.22.33:1234'. You may "
"also need to specify 'DOCKER_TLS_VERIFY', and 'DOCKER_CERT_PATH' "
"environment variables.")
raise CekitError("Cannot connect to Docker daemon")
def _valid_docker_connection(self, client):
try:
return client.ping()
except requests.exceptions.ConnectionError:
pass
return False
def run(self):
tags = self.params.tags
if not tags:
tags = self.generator.get_tags()
LOGGER.debug("Building image with tags: '{}'".format("', '".join(tags)))
LOGGER.info("Building container image...")
docker_client = self._docker_client()
# Build image
image_id = self._build_with_docker(docker_client)
# Squash only if --no-squash is NOT defined
if not self.params.no_squash:
image_id = self._squash(docker_client, image_id)
# Tag the image
self._tag(docker_client, image_id, tags)
LOGGER.info("Image built and available under following tags: {}".format(", ".join(tags)))
| []
| []
| [
"DOCKER_HOST",
"DOCKER_TIMEOUT"
]
| [] | ["DOCKER_HOST", "DOCKER_TIMEOUT"] | python | 2 | 0 | |
cmd/abapEnvironmentAssemblePackages_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type abapEnvironmentAssemblePackagesOptions struct {
CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"`
CfOrg string `json:"cfOrg,omitempty"`
CfSpace string `json:"cfSpace,omitempty"`
CfServiceInstance string `json:"cfServiceInstance,omitempty"`
CfServiceKeyName string `json:"cfServiceKeyName,omitempty"`
Host string `json:"host,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
AddonDescriptor string `json:"addonDescriptor,omitempty"`
MaxRuntimeInMinutes int `json:"maxRuntimeInMinutes,omitempty"`
}
type abapEnvironmentAssemblePackagesCommonPipelineEnvironment struct {
abap struct {
addonDescriptor string
}
}
func (p *abapEnvironmentAssemblePackagesCommonPipelineEnvironment) persist(path, resourceName string) {
content := []struct {
category string
name string
value string
}{
{category: "abap", name: "addonDescriptor", value: p.abap.addonDescriptor},
}
errCount := 0
for _, param := range content {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting piper environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Piper environment")
}
}
// AbapEnvironmentAssemblePackagesCommand Assembly of installation, support package or patch in SAP Cloud Platform ABAP Environment system
func AbapEnvironmentAssemblePackagesCommand() *cobra.Command {
const STEP_NAME = "abapEnvironmentAssemblePackages"
metadata := abapEnvironmentAssemblePackagesMetadata()
var stepConfig abapEnvironmentAssemblePackagesOptions
var startTime time.Time
var commonPipelineEnvironment abapEnvironmentAssemblePackagesCommonPipelineEnvironment
var createAbapEnvironmentAssemblePackagesCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Assembly of installation, support package or patch in SAP Cloud Platform ABAP Environment system",
Long: `This step runs the assembly of a list of provided installations, support packages or patches in SAP Cloud
Platform ABAP Environment system and saves the corresponding SAR archive to the filesystem.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
abapEnvironmentAssemblePackages(stepConfig, &telemetryData, &commonPipelineEnvironment)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addAbapEnvironmentAssemblePackagesFlags(createAbapEnvironmentAssemblePackagesCmd, &stepConfig)
return createAbapEnvironmentAssemblePackagesCmd
}
func addAbapEnvironmentAssemblePackagesFlags(cmd *cobra.Command, stepConfig *abapEnvironmentAssemblePackagesOptions) {
cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", os.Getenv("PIPER_cfApiEndpoint"), "Cloud Foundry API endpoint")
cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "CF org")
cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "CF Space")
cmd.Flags().StringVar(&stepConfig.CfServiceInstance, "cfServiceInstance", os.Getenv("PIPER_cfServiceInstance"), "Parameter of ServiceInstance Name to delete CloudFoundry Service")
cmd.Flags().StringVar(&stepConfig.CfServiceKeyName, "cfServiceKeyName", os.Getenv("PIPER_cfServiceKeyName"), "Parameter of CloudFoundry Service Key to be created")
cmd.Flags().StringVar(&stepConfig.Host, "host", os.Getenv("PIPER_host"), "Specifies the host address of the SAP Cloud Platform ABAP Environment system")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User or E-Mail for CF")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "User Password for CF User")
cmd.Flags().StringVar(&stepConfig.AddonDescriptor, "addonDescriptor", os.Getenv("PIPER_addonDescriptor"), "AddonDescriptor")
cmd.Flags().IntVar(&stepConfig.MaxRuntimeInMinutes, "maxRuntimeInMinutes", 360, "maximal runtime of the step")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("addonDescriptor")
cmd.MarkFlagRequired("maxRuntimeInMinutes")
}
// retrieve step metadata
func abapEnvironmentAssemblePackagesMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "abapEnvironmentAssemblePackages",
Aliases: []config.Alias{},
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "cfApiEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}},
},
{
Name: "cfOrg",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/org"}},
},
{
Name: "cfSpace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/space"}},
},
{
Name: "cfServiceInstance",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceInstance"}},
},
{
Name: "cfServiceKeyName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceKey"}, {Name: "cloudFoundry/serviceKeyName"}, {Name: "cfServiceKey"}},
},
{
Name: "host",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "username",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "password",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "addonDescriptor",
ResourceRef: []config.ResourceReference{{Name: "commonPipelineEnvironment", Param: "abap/addonDescriptor"}},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "maxRuntimeInMinutes",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "int",
Mandatory: true,
Aliases: []config.Alias{},
},
},
},
},
}
return theMetaData
}
| [
"\"PIPER_cfApiEndpoint\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfSpace\"",
"\"PIPER_cfServiceInstance\"",
"\"PIPER_cfServiceKeyName\"",
"\"PIPER_host\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_addonDescriptor\""
]
| []
| [
"PIPER_addonDescriptor",
"PIPER_cfSpace",
"PIPER_host",
"PIPER_cfApiEndpoint",
"PIPER_password",
"PIPER_username",
"PIPER_cfServiceInstance",
"PIPER_cfServiceKeyName",
"PIPER_cfOrg"
]
| [] | ["PIPER_addonDescriptor", "PIPER_cfSpace", "PIPER_host", "PIPER_cfApiEndpoint", "PIPER_password", "PIPER_username", "PIPER_cfServiceInstance", "PIPER_cfServiceKeyName", "PIPER_cfOrg"] | go | 9 | 0 | |
python/tvm/contrib/xcode.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility to invoke Xcode compiler toolchain"""
from __future__ import absolute_import as _abs
import os
import sys
import subprocess
from .._ffi.base import py_str
from . import util
def xcrun(cmd):
"""Run xcrun and return the output.
Parameters
----------
cmd : list of str
The command sequence.
Returns
-------
out : str
The output string.
"""
cmd = ["xcrun"] + cmd
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
return out.strip()
def codesign(lib):
"""Codesign the shared libary
This is an required step for library to be loaded in
the app.
Parameters
----------
lib : The path to the library.
"""
if "TVM_IOS_CODESIGN" not in os.environ:
raise RuntimeError("Require environment variable TVM_IOS_CODESIGN "
" to be the signature")
signature = os.environ["TVM_IOS_CODESIGN"]
cmd = ["codesign", "--force", "--sign", signature]
cmd += [lib]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Codesign error:\n"
msg += py_str(out)
raise RuntimeError(msg)
def create_dylib(output, objects, arch, sdk="macosx"):
"""Create dynamic library.
Parameters
----------
output : str
The target shared library.
objects : list
List of object files.
arch : str
Target major architectures
sdk : str
The sdk to be used.
"""
clang = xcrun(["-sdk", sdk, "-find", "clang"])
sdk_path = xcrun(["-sdk", sdk, "--show-sdk-path"])
cmd = [clang]
cmd += ["-dynamiclib"]
cmd += ["-arch", arch]
cmd += ["-isysroot", sdk_path]
cmd += ["-o", output]
if isinstance(objects, str):
cmd += [objects]
else:
cmd += objects
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Compilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
# assign so as default output format
create_dylib.output_format = "dylib"
def compile_metal(code, path_target=None, sdk="macosx"):
"""Compile metal with CLI tool from env.
Parameters
----------
code : str
The cuda code.
path_target : str, optional
Output file.
sdk : str, optional
The target platform SDK.
Return
------
metallib : bytearray
The bytearray of the metallib
"""
temp = util.tempdir()
temp_code = temp.relpath("my_lib.metal")
temp_ir = temp.relpath("my_lib.air")
temp_target = temp.relpath("my_lib.metallib")
with open(temp_code, "w") as out_file:
out_file.write(code)
file_target = path_target if path_target else temp_target
# See:
# - https://developer.apple.com/documentation/metal/gpu_functions_libraries/building_a_library_with_metal_s_command-line_tools#overview # pylint: disable=line-too-long
#
# xcrun -sdk macosx metal -c MyLibrary.metal -o MyLibrary.air
# xcrun -sdk macosx metallib MyLibrary.air -o MyLibrary.metallib
cmd1 = ["xcrun", "-sdk", sdk, "metal", "-O3"]
cmd1 += ["-c", temp_code, "-o", temp_ir]
cmd2 = ["xcrun", "-sdk", sdk, "metallib"]
cmd2 += [temp_ir, "-o", file_target]
proc = subprocess.Popen(
' '.join(cmd1) + ";" + ' '.join(cmd2),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
sys.stderr.write("Compilation error:\n")
sys.stderr.write(py_str(out))
sys.stderr.flush()
libbin = None
else:
libbin = bytearray(open(file_target, "rb").read())
return libbin
class XCodeRPCServer(object):
"""Wrapper for RPC server
Parameters
----------
cmd : list of str
The command to run
lock: FileLock
Lock on the path
"""
def __init__(self, cmd, lock):
self.proc = subprocess.Popen(cmd)
self.lock = lock
def join(self):
"""Wait server to finish and release its resource
"""
self.proc.wait()
self.lock.release()
def popen_test_rpc(host,
port,
key,
destination,
libs=None,
options=None):
"""Launch rpc server via xcodebuild test through another process.
Parameters
----------
host : str
The address of RPC proxy host.
port : int
The port of RPC proxy host
key : str
The key of the RPC server
destination : str
Destination device of deployment, as in xcodebuild
libs : list of str
List of files to be packed into app/Frameworks/tvm
These can be dylibs that can be loaded remotely by RPC.
options : list of str
Additional options to xcodebuild
Returns
-------
server : XCodeRPCServer
    The wrapper around the launched test RPC server process.
    Don't call wait() on the underlying process directly, since it can terminate normally.
"""
if "TVM_IOS_RPC_ROOT" in os.environ:
rpc_root = os.environ["TVM_IOS_RPC_ROOT"]
else:
curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
rpc_root = os.path.join(curr_path, "../../../apps/ios_rpc")
proj_path = os.path.realpath(os.path.join(rpc_root, "tvmrpc.xcodeproj"))
if not os.path.exists(proj_path):
raise RuntimeError("Cannot find tvmrpc.xcodeproj in %s," +
(" please set env TVM_IOS_RPC_ROOT correctly" % rpc_root))
# Lock the path so only one file can run
lock = util.filelock(os.path.join(rpc_root, "ios_rpc.lock"))
with open(os.path.join(rpc_root, "rpc_config.txt"), "w") as fo:
fo.write("%s %d %s\n" % (host, port, key))
libs = libs if libs else []
for file_name in libs:
fo.write("%s\n" % file_name)
cmd = ["xcrun", "xcodebuild",
"-scheme", "tvmrpc",
"-project", proj_path,
"-destination", destination]
if options:
cmd += options
cmd += ["test"]
return XCodeRPCServer(cmd, lock)
| []
| []
| [
"TVM_IOS_RPC_ROOT",
"TVM_IOS_CODESIGN"
]
| [] | ["TVM_IOS_RPC_ROOT", "TVM_IOS_CODESIGN"] | python | 2 | 0 | |
test/util/server/server.go | package server
import (
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"path"
"sync"
"testing"
"time"
"github.com/golang/glog"
etcdclientv3 "github.com/coreos/etcd/clientv3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
knet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
kubeclient "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/legacyscheme"
kapi "k8s.io/kubernetes/pkg/apis/core"
kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
authorizationv1typedclient "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1"
projectv1typedclient "github.com/openshift/client-go/project/clientset/versioned/typed/project/v1"
"github.com/openshift/library-go/pkg/crypto"
"github.com/openshift/origin/pkg/cmd/server/admin"
configapi "github.com/openshift/origin/pkg/cmd/server/apis/config"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
"github.com/openshift/origin/pkg/cmd/server/etcd"
"github.com/openshift/origin/pkg/cmd/server/start"
cmdutil "github.com/openshift/origin/pkg/cmd/util"
newproject "github.com/openshift/origin/pkg/oc/cli/admin/project"
"github.com/openshift/origin/test/util"
// install all APIs
_ "github.com/openshift/origin/pkg/api/install"
"github.com/openshift/origin/pkg/api/legacy"
_ "k8s.io/kubernetes/pkg/apis/core/install"
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
)
var (
// startLock protects access to the start vars
startLock sync.Mutex
// startedMaster is true if the master has already been started in process
startedMaster bool
// startedNode is true if the node has already been started in process
startedNode bool
)
// guardMaster prevents multiple master processes from being started at once
func guardMaster() {
startLock.Lock()
defer startLock.Unlock()
if startedMaster {
panic("the master has already been started once in this process - run only a single test, or use the sub-shell")
}
startedMaster = true
}
// guardNode prevents multiple node processes from being started at once
func guardNode() {
startLock.Lock()
defer startLock.Unlock()
if startedNode {
panic("the node has already been started once in this process - run only a single test, or use the sub-shell")
}
startedNode = true
}
// ServiceAccountWaitTimeout is used to determine how long to wait for the service account
// controllers to start up, and populate the service accounts in the test namespace
const ServiceAccountWaitTimeout = 30 * time.Second
// PodCreationWaitTimeout is used to determine how long to wait after the service account token
// is available for the admission control cache to catch up and allow pod creation
const PodCreationWaitTimeout = 10 * time.Second
// FindAvailableBindAddress returns a bind address on 127.0.0.1 with a free port in the low-high range.
// If lowPort is 0, an ephemeral port is allocated.
func FindAvailableBindAddress(lowPort, highPort int) (string, error) {
if highPort < lowPort {
return "", errors.New("lowPort must be <= highPort")
}
for port := lowPort; port <= highPort; port++ {
tryPort := port
if tryPort == 0 {
tryPort = int(rand.Int31n(int32(highPort-1024)) + 1024)
} else {
tryPort = int(rand.Int31n(int32(highPort-lowPort))) + lowPort
}
l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", tryPort))
if err != nil {
if port == 0 {
// Only get one shot to get an ephemeral port
return "", err
}
continue
}
defer l.Close()
return l.Addr().String(), nil
}
return "", fmt.Errorf("Could not find available port in the range %d-%d", lowPort, highPort)
}
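
// exampleFreeListenAddr is an illustrative sketch, not part of the test
// utilities: it shows how a test might reserve a loopback address in the same
// port window the master helpers below use (the 10000-29999 range is copied
// from those helpers).
func exampleFreeListenAddr(t *testing.T) string {
	addr, err := FindAvailableBindAddress(10000, 29999)
	if err != nil {
		t.Fatalf("could not find a free bind address: %v", err)
	}
	return addr
}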
func setupStartOptions(useDefaultPort bool) *start.MasterArgs {
masterArgs := start.NewDefaultMasterArgs()
basedir := util.GetBaseDir()
// Allows to override the default etcd directory from the shell script.
etcdDir := os.Getenv("TEST_ETCD_DIR")
if len(etcdDir) == 0 {
etcdDir = path.Join(basedir, "etcd")
}
masterArgs.EtcdDir = etcdDir
masterArgs.ConfigDir.Default(path.Join(basedir, "openshift.local.config", "master"))
if !useDefaultPort {
// don't wait for nodes to come up
masterAddr := os.Getenv("OS_MASTER_ADDR")
if len(masterAddr) == 0 {
if addr, err := FindAvailableBindAddress(10000, 29999); err != nil {
glog.Fatalf("Couldn't find free address for master: %v", err)
} else {
masterAddr = addr
}
}
masterArgs.MasterAddr.Set(masterAddr)
masterArgs.ListenArg.ListenAddr.Set(masterAddr)
}
dnsAddr := os.Getenv("OS_DNS_ADDR")
if len(dnsAddr) == 0 {
if addr, err := FindAvailableBindAddress(10000, 29999); err != nil {
glog.Fatalf("Couldn't find free address for DNS: %v", err)
} else {
dnsAddr = addr
}
}
masterArgs.DNSBindAddr.Set(dnsAddr)
return masterArgs
}
func DefaultMasterOptions() (*configapi.MasterConfig, error) {
return DefaultMasterOptionsWithTweaks(false)
}
func DefaultMasterOptionsWithTweaks(useDefaultPort bool) (*configapi.MasterConfig, error) {
startOptions := start.MasterOptions{}
startOptions.MasterArgs = setupStartOptions(useDefaultPort)
startOptions.Complete()
// reset, since Complete alters the default
startOptions.MasterArgs.ConfigDir.Default(path.Join(util.GetBaseDir(), "openshift.local.config", "master"))
if err := CreateMasterCerts(startOptions.MasterArgs); err != nil {
return nil, err
}
masterConfig, err := startOptions.MasterArgs.BuildSerializeableMasterConfig()
if err != nil {
return nil, err
}
if masterConfig.AdmissionConfig.PluginConfig == nil {
masterConfig.AdmissionConfig.PluginConfig = make(map[string]*configapi.AdmissionPluginConfig)
}
if masterConfig.EtcdConfig != nil {
addr, err := FindAvailableBindAddress(10000, 29999)
if err != nil {
return nil, fmt.Errorf("can't setup etcd address: %v", err)
}
peerAddr, err := FindAvailableBindAddress(10000, 29999)
if err != nil {
return nil, fmt.Errorf("can't setup etcd address: %v", err)
}
masterConfig.EtcdConfig.Address = addr
masterConfig.EtcdConfig.ServingInfo.BindAddress = masterConfig.EtcdConfig.Address
masterConfig.EtcdConfig.PeerAddress = peerAddr
masterConfig.EtcdConfig.PeerServingInfo.BindAddress = masterConfig.EtcdConfig.PeerAddress
masterConfig.EtcdClientInfo.URLs = []string{"https://" + masterConfig.EtcdConfig.Address}
}
// List public registries that make sense to allow importing images from by default.
	// By default all registries are treated as "secure", i.e. the port for them
	// defaults to "443".
// If the registry you are adding here is insecure, you can add 'Insecure: true' to
// make it default to port '80'.
// If the registry you are adding use custom port, you have to specify the port as
// part of the domain name.
recommendedAllowedRegistriesForImport := configapi.AllowedRegistries{
{DomainName: "docker.io"},
{DomainName: "*.docker.io"}, // registry-1.docker.io
{DomainName: "*.redhat.com"}, // registry.connect.redhat.com and registry.access.redhat.com
{DomainName: "gcr.io"},
{DomainName: "quay.io"},
{DomainName: "registry.centos.org"},
{DomainName: "registry.redhat.io"},
}
masterConfig.ImagePolicyConfig.ScheduledImageImportMinimumIntervalSeconds = 1
allowedRegistries := append(
recommendedAllowedRegistriesForImport,
configapi.RegistryLocation{DomainName: "127.0.0.1:*"},
)
for r := range util.GetAdditionalAllowedRegistries() {
allowedRegistries = append(allowedRegistries, configapi.RegistryLocation{DomainName: r})
}
masterConfig.ImagePolicyConfig.AllowedRegistriesForImport = &allowedRegistries
glog.Infof("Starting integration server from master %s", startOptions.MasterArgs.ConfigDir.Value())
return masterConfig, nil
}
func CreateMasterCerts(masterArgs *start.MasterArgs) error {
hostnames, err := masterArgs.GetServerCertHostnames()
if err != nil {
return err
}
masterURL, err := masterArgs.GetMasterAddress()
if err != nil {
return err
}
publicMasterURL, err := masterArgs.GetMasterPublicAddress()
if err != nil {
return err
}
createMasterCerts := admin.CreateMasterCertsOptions{
CertDir: masterArgs.ConfigDir.Value(),
SignerName: admin.DefaultSignerName(),
Hostnames: hostnames.List(),
ExpireDays: crypto.DefaultCertificateLifetimeInDays,
SignerExpireDays: crypto.DefaultCACertificateLifetimeInDays,
APIServerURL: masterURL.String(),
PublicAPIServerURL: publicMasterURL.String(),
IOStreams: genericclioptions.IOStreams{Out: os.Stderr},
}
if err := createMasterCerts.Validate(nil); err != nil {
return err
}
if err := createMasterCerts.CreateMasterCerts(); err != nil {
return err
}
return nil
}
func MasterEtcdClients(config *configapi.MasterConfig) (*etcdclientv3.Client, error) {
etcd3, err := etcd.MakeEtcdClientV3(config.EtcdClientInfo)
if err != nil {
return nil, err
}
return etcd3, nil
}
func CleanupMasterEtcd(t *testing.T, config *configapi.MasterConfig) {
etcd3, err := MasterEtcdClients(config)
if err != nil {
t.Logf("Unable to get etcd client available for master: %v", err)
}
dumpEtcdOnFailure(t, etcd3)
if config.EtcdConfig != nil {
if len(config.EtcdConfig.StorageDir) > 0 {
if err := os.RemoveAll(config.EtcdConfig.StorageDir); err != nil {
t.Logf("Unable to clean up the config storage directory %s: %v", config.EtcdConfig.StorageDir, err)
}
}
}
}
func StartConfiguredMaster(masterConfig *configapi.MasterConfig) (string, error) {
return StartConfiguredMasterWithOptions(masterConfig)
}
func StartConfiguredMasterAPI(masterConfig *configapi.MasterConfig) (string, error) {
// we need to unconditionally start this controller for rbac permissions to work
if masterConfig.KubernetesMasterConfig.ControllerArguments == nil {
masterConfig.KubernetesMasterConfig.ControllerArguments = map[string][]string{}
}
masterConfig.KubernetesMasterConfig.ControllerArguments["controllers"] = append(masterConfig.KubernetesMasterConfig.ControllerArguments["controllers"], "serviceaccount-token", "clusterrole-aggregation")
return StartConfiguredMasterWithOptions(masterConfig)
}
func StartConfiguredMasterWithOptions(masterConfig *configapi.MasterConfig) (string, error) {
guardMaster()
// openshift apiserver needs its own scheme, but this installs it for now. oc needs it off, openshift apiserver needs it on. awesome.
legacy.InstallInternalLegacyAll(legacyscheme.Scheme)
if masterConfig.EtcdConfig != nil && len(masterConfig.EtcdConfig.StorageDir) > 0 {
os.RemoveAll(masterConfig.EtcdConfig.StorageDir)
}
if err := start.NewMaster(masterConfig, true /* always needed for cluster role aggregation */, true).Start(); err != nil {
return "", err
}
adminKubeConfigFile := util.KubeConfigPath()
clientConfig, err := util.GetClusterAdminClientConfig(adminKubeConfigFile)
if err != nil {
return "", err
}
masterURL, err := url.Parse(clientConfig.Host)
if err != nil {
return "", err
}
// wait for the server to come up: 35 seconds
if err := cmdutil.WaitForSuccessfulDial(true, "tcp", masterURL.Host, 100*time.Millisecond, 1*time.Second, 35); err != nil {
return "", err
}
var healthzResponse string
err = wait.Poll(time.Second, time.Minute, func() (bool, error) {
var healthy bool
healthy, healthzResponse, err = IsServerHealthy(*masterURL, masterConfig.OAuthConfig != nil)
if err != nil {
return false, err
}
return healthy, nil
})
if err == wait.ErrWaitTimeout {
return "", fmt.Errorf("server did not become healthy: %v", healthzResponse)
}
if err != nil {
return "", err
}
// wait until the cluster roles have been aggregated
clusterAdminClientConfig, err := util.GetClusterAdminClientConfig(adminKubeConfigFile)
if err != nil {
return "", err
}
err = wait.Poll(time.Second, time.Minute, func() (bool, error) {
kubeClient, err := kubeclient.NewForConfig(clusterAdminClientConfig)
if err != nil {
return false, err
}
admin, err := kubeClient.RbacV1().ClusterRoles().Get("admin", metav1.GetOptions{})
if err != nil {
return false, err
}
if len(admin.Rules) == 0 {
return false, nil
}
edit, err := kubeClient.RbacV1().ClusterRoles().Get("edit", metav1.GetOptions{})
if err != nil {
return false, err
}
if len(edit.Rules) == 0 {
return false, nil
}
view, err := kubeClient.RbacV1().ClusterRoles().Get("view", metav1.GetOptions{})
if err != nil {
return false, err
}
if len(view.Rules) == 0 {
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
return "", fmt.Errorf("server did not become healthy: %v", healthzResponse)
}
if err != nil {
return "", err
}
return adminKubeConfigFile, nil
}
func IsServerHealthy(url url.URL, checkOAuth bool) (bool, string, error) {
healthy, healthzResponse, err := isServerPathHealthy(url, "/healthz", http.StatusOK)
if err != nil || !healthy || !checkOAuth {
return healthy, healthzResponse, err
}
// As a special case, check this endpoint as well since the OAuth server is not part of the /healthz check
// Whenever the OAuth server gets split out, it would have its own /healthz and post start hooks to handle this
return isServerPathHealthy(url, "/oauth/token/request", http.StatusFound)
}
func isServerPathHealthy(url url.URL, path string, code int) (bool, string, error) {
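// The test master serves a self-signed certificate, so certificate
// verification is intentionally skipped for this health-check transport.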
transport := knet.SetTransportDefaults(&http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
})
url.Path = path
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return false, "", err
}
req.Header.Set("Accept", "text/html")
resp, err := transport.RoundTrip(req)
if err != nil {
return false, "", err
}
defer resp.Body.Close()
content, _ := ioutil.ReadAll(resp.Body)
return resp.StatusCode == code, string(content), nil
}
// StartTestMaster starts up a test master and returns back the startOptions so you can get clients and certs
func StartTestMaster() (*configapi.MasterConfig, string, error) {
master, err := DefaultMasterOptions()
if err != nil {
return nil, "", err
}
adminKubeConfigFile, err := StartConfiguredMaster(master)
return master, adminKubeConfigFile, err
}
func StartTestMasterAPI() (*configapi.MasterConfig, string, error) {
master, err := DefaultMasterOptions()
if err != nil {
return nil, "", err
}
adminKubeConfigFile, err := StartConfiguredMasterAPI(master)
return master, adminKubeConfigFile, err
}
// serviceAccountSecretsExist checks whether the given service account has at least a token and a dockercfg
// secret associated with it.
func serviceAccountSecretsExist(clientset kclientset.Interface, namespace string, sa *kapi.ServiceAccount) bool {
foundTokenSecret := false
foundDockercfgSecret := false
for _, secret := range sa.Secrets {
ns := namespace
if len(secret.Namespace) > 0 {
ns = secret.Namespace
}
secret, err := clientset.Core().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
if err == nil {
switch secret.Type {
case kapi.SecretTypeServiceAccountToken:
foundTokenSecret = true
case kapi.SecretTypeDockercfg:
foundDockercfgSecret = true
}
}
}
return foundTokenSecret && foundDockercfgSecret
}
// WaitForPodCreationServiceAccounts ensures that the service account needed for pod creation exists
// and that the cache for the admission control that checks for pod tokens has caught up to allow
// pod creation.
func WaitForPodCreationServiceAccounts(clientset kclientset.Interface, namespace string) error {
if err := WaitForServiceAccounts(clientset, namespace, []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
return err
}
testPod := &kapi.Pod{}
testPod.GenerateName = "test"
testPod.Spec.Containers = []kapi.Container{
{
Name: "container",
Image: "openshift/origin-pod:latest",
},
}
return wait.PollImmediate(time.Second, PodCreationWaitTimeout, func() (bool, error) {
pod, err := clientset.Core().Pods(namespace).Create(testPod)
if err != nil {
glog.Warningf("Error attempting to create test pod: %v", err)
return false, nil
}
err = clientset.Core().Pods(namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
if err != nil {
return false, err
}
return true, nil
})
}
// WaitForServiceAccounts ensures the service accounts needed by build pods exist in the namespace
// The extra controllers tend to starve the service account controller
func WaitForServiceAccounts(clientset kclientset.Interface, namespace string, accounts []string) error {
serviceAccounts := clientset.Core().ServiceAccounts(namespace)
return wait.Poll(time.Second, ServiceAccountWaitTimeout, func() (bool, error) {
for _, account := range accounts {
sa, err := serviceAccounts.Get(account, metav1.GetOptions{})
if err != nil {
return false, nil
}
if !serviceAccountSecretsExist(clientset, namespace, sa) {
return false, nil
}
}
return true, nil
})
}
// CreateNewProject creates a new project using the clusterAdminClient, then gets a token for the adminUser and returns
// back a client for the admin user
func CreateNewProject(clientConfig *restclient.Config, projectName, adminUser string) (kclientset.Interface, *restclient.Config, error) {
projectClient, err := projectv1typedclient.NewForConfig(clientConfig)
if err != nil {
return nil, nil, err
}
kubeExternalClient, err := kubeclient.NewForConfig(clientConfig)
if err != nil {
return nil, nil, err
}
authorizationClient, err := authorizationv1typedclient.NewForConfig(clientConfig)
if err != nil {
return nil, nil, err
}
newProjectOptions := &newproject.NewProjectOptions{
ProjectClient: projectClient,
RbacClient: kubeExternalClient.RbacV1(),
SARClient: authorizationClient.SubjectAccessReviews(),
ProjectName: projectName,
AdminRole: bootstrappolicy.AdminRoleName,
AdminUser: adminUser,
UseNodeSelector: false,
IOStreams: genericclioptions.NewTestIOStreamsDiscard(),
}
if err := newProjectOptions.Run(); err != nil {
return nil, nil, err
}
kubeClient, config, err := util.GetClientForUser(clientConfig, adminUser)
return kubeClient, config, err
}
| [
"\"TEST_ETCD_DIR\"",
"\"OS_MASTER_ADDR\"",
"\"OS_DNS_ADDR\""
]
| []
| [
"OS_DNS_ADDR",
"OS_MASTER_ADDR",
"TEST_ETCD_DIR"
]
| [] | ["OS_DNS_ADDR", "OS_MASTER_ADDR", "TEST_ETCD_DIR"] | go | 3 | 0 | |
client.go | package function
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"time"
"github.com/mitchellh/go-homedir"
)
const (
// DefaultRegistry through which containers of Functions will be shuttled.
DefaultRegistry = "docker.io"
// DefaultTemplate is the default Function signature / environmental context
// of the resultant function. All runtimes are expected to have at least
// one implementation of each supported function signature. Currently that
// includes an HTTP Handler ("http") and Cloud Events handler ("events")
DefaultTemplate = "http"
// DefaultVersion is the initial value for string members whose implicit type
// is a semver.
DefaultVersion = "0.0.0"
// DefaultConfigPath is used in the unlikely event that
// the user has no home directory (no ~), there is no
// XDG_CONFIG_HOME set, and no WithConfigPath was used.
DefaultConfigPath = ".config/func"
// DefaultBuildType is the default build type for a Function
DefaultBuildType = BuildTypeLocal
)
// Client for managing Function instances.
type Client struct {
repositoriesPath string // path to repositories
repositoriesURI string // repo URI (overrides repositories path)
verbose bool // print verbose logs
builder Builder // Builds a runnable image source
pusher Pusher // Pushes Function image to a remote
deployer Deployer // Deploys or Updates a Function
runner Runner // Runs the Function locally
remover Remover // Removes remote services
lister Lister // Lists remote services
describer Describer // Describes Function instances
dnsProvider DNSProvider // Provider of DNS services
registry string // default registry for OCI image tags
progressListener ProgressListener // progress listener
repositories *Repositories // Repositories management
templates *Templates // Templates management
instances *Instances // Function Instances management
transport http.RoundTripper // Customizable internal transport
pipelinesProvider PipelinesProvider // CI/CD pipelines management
}
// ErrNotBuilt indicates the Function has not yet been built.
var ErrNotBuilt = errors.New("not built")
// Builder of Function source to runnable image.
type Builder interface {
// Build a Function project with source located at path.
Build(context.Context, Function) error
}
// Pusher of Function image to a registry.
type Pusher interface {
// Push the image of the Function.
// Returns Image Digest - SHA256 hash of the produced image
Push(ctx context.Context, f Function) (string, error)
}
// Deployer of Function source to running status.
type Deployer interface {
// Deploy a Function of given name, using given backing image.
Deploy(context.Context, Function) (DeploymentResult, error)
}
type DeploymentResult struct {
Status Status
URL string
}
// Status of the Function from the DeploymentResult
type Status int
const (
Failed Status = iota
Deployed
Updated
)
// Runner runs the Function locally.
type Runner interface {
// Run the Function, returning a Job with metadata, error channels, and
// a stop function. The process can be stopped by running the returned stop
// function, either on context cancellation or in a defer.
Run(context.Context, Function) (*Job, error)
}
// Remover of deployed services.
type Remover interface {
// Remove the Function from remote.
Remove(ctx context.Context, name string) error
}
// Lister of deployed functions.
type Lister interface {
// List the Functions currently deployed.
List(ctx context.Context) ([]ListItem, error)
}
type ListItem struct {
Name string `json:"name" yaml:"name"`
Namespace string `json:"namespace" yaml:"namespace"`
Runtime string `json:"runtime" yaml:"runtime"`
URL string `json:"url" yaml:"url"`
Ready string `json:"ready" yaml:"ready"`
}
// ProgressListener is notified of task progress.
type ProgressListener interface {
// SetTotal steps of the given task.
SetTotal(int)
// Increment to the next step with the given message.
Increment(message string)
// Complete signals completion, which is expected to be somewhat different
// than a step increment.
Complete(message string)
// Stopping indicates the process is in the state of stopping, such as when a
// context cancelation has been received
Stopping()
// Done signals a cessation of progress updates. Should be called in a defer
// statement to ensure the progress listener can stop any outstanding tasks
// such as synchronous user updates.
Done()
}
// Describer of Function instances
type Describer interface {
// Describe the named Function in the remote environment.
Describe(ctx context.Context, name string) (Instance, error)
}
// Instance data about the runtime state of a Function in a given environment.
//
// A Function instance is a logical running Function space, which shares
// a unique route (or set of routes). Due to autoscaling and load balancing,
// there is a one to many relationship between a given route and processes.
// By default the system creates the 'local' and 'remote' named instances
// when a Function is run (locally) and deployed, respectively.
// See the .Instances(f) accessor for the map of named environments to these
// Function Information structures.
type Instance struct {
// Route is the primary route of a Function instance.
Route string
// Routes is the primary route plus any other route at which the Function
// can be contacted.
Routes []string `json:"routes" yaml:"routes"`
Name string `json:"name" yaml:"name"`
Image string `json:"image" yaml:"image"`
Namespace string `json:"namespace" yaml:"namespace"`
Subscriptions []Subscription `json:"subscriptions" yaml:"subscriptions"`
}
// Subscriptions currently active to event sources
type Subscription struct {
Source string `json:"source" yaml:"source"`
Type string `json:"type" yaml:"type"`
Broker string `json:"broker" yaml:"broker"`
}
// DNSProvider exposes DNS services necessary for serving the Function.
type DNSProvider interface {
// Provide the given name by routing requests to address.
Provide(Function) error
}
// PipelinesProvider manages the lifecycle of CI/CD pipelines used by a Function
type PipelinesProvider interface {
Run(context.Context, Function) error
Remove(context.Context, Function) error
}
// New client for Function management.
func New(options ...Option) *Client {
// Instantiate client with static defaults.
c := &Client{
builder: &noopBuilder{output: os.Stdout},
pusher: &noopPusher{output: os.Stdout},
deployer: &noopDeployer{output: os.Stdout},
runner: &noopRunner{output: os.Stdout},
remover: &noopRemover{output: os.Stdout},
lister: &noopLister{output: os.Stdout},
describer: &noopDescriber{output: os.Stdout},
dnsProvider: &noopDNSProvider{output: os.Stdout},
progressListener: &NoopProgressListener{},
pipelinesProvider: &noopPipelinesProvider{},
repositoriesPath: filepath.Join(ConfigPath(), "repositories"),
transport: http.DefaultTransport,
}
for _, o := range options {
o(c)
}
// Initialize sub-managers using now-fully-initialized client.
c.repositories = newRepositories(c)
c.templates = newTemplates(c)
c.instances = newInstances(c)
// Trigger the creation of the config and repository paths
_ = ConfigPath() // Config is package-global scoped
_ = c.RepositoriesPath() // Repositories is Client-specific
return c
}
// The default config path is evaluated in the following order, from lowest
// to highest precedence.
// 1. The static default is DefaultConfigPath (./.config/func)
// 2. ~/.config/func if it exists (can be expanded: user has a home dir)
// 3. The value of $XDG_CONFIG_HOME/func if the environment variable exists.
// The path will be created if it does not already exist.
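// As an illustrative example (the value below is hypothetical): with
// XDG_CONFIG_HOME=/tmp/xdg set, the effective path is /tmp/xdg/func,
// taking precedence over ~/.config/func.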
func ConfigPath() (path string) {
path = DefaultConfigPath
// ~/.config/func is the default if ~ can be expanded
if home, err := homedir.Expand("~"); err == nil {
path = filepath.Join(home, ".config", "func")
}
// 'XDG_CONFIG_HOME/func' takes precedence if defined
if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
path = filepath.Join(xdg, "func")
}
mkdir(path) // make sure it exists
return
}
// RepositoriesPath accesses the currently effective repositories path,
// which defaults to [ConfigPath]/repositories but can be set explicitly using
// the WithRepositoriesPath option when creating the client.
// The path will be created if it does not already exist.
func (c *Client) RepositoriesPath() (path string) {
path = c.repositories.Path()
mkdir(path) // make sure it exists
return
}
// RepositoriesPath is a convenience method for accessing the default path to
// repositories that will be used by new instances of a Client unless options
// such as WithRepositoriesPath are used to override.
// The path will be created if it does not already exist.
func RepositoriesPath() string {
return New().RepositoriesPath()
}
// OPTIONS
// ---------
// Option defines a Function which when passed to the Client constructor
// optionally mutates private members at time of instantiation.
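// As an illustrative sketch (the registry value below is hypothetical), options
// are applied at construction time, and anything left unset falls back to the
// noop defaults defined at the bottom of this file:
//
//   client := New(
//       WithVerbose(true),
//       WithRegistry("quay.io/myname"),
//   )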
type Option func(*Client)
// WithVerbose toggles verbose logging.
func WithVerbose(v bool) Option {
return func(c *Client) {
c.verbose = v
}
}
// WithBuilder provides the concrete implementation of a builder.
func WithBuilder(d Builder) Option {
return func(c *Client) {
c.builder = d
}
}
// WithPusher provides the concrete implementation of a pusher.
func WithPusher(d Pusher) Option {
return func(c *Client) {
c.pusher = d
}
}
// WithDeployer provides the concrete implementation of a deployer.
func WithDeployer(d Deployer) Option {
return func(c *Client) {
c.deployer = d
}
}
// WithRunner provides the concrete implementation of a deployer.
func WithRunner(r Runner) Option {
return func(c *Client) {
c.runner = r
}
}
// WithRemover provides the concrete implementation of a remover.
func WithRemover(r Remover) Option {
return func(c *Client) {
c.remover = r
}
}
// WithLister provides the concrete implementation of a lister.
func WithLister(l Lister) Option {
return func(c *Client) {
c.lister = l
}
}
// WithDescriber provides a concrete implementation of a Function describer.
func WithDescriber(describer Describer) Option {
return func(c *Client) {
c.describer = describer
}
}
// WithProgressListener provides a concrete implementation of a listener to
// be notified of progress updates.
func WithProgressListener(p ProgressListener) Option {
return func(c *Client) {
c.progressListener = p
}
}
// WithDNSProvider provides a DNS provider implementation for registering the
// effective DNS name which is either explicitly set via WithName or is derived
// from the root path.
func WithDNSProvider(provider DNSProvider) Option {
return func(c *Client) {
c.dnsProvider = provider
}
}
// WithRepositoriesPath sets the location on disk to use for extensible template
// repositories. Extensible template repositories are additional templates
// that exist on disk and are not built into the binary.
func WithRepositoriesPath(path string) Option {
return func(c *Client) {
c.repositoriesPath = path
}
}
// WithRepository sets a specific URL to a Git repository from which to pull
// templates. This setting's existence precludes the use of either the inbuilt
// templates or any repositories from the extensible repositories path.
func WithRepository(uri string) Option {
return func(c *Client) {
c.repositoriesURI = uri
}
}
// WithRegistry sets the default registry which is consulted when an image name/tag
// is not explicitly provided. Can be fully qualified, including the registry
// (ex: 'quay.io/myname') or simply the namespace 'myname', which indicates
// the use of the default registry.
func WithRegistry(registry string) Option {
return func(c *Client) {
c.registry = registry
}
}
// WithTransport sets a custom transport to use internally.
func WithTransport(t http.RoundTripper) Option {
return func(c *Client) {
c.transport = t
}
}
// WithPipelinesProvider sets implementation of provider responsible for CI/CD pipelines
func WithPipelinesProvider(pp PipelinesProvider) Option {
return func(c *Client) {
c.pipelinesProvider = pp
}
}
// ACCESSORS
// ---------
// Repositories accessor
func (c *Client) Repositories() *Repositories {
return c.repositories
}
// Templates accessor
func (c *Client) Templates() *Templates {
return c.templates
}
// Instances accessor
func (c *Client) Instances() *Instances {
return c.instances
}
// Runtimes available in totality.
// Not all repository/template combinations necessarily exist,
// and further validation is performed when a template+runtime is chosen
// from a given repository. This is the global list of all available runtimes.
// Returned list is unique and sorted.
func (c *Client) Runtimes() ([]string, error) {
runtimes := newSortedSet()
// Gather all runtimes from all repositories
// into a uniqueness map
repositories, err := c.Repositories().All()
if err != nil {
return []string{}, err
}
for _, repo := range repositories {
for _, runtime := range repo.Runtimes {
runtimes.Add(runtime.Name)
}
}
// Return a unique, sorted list of runtimes
return runtimes.Items(), nil
}
// LIFECYCLE METHODS
// -----------------
// New Function.
// Use Create, Build and Deploy independently for lower level control.
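// As a rough sketch (error handling elided), the equivalent lower level flow
// for an already-created Function rooted at "root" would be:
//
//   _ = client.Build(ctx, root)
//   _ = client.Push(ctx, root)
//   _ = client.Deploy(ctx, root)
//   _ = client.Route(root)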
func (c *Client) New(ctx context.Context, cfg Function) (err error) {
c.progressListener.SetTotal(3)
// Always start a concurrent routine listening for context cancellation.
// On this event, immediately indicate the task is canceling.
// (this is useful, for example, when a progress listener is mutating
// stdout, and a context cancelation needs to free up stdout entirely for
// the status or error from said cancellation).
go func() {
<-ctx.Done()
c.progressListener.Stopping()
}()
// Create Function at path indicated by Config
if err = c.Create(cfg); err != nil {
return
}
// Load the now-initialized Function.
f, err := NewFunction(cfg.Root)
if err != nil {
return
}
// Build the now-initialized Function
c.progressListener.Increment("Building container image")
if err = c.Build(ctx, f.Root); err != nil {
return
}
// Push the produced function image
c.progressListener.Increment("Pushing container image to registry")
if err = c.Push(ctx, f.Root); err != nil {
return
}
// Deploy the initialized Function, returning its publicly
// addressable name for possible registration.
c.progressListener.Increment("Deploying Function to cluster")
if err = c.Deploy(ctx, f.Root); err != nil {
return
}
// Create an external route to the Function
c.progressListener.Increment("Creating route to Function")
if err = c.Route(f.Root); err != nil {
return
}
c.progressListener.Complete("Done")
// TODO: use the knative client during deployment such that the actual final
// route can be returned from the deployment step, passed to the DNS Router
// for routing actual traffic, and returned here.
if c.verbose {
fmt.Printf("https://%v/\n", f.Name)
}
return
}
// Create a new Function from the given defaults.
// <path> will default to the absolute path of the current working directory.
// <name> will default to the current working directory.
// When <name> is provided but <path> is not, a directory <name> is created
// in the current working directory and used for <path>.
func (c *Client) Create(cfg Function) (err error) {
// convert Root path to absolute
cfg.Root, err = filepath.Abs(cfg.Root)
if err != nil {
return
}
// Create project root directory, if it doesn't already exist
if err = os.MkdirAll(cfg.Root, 0755); err != nil {
return
}
// Create should never clobber a pre-existing Function
hasFunc, err := hasInitializedFunction(cfg.Root)
if err != nil {
return err
}
if hasFunc {
return fmt.Errorf("Function at '%v' already initialized", cfg.Root)
}
// Path is defaulted to the current working directory
if cfg.Root == "" {
if cfg.Root, err = os.Getwd(); err != nil {
return
}
}
// Name is defaulted to the directory of the given path.
if cfg.Name == "" {
cfg.Name = nameFromPath(cfg.Root)
}
// The path for the new Function should not have any contentious files
// (hidden files OK, unless it's one used by Func)
if err := assertEmptyRoot(cfg.Root); err != nil {
return err
}
// Create a new Function (in memory)
f := NewFunctionWith(cfg)
// Create a .func directory which is also added to a .gitignore
if err = createRuntimeDir(f); err != nil {
return
}
// Write out the new Function's Template files.
// Templates contain values which may result in the Function being mutated
// (default builders, etc), so a new (potentially mutated) Function is
// returned from Templates.Write
err = c.Templates().Write(&f)
if err != nil {
return
}
// Mark the Function as having been created
f.Created = time.Now()
if err = f.Write(); err != nil {
return
}
// TODO: Create a status structure and return it for clients to use
// for output, such as from the CLI.
if c.verbose {
fmt.Printf("Builder: %s\n", f.Builder)
if len(f.Buildpacks) > 0 {
fmt.Println("Buildpacks:")
for _, b := range f.Buildpacks {
fmt.Printf(" ... %s\n", b)
}
}
fmt.Println("Function project created")
}
return
}
// createRuntimeDir creates a .func directory in the root of the given
// Function which is also registered as ignored in .gitignore
// TODO: Mutate extant .gitignore file if it exists rather than failing
// if present (see contentious files in function.go), such that a user
// can `git init` a directory prior to `func init` in the same directory.
func createRuntimeDir(f Function) error {
if err := os.MkdirAll(filepath.Join(f.Root, RunDataDir), os.ModePerm); err != nil {
return err
}
gitignore := `
# Functions use the .func directory for local runtime data which should
# generally not be tracked in source control:
/.func
`
return os.WriteFile(filepath.Join(f.Root, ".gitignore"), []byte(gitignore), os.ModePerm)
}
// Build the Function at path. Errors if the Function is either unloadable or does
// not contain a populated Image.
func (c *Client) Build(ctx context.Context, path string) (err error) {
c.progressListener.Increment("Building function image")
// If not logging verbosely, the ongoing progress of the build will not
// be streamed to stdout, and the lack of activity has been seen to cause
// users to prematurely exit due to the sluggishness of pulling large images.
if !c.verbose {
c.printBuildActivity(ctx) // print friendly messages until context is canceled
}
f, err := NewFunction(path)
if err != nil {
return
}
// Derive Image from the path (precedence is given to extant config)
if f.Image, err = DerivedImage(path, c.registry); err != nil {
return
}
if err = c.builder.Build(ctx, f); err != nil {
return
}
// Write (save) - Serialize the Function to disk
// Will now contain populated image tag.
if err = f.Write(); err != nil {
return
}
// TODO: create a status structure and return it here for optional
// use by the cli for user echo (rather than rely on verbose mode here)
message := fmt.Sprintf("🙌 Function image built: %v", f.Image)
if runtime.GOOS == "windows" {
message = fmt.Sprintf("Function image built: %v", f.Image)
}
c.progressListener.Increment(message)
return
}
func (c *Client) printBuildActivity(ctx context.Context) {
m := []string{
"Still building",
"Still building",
"Yes, still building",
"Don't give up on me",
"Still building",
"This is taking a while",
}
i := 0
ticker := time.NewTicker(10 * time.Second)
go func() {
for {
select {
case <-ticker.C:
c.progressListener.Increment(m[i])
i++
i = i % len(m)
case <-ctx.Done():
c.progressListener.Stopping()
ticker.Stop()
return
}
}
}()
}
// Deploy the Function at path. Errors if the Function has not been
// initialized with an image tag.
func (c *Client) Deploy(ctx context.Context, path string) (err error) {
go func() {
<-ctx.Done()
c.progressListener.Stopping()
}()
f, err := NewFunction(path)
if err != nil {
return
}
// Functions must be built (have an associated image) before being deployed.
// Note that externally built images may be specified in the func.yaml
if !f.Built() {
return ErrNotBuilt
}
// Deploy a new or Update the previously-deployed Function
c.progressListener.Increment("Deploying function to the cluster")
result, err := c.deployer.Deploy(ctx, f)
if result.Status == Deployed {
c.progressListener.Increment(fmt.Sprintf("Function deployed at URL: %v", result.URL))
} else if result.Status == Updated {
c.progressListener.Increment(fmt.Sprintf("Function updated at URL: %v", result.URL))
}
return err
}
// RunPipeline runs a Pipeline to Build and deploy the Function at path.
func (c *Client) RunPipeline(ctx context.Context, path string, git Git) (err error) {
go func() {
<-ctx.Done()
c.progressListener.Stopping()
}()
f, err := NewFunction(path)
if err != nil {
err = fmt.Errorf("failed to laod function: %w", err)
return
}
f.Git = git
// Build and deploy function using Pipeline
err = c.pipelinesProvider.Run(ctx, f)
if err != nil {
err = fmt.Errorf("failed to run pipeline: %w", err)
return
}
return err
}
func (c *Client) Route(path string) (err error) {
// Ensure that the allocated final address is enabled with the
// configured DNS provider.
// NOTE:
// DNS and TLS are provisioned by Knative Serving + cert-manager,
// but DNS subdomain CNAME to the Kourier Load Balancer is
// still manual, and the initial cluster config to support the TLD
// is still manual.
f, err := NewFunction(path)
if err != nil {
return
}
return c.dnsProvider.Provide(f)
}
// Run the Function whose code resides at root.
// A Job is returned which exposes the effective port, a stop function, and a channel of runtime errors.
func (c *Client) Run(ctx context.Context, root string) (job *Job, err error) {
go func() {
<-ctx.Done()
c.progressListener.Stopping()
}()
// Load the Function
f, err := NewFunction(root)
if err != nil {
return
}
if !f.Initialized() {
// TODO: this needs a test.
err = fmt.Errorf("the given path '%v' does not contain an initialized "+
"Function. Please create one at this path in order to run", root)
return
}
// Run the Function, which returns a Job for use interacting (at arms length)
// with that running task (which is likely inside a container process).
if job, err = c.runner.Run(ctx, f); err != nil {
return
}
// Return to the caller the effective port, a function to call to trigger
// stop, and a channel on which can be received runtime errors.
return job, nil
}
// Info for a Function. Name takes precedence. If no name is provided,
// the Function defined at root is used.
func (c *Client) Info(ctx context.Context, name, root string) (d Instance, err error) {
go func() {
<-ctx.Done()
c.progressListener.Stopping()
}()
// If name is provided, it takes precedence.
// Otherwise load the Function defined at root.
if name != "" {
return c.describer.Describe(ctx, name)
}
f, err := NewFunction(root)
if err != nil {
return d, err
}
if !f.Initialized() {
return d, fmt.Errorf("%v is not initialized", f.Name)
}
return c.describer.Describe(ctx, f.Name)
}
// List currently deployed Functions.
func (c *Client) List(ctx context.Context) ([]ListItem, error) {
// delegate to concrete implementation of lister entirely.
return c.lister.List(ctx)
}
// Remove a Function. Name takes precedence. If no name is provided,
// the Function defined at root is used if it exists.
func (c *Client) Remove(ctx context.Context, cfg Function, deleteAll bool) error {
go func() {
<-ctx.Done()
c.progressListener.Stopping()
}()
// If name is provided, it takes precedence.
// Otherwise load the Function defined at root.
functionName := cfg.Name
if cfg.Name == "" {
f, err := NewFunction(cfg.Root)
if err != nil {
return err
}
if !f.Initialized() {
return fmt.Errorf("Function at %v can not be removed unless initialized. Try removing by name", f.Root)
}
functionName = f.Name
cfg = f
}
// Delete Knative Service and dependent resources in parallel
c.progressListener.Increment(fmt.Sprintf("Removing Knative Service: %v", functionName))
errChan := make(chan error)
go func() {
errChan <- c.remover.Remove(ctx, functionName)
}()
var errResources error
if deleteAll {
c.progressListener.Increment(fmt.Sprintf("Removing Knative Service '%v' and all dependent resources", functionName))
errResources = c.pipelinesProvider.Remove(ctx, cfg)
}
errService := <-errChan
if errService != nil && errResources != nil {
return fmt.Errorf("%s\n%s", errService, errResources)
} else if errResources != nil {
return errResources
}
return errService
}
// Invoke is a convenience method for triggering the execution of a Function
// for testing and development. Returned is a map of metadata and a stringified
// version of the content.
// The target argument is optional, naming the running instance of the Function
// which should be invoked. This can be the literal names "local" or "remote",
// or can be a URL to an arbitrary endpoint. If not provided, a running local
// instance is preferred, with the remote Function triggered if there is no
// locally running instance.
// Example:
// myClient.Invoke(myContext, myFunction, "local", NewInvokeMessage())
// The message sent to the Function is defined by the invoke message.
// See NewInvokeMessage for its defaults.
// Functions are invoked in a manner consistent with the settings defined in
// their metadata. For example HTTP vs CloudEvent
func (c *Client) Invoke(ctx context.Context, root string, target string, m InvokeMessage) (metadata map[string][]string, body string, err error) {
go func() {
<-ctx.Done()
c.progressListener.Stopping()
}()
f, err := NewFunction(root)
if err != nil {
return
}
// See invoke.go for implementation details
return invoke(ctx, c, f, target, m, c.verbose)
}
// Push the image for the named service to the configured registry
func (c *Client) Push(ctx context.Context, path string) (err error) {
f, err := NewFunction(path)
if err != nil {
return
}
if !f.Built() {
return ErrNotBuilt
}
imageDigest, err := c.pusher.Push(ctx, f)
if err != nil {
return
}
// Record the Image Digest pushed.
f.ImageDigest = imageDigest
return f.Write()
}
// DEFAULTS
// ---------
// Manual implementations (noops) of required interfaces.
// In practice, the user of this client package (for example the CLI) will
// provide a concrete implementation for only the interfaces necessary to
// complete the given command. Integrators importing the package would
// provide a concrete implementation for all interfaces to be used. To
// enable partial definition (in particular used for testing), they
// are defaulted to noop implementations such that they can be provided
// only when necessary. Unit tests for the concrete implementations
// serve to keep the core logic here separate from the imperative, and
// with a minimum of external dependencies.
// -----------------------------------------------------
// Builder
type noopBuilder struct{ output io.Writer }
func (n *noopBuilder) Build(ctx context.Context, _ Function) error { return nil }
// Pusher
type noopPusher struct{ output io.Writer }
func (n *noopPusher) Push(ctx context.Context, f Function) (string, error) { return "", nil }
// Deployer
type noopDeployer struct{ output io.Writer }
func (n *noopDeployer) Deploy(ctx context.Context, _ Function) (DeploymentResult, error) {
return DeploymentResult{}, nil
}
// Runner
type noopRunner struct{ output io.Writer }
func (n *noopRunner) Run(context.Context, Function) (job *Job, err error) {
return
}
// Remover
type noopRemover struct{ output io.Writer }
func (n *noopRemover) Remove(context.Context, string) error { return nil }
// Lister
type noopLister struct{ output io.Writer }
func (n *noopLister) List(context.Context) ([]ListItem, error) { return []ListItem{}, nil }
// Describer
type noopDescriber struct{ output io.Writer }
func (n *noopDescriber) Describe(context.Context, string) (Instance, error) {
return Instance{}, errors.New("no describer provided")
}
// PipelinesProvider
type noopPipelinesProvider struct{}
func (n *noopPipelinesProvider) Run(ctx context.Context, _ Function) error { return nil }
func (n *noopPipelinesProvider) Remove(ctx context.Context, _ Function) error { return nil }
// DNSProvider
type noopDNSProvider struct{ output io.Writer }
func (n *noopDNSProvider) Provide(_ Function) error { return nil }
// ProgressListener
type NoopProgressListener struct{}
func (p *NoopProgressListener) SetTotal(i int) {}
func (p *NoopProgressListener) Increment(m string) {}
func (p *NoopProgressListener) Complete(m string) {}
func (p *NoopProgressListener) Stopping() {}
func (p *NoopProgressListener) Done() {}
// mkdir attempts to mkdir, writing any errors to stderr.
func mkdir(path string) {
// Since it is expected that the code elsewhere never assumes directories
// exist (doing so is a race condition), it is valid to simply
// handle errors at this level.
if err := os.MkdirAll(path, 0700); err != nil {
fmt.Fprintf(os.Stderr, "Error creating '%v': %v", path, err)
debug.PrintStack()
}
}
| [
"\"XDG_CONFIG_HOME\""
]
| []
| [
"XDG_CONFIG_HOME"
]
| [] | ["XDG_CONFIG_HOME"] | go | 1 | 0 | |
python/pyspark/pandas/base.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base and utility classes for pandas-on-Spark objects.
"""
from abc import ABCMeta, abstractmethod
from functools import wraps, partial
from itertools import chain
from typing import Any, Callable, Optional, Sequence, Tuple, Union, cast, TYPE_CHECKING
import numpy as np
import pandas as pd # noqa: F401
from pandas.api.types import is_list_like, CategoricalDtype
from pyspark.sql import functions as F, Column, Window
from pyspark.sql.types import (
DoubleType,
FloatType,
LongType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, Dtype, IndexOpsLike, Label, SeriesOrIndex
from pyspark.pandas.config import get_option, option_context
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_INDEX_NAME,
)
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.accessors import SparkIndexOpsMethods
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.utils import (
combine_frames,
same_anchor,
scol_for,
validate_axis,
ERROR_MESSAGE_CANNOT_COMBINE,
)
from pyspark.pandas.frame import DataFrame
if TYPE_CHECKING:
from pyspark.sql._typing import ColumnOrName # noqa: F401 (SPARK-34943)
from pyspark.pandas.data_type_ops.base import DataTypeOps # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
def should_alignment_for_column_op(self: SeriesOrIndex, other: SeriesOrIndex) -> bool:
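# Returns True when the two objects are not anchored to the same underlying
# Spark DataFrame and therefore must be aligned (combined) before a
# column-wise operation can be applied.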
from pyspark.pandas.series import Series
if isinstance(self, Series) and isinstance(other, Series):
return not same_anchor(self, other)
else:
return self._internal.spark_frame is not other._internal.spark_frame
def align_diff_index_ops(
func: Callable[..., Column], this_index_ops: SeriesOrIndex, *args: Any
) -> SeriesOrIndex:
"""
Align the `IndexOpsMixin` objects and apply the function.
Parameters
----------
func : The function to apply
this_index_ops : IndexOpsMixin
A base `IndexOpsMixin` object
args : list of other arguments including other `IndexOpsMixin` objects
Returns
-------
`Index` if all `this_index_ops` and arguments are `Index`; otherwise `Series`
"""
from pyspark.pandas.indexes import Index
from pyspark.pandas.series import Series, first_series
cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]
if isinstance(this_index_ops, Series) and all(isinstance(col, Series) for col in cols):
combined = combine_frames(
this_index_ops.to_frame(),
*[cast(Series, col).rename(i) for i, col in enumerate(cols)],
how="full"
)
return column_op(func)(
combined["this"]._psser_for(combined["this"]._internal.column_labels[0]),
*[
combined["that"]._psser_for(label)
for label in combined["that"]._internal.column_labels
]
).rename(this_index_ops.name)
else:
# This could cause as many counts, reset_index calls, joins for combining
# as the number of `Index`s in `args`. So far it's fine since we can assume the ops
# only work between at most two `Index`s. We might need to fix it in the future.
self_len = len(this_index_ops)
if any(len(col) != self_len for col in args if isinstance(col, IndexOpsMixin)):
raise ValueError("operands could not be broadcast together with shapes")
with option_context("compute.default_index_type", "distributed-sequence"):
if isinstance(this_index_ops, Index) and all(isinstance(col, Index) for col in cols):
return Index(
column_op(func)(
this_index_ops.to_series().reset_index(drop=True),
*[
arg.to_series().reset_index(drop=True)
if isinstance(arg, Index)
else arg
for arg in args
]
).sort_index(),
name=this_index_ops.name,
)
elif isinstance(this_index_ops, Series):
this = cast(DataFrame, this_index_ops.reset_index())
that = [
cast(Series, col.to_series() if isinstance(col, Index) else col)
.rename(i)
.reset_index(drop=True)
for i, col in enumerate(cols)
]
combined = combine_frames(this, *that, how="full").sort_index()
combined = combined.set_index(
combined._internal.column_labels[: this_index_ops._internal.index_level]
)
combined.index.names = this_index_ops._internal.index_names
return column_op(func)(
first_series(combined["this"]),
*[
combined["that"]._psser_for(label)
for label in combined["that"]._internal.column_labels
]
).rename(this_index_ops.name)
else:
this = cast(Index, this_index_ops).to_frame().reset_index(drop=True)
that_series = next(col for col in cols if isinstance(col, Series))
that_frame = that_series._psdf[
[
cast(Series, col.to_series() if isinstance(col, Index) else col).rename(i)
for i, col in enumerate(cols)
]
]
combined = combine_frames(this, that_frame.reset_index()).sort_index()
self_index = (
combined["this"].set_index(combined["this"]._internal.column_labels).index
)
other = combined["that"].set_index(
combined["that"]._internal.column_labels[: that_series._internal.index_level]
)
other.index.names = that_series._internal.index_names
return column_op(func)(
self_index,
*[
other._psser_for(label)
for label, col in zip(other._internal.column_labels, cols)
]
).rename(that_series.name)
def booleanize_null(scol: Column, f: Callable[..., Column]) -> Column:
"""
Booleanize Null in Spark Column
"""
comp_ops = [
getattr(Column, "__{}__".format(comp_op))
for comp_op in ["eq", "ne", "lt", "le", "ge", "gt"]
]
if f in comp_ops:
# if `f` is "!=", fill null with True otherwise False
filler = f == Column.__ne__
scol = F.when(scol.isNull(), filler).otherwise(scol)
return scol
def column_op(f: Callable[..., Column]) -> Callable[..., SeriesOrIndex]:
"""
A decorator that wraps APIs taking/returning Spark Column so that pandas-on-Spark Series can be
supported too. If this decorator is used for the `f` function that takes Spark Column and
returns Spark Column, decorated `f` takes pandas-on-Spark Series as well and returns
pandas-on-Spark Series.
:param f: a function that takes Spark Column and returns Spark Column.
:param self: pandas-on-Spark Series
:param args: arguments that the function `f` takes.
"""
@wraps(f)
def wrapper(self: SeriesOrIndex, *args: Any) -> SeriesOrIndex:
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.series import Series
# It is possible for the function `f` to take arguments other than Spark Columns.
# To cover this case, explicitly check if the argument is a pandas-on-Spark Series and
# extract the Spark Column. Other arguments are used as-is.
cols = [arg for arg in args if isinstance(arg, (Series, Index))]
if all(not should_alignment_for_column_op(self, col) for col in cols):
# Same DataFrame anchors
scol = f(
self.spark.column,
*[arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args]
)
field = InternalField.from_struct_field(
self._internal.spark_frame.select(scol).schema[0],
use_extension_dtypes=any(
isinstance(col.dtype, extension_dtypes) for col in [self] + cols
),
)
if not field.is_extension_dtype:
scol = booleanize_null(scol, f).alias(field.name)
if isinstance(self, Series) or not any(isinstance(col, Series) for col in cols):
index_ops = self._with_new_scol(scol, field=field)
else:
psser = next(col for col in cols if isinstance(col, Series))
index_ops = psser._with_new_scol(scol, field=field)
elif get_option("compute.ops_on_diff_frames"):
index_ops = align_diff_index_ops(f, self, *args)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
if not all(self.name == col.name for col in cols):
index_ops = index_ops.rename(None)
return index_ops
return wrapper
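# As an illustration (hypothetical usage): ``column_op(Column.__gt__)(psser, 1)``
# applies ``>`` on the underlying Spark Column and returns a pandas-on-Spark
# Series, aligning the operands first when they come from different DataFrames.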
def numpy_column_op(f: Callable[..., Column]) -> Callable[..., SeriesOrIndex]:
@wraps(f)
def wrapper(self: SeriesOrIndex, *args: Any) -> SeriesOrIndex:
# PySpark does not support NumPy type out of the box. For now, we convert NumPy types
# into some primitive types understandable in PySpark.
new_args = []
for arg in args:
# TODO: This is a quick hack to support NumPy type. We should revisit this.
if isinstance(self.spark.data_type, LongType) and isinstance(arg, np.timedelta64):
new_args.append(float(arg / np.timedelta64(1, "s")))
else:
new_args.append(arg)
return column_op(f)(self, *new_args)
return wrapper
class IndexOpsMixin(object, metaclass=ABCMeta):
"""common ops mixin to support a unified interface / docs for Series / Index
Assumes the following attributes, properties, and functions exist.
"""
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@property
@abstractmethod
def _psdf(self) -> DataFrame:
pass
@abstractmethod
def _with_new_scol(
self: IndexOpsLike, scol: Column, *, field: Optional[InternalField] = None
) -> IndexOpsLike:
pass
@property
@abstractmethod
def _column_label(self) -> Optional[Label]:
pass
@property
@abstractmethod
def spark(self: IndexOpsLike) -> SparkIndexOpsMethods[IndexOpsLike]:
pass
@property
def _dtype_op(self) -> "DataTypeOps":
from pyspark.pandas.data_type_ops.base import DataTypeOps
return DataTypeOps(self.dtype, self.spark.data_type)
@abstractmethod
def copy(self: IndexOpsLike) -> IndexOpsLike:
pass
# arithmetic operators
def __neg__(self: IndexOpsLike) -> IndexOpsLike:
return self._dtype_op.neg(self)
def __add__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.add(self, other)
def __sub__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.sub(self, other)
def __mul__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.mul(self, other)
def __truediv__(self, other: Any) -> SeriesOrIndex:
"""
__truediv__ has different behaviour between pandas and PySpark for several cases.
1. When dividing np.inf by zero, PySpark returns null whereas pandas returns np.inf
2. When dividing a positive number by zero, PySpark returns null whereas pandas returns np.inf
3. When dividing -np.inf by zero, PySpark returns null whereas pandas returns -np.inf
4. When dividing a negative number by zero, PySpark returns null whereas pandas returns -np.inf
+-----------------------+---------+---------+
| dividend (divisor: 0) | PySpark | pandas  |
|-----------------------|---------|---------|
| np.inf                | null    | np.inf  |
| -np.inf               | null    | -np.inf |
| 10                    | null    | np.inf  |
| -10                   | null    | -np.inf |
+-----------------------+---------+---------+
"""
return self._dtype_op.truediv(self, other)
def __mod__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.mod(self, other)
def __radd__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.radd(self, other)
def __rsub__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rsub(self, other)
def __rmul__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rmul(self, other)
def __rtruediv__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rtruediv(self, other)
def __floordiv__(self, other: Any) -> SeriesOrIndex:
"""
__floordiv__ has different behaviour between pandas and PySpark for several cases.
1. When dividing np.inf by zero, PySpark returns null whereas pandas returns np.inf
2. When dividing a positive number by zero, PySpark returns null whereas pandas returns np.inf
3. When dividing -np.inf by zero, PySpark returns null whereas pandas returns -np.inf
4. When dividing a negative number by zero, PySpark returns null whereas pandas returns -np.inf
+-----------------------+---------+---------+
| dividend (divisor: 0) | PySpark | pandas  |
|-----------------------|---------|---------|
| np.inf                | null    | np.inf  |
| -np.inf               | null    | -np.inf |
| 10                    | null    | np.inf  |
| -10                   | null    | -np.inf |
+-----------------------+---------+---------+
"""
return self._dtype_op.floordiv(self, other)
def __rfloordiv__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rfloordiv(self, other)
def __rmod__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rmod(self, other)
def __pow__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.pow(self, other)
def __rpow__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rpow(self, other)
def __abs__(self: IndexOpsLike) -> IndexOpsLike:
return self._dtype_op.abs(self)
# comparison operators
def __eq__(self, other: Any) -> SeriesOrIndex: # type: ignore[override]
return self._dtype_op.eq(self, other)
def __ne__(self, other: Any) -> SeriesOrIndex: # type: ignore[override]
return self._dtype_op.ne(self, other)
def __lt__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.lt(self, other)
def __le__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.le(self, other)
def __ge__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.ge(self, other)
def __gt__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.gt(self, other)
def __invert__(self: IndexOpsLike) -> IndexOpsLike:
return self._dtype_op.invert(self)
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
def __and__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.__and__(self, other)
def __or__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.__or__(self, other)
def __rand__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rand(self, other)
def __ror__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.ror(self, other)
def __len__(self) -> int:
return len(self._psdf)
# NDArray Compat
def __array_ufunc__(
self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any
) -> SeriesOrIndex:
from pyspark.pandas import numpy_compat
# Try dunder methods first.
result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
# After that, we try with PySpark APIs.
if result is NotImplemented:
result = numpy_compat.maybe_dispatch_ufunc_to_spark_func(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return cast(SeriesOrIndex, result)
else:
# TODO: support more APIs?
raise NotImplementedError(
"pandas-on-Spark objects currently do not support %s." % ufunc
)
@property
def dtype(self) -> Dtype:
"""Return the dtype object of the underlying data.
Examples
--------
>>> s = ps.Series([1, 2, 3])
>>> s.dtype
dtype('int64')
>>> s = ps.Series(list('abc'))
>>> s.dtype
dtype('O')
>>> s = ps.Series(pd.date_range('20130101', periods=3))
>>> s.dtype
dtype('<M8[ns]')
>>> s.rename("a").to_frame().set_index("a").index.dtype
dtype('<M8[ns]')
"""
return self._internal.data_fields[0].dtype
@property
def empty(self) -> bool:
"""
Returns true if the current object is empty. Otherwise, returns false.
>>> ps.range(10).id.empty
False
>>> ps.range(0).id.empty
True
>>> ps.DataFrame({}, index=list('abc')).index.empty
False
"""
return self._internal.resolved_copy.spark_frame.rdd.isEmpty()
@property
def hasnans(self) -> bool:
"""
Return True if it has any missing values. Otherwise, it returns False.
>>> ps.DataFrame({}, index=list('abc')).index.hasnans
False
>>> ps.Series(['a', None]).hasnans
True
>>> ps.Series([1.0, 2.0, np.nan]).hasnans
True
>>> ps.Series([1, 2, 3]).hasnans
False
>>> (ps.Series([1.0, 2.0, np.nan]) + 1).hasnans
True
>>> ps.Series([1, 2, 3]).rename("a").to_frame().set_index("a").index.hasnans
False
"""
sdf = self._internal.spark_frame
scol = self.spark.column
if isinstance(self.spark.data_type, (DoubleType, FloatType)):
return sdf.select(F.max(scol.isNull() | F.isnan(scol))).collect()[0][0]
else:
return sdf.select(F.max(scol.isNull())).collect()[0][0]
@property
def is_monotonic(self) -> bool:
"""
Return boolean if values in the object are monotonically increasing.
.. note:: the current implementation of is_monotonic requires shuffling
and aggregating multiple times to check the order locally and globally,
which is potentially expensive. In the case of a multi-index, all data are
transferred to a single node, which can easily cause an out-of-memory error currently.
.. note:: Disable the Spark config `spark.sql.optimizer.nestedSchemaPruning.enabled`
for multi-index if you're using pandas-on-Spark < 1.7.0 with PySpark 3.1.1.
Returns
-------
is_monotonic : bool
Examples
--------
>>> ser = ps.Series(['1/1/2018', '3/1/2018', '4/1/2018'])
>>> ser.is_monotonic
True
>>> df = ps.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']})
>>> df.dates.is_monotonic
False
>>> df.index.is_monotonic
True
>>> ser = ps.Series([1])
>>> ser.is_monotonic
True
>>> ser = ps.Series([])
>>> ser.is_monotonic
True
>>> ser.rename("a").to_frame().set_index("a").index.is_monotonic
True
>>> ser = ps.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
>>> ser.is_monotonic
False
>>> ser.index.is_monotonic
True
Support for MultiIndex
>>> midx = ps.MultiIndex.from_tuples(
... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('y', 'c'),
('y', 'd'),
('z', 'e')],
)
>>> midx.is_monotonic
True
>>> midx = ps.MultiIndex.from_tuples(
... [('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('z', 'a'),
('z', 'b'),
('y', 'c'),
('y', 'd'),
('x', 'e')],
)
>>> midx.is_monotonic
False
"""
return self._is_monotonic("increasing")
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self) -> bool:
"""
Return boolean if values in the object are monotonically decreasing.
.. note:: the current implementation of is_monotonic_decreasing requires shuffling
and aggregating multiple times to check the order locally and globally,
which is potentially expensive. In the case of a multi-index, all data are transferred
to a single node, which can easily cause an out-of-memory error currently.
.. note:: Disable the Spark config `spark.sql.optimizer.nestedSchemaPruning.enabled`
for multi-index if you're using pandas-on-Spark < 1.7.0 with PySpark 3.1.1.
Returns
-------
is_monotonic : bool
Examples
--------
>>> ser = ps.Series(['4/1/2018', '3/1/2018', '1/1/2018'])
>>> ser.is_monotonic_decreasing
True
>>> df = ps.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']})
>>> df.dates.is_monotonic_decreasing
False
>>> df.index.is_monotonic_decreasing
False
>>> ser = ps.Series([1])
>>> ser.is_monotonic_decreasing
True
>>> ser = ps.Series([])
>>> ser.is_monotonic_decreasing
True
>>> ser.rename("a").to_frame().set_index("a").index.is_monotonic_decreasing
True
>>> ser = ps.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
>>> ser.is_monotonic_decreasing
True
>>> ser.index.is_monotonic_decreasing
False
Support for MultiIndex
>>> midx = ps.MultiIndex.from_tuples(
... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('y', 'c'),
('y', 'd'),
('z', 'e')],
)
>>> midx.is_monotonic_decreasing
False
>>> midx = ps.MultiIndex.from_tuples(
... [('z', 'e'), ('z', 'd'), ('y', 'c'), ('y', 'b'), ('x', 'a')])
>>> midx # doctest: +SKIP
MultiIndex([('z', 'e'),
('z', 'd'),
('y', 'c'),
('y', 'b'),
('x', 'a')],
)
>>> midx.is_monotonic_decreasing
True
"""
return self._is_monotonic("decreasing")
def _is_locally_monotonic_spark_column(self, order: str) -> Column:
window = (
Window.partitionBy(F.col("__partition_id"))
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-1, -1)
)
if order == "increasing":
return (F.col("__origin") >= F.lag(F.col("__origin"), 1).over(window)) & F.col(
"__origin"
).isNotNull()
else:
return (F.col("__origin") <= F.lag(F.col("__origin"), 1).over(window)) & F.col(
"__origin"
).isNotNull()
def _is_monotonic(self, order: str) -> bool:
assert order in ("increasing", "decreasing")
sdf = self._internal.spark_frame
sdf = (
sdf.select(
F.spark_partition_id().alias(
"__partition_id"
), # Make sure we use the same partition id in the whole job.
F.col(NATURAL_ORDER_COLUMN_NAME),
self.spark.column.alias("__origin"),
)
.select(
F.col("__partition_id"),
F.col("__origin"),
self._is_locally_monotonic_spark_column(order).alias(
"__comparison_within_partition"
),
)
.groupby(F.col("__partition_id"))
.agg(
F.min(F.col("__origin")).alias("__partition_min"),
F.max(F.col("__origin")).alias("__partition_max"),
F.min(F.coalesce(F.col("__comparison_within_partition"), SF.lit(True))).alias(
"__comparison_within_partition"
),
)
)
# Now we're windowing the aggregation results without partition specification.
# The number of rows here will be the same as the number of partitions, which is expected
# to be small.
window = Window.orderBy(F.col("__partition_id")).rowsBetween(-1, -1)
if order == "increasing":
comparison_col = F.col("__partition_min") >= F.lag(F.col("__partition_max"), 1).over(
window
)
else:
comparison_col = F.col("__partition_min") <= F.lag(F.col("__partition_max"), 1).over(
window
)
sdf = sdf.select(
comparison_col.alias("__comparison_between_partitions"),
F.col("__comparison_within_partition"),
)
ret = sdf.select(
F.min(F.coalesce(F.col("__comparison_between_partitions"), SF.lit(True)))
& F.min(F.coalesce(F.col("__comparison_within_partition"), SF.lit(True)))
).collect()[0][0]
if ret is None:
return True
else:
return ret
@property
def ndim(self) -> int:
"""
Return an int representing the number of array dimensions.
Return 1 for Series / Index / MultiIndex.
Examples
--------
For Series
>>> s = ps.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8])
>>> s.ndim
1
For Index
>>> s.index.ndim
1
For MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [1, 1, 1, 1, 1, 2, 1, 2, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s.index.ndim
1
"""
return 1
def astype(self: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
"""
Cast a pandas-on-Spark object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> ser = ps.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
>>> ser.rename("a").to_frame().set_index("a").index.astype('int64')
Int64Index([1, 2], dtype='int64', name='a')
"""
return self._dtype_op.astype(self, dtype)
def isin(self: IndexOpsLike, values: Sequence[Any]) -> IndexOpsLike:
"""
Check whether `values` are contained in Series or Index.
Return a boolean Series or Index showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test.
Returns
-------
isin : Series (bool dtype) or Index (bool dtype)
Examples
--------
>>> s = ps.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
>>> s.rename("a").to_frame().set_index("a").index.isin(['lama'])
Index([True, False, True, False, True, False], dtype='object', name='a')
"""
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]".format(values_type=type(values).__name__)
)
values = values.tolist() if isinstance(values, np.ndarray) else list(values)
return self._with_new_scol(self.spark.column.isin([SF.lit(v) for v in values]))
def isnull(self: IndexOpsLike) -> IndexOpsLike:
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values. Characters such as empty strings '' or
numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
Returns
-------
Series or Index : Mask of bool values for each element in Series
that indicates whether an element is an NA value.
Examples
--------
>>> ser = ps.Series([5, 6, np.NaN])
>>> ser.isna() # doctest: +NORMALIZE_WHITESPACE
0 False
1 False
2 True
dtype: bool
>>> ser.rename("a").to_frame().set_index("a").index.isna()
Index([False, False, True], dtype='object', name='a')
"""
from pyspark.pandas.indexes import MultiIndex
if isinstance(self, MultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
return self._dtype_op.isnull(self)
isna = isnull
def notnull(self: IndexOpsLike) -> IndexOpsLike:
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True.
Characters such as empty strings '' or numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
NA values, such as None or numpy.NaN, get mapped to False values.
Returns
-------
Series or Index : Mask of bool values for each element in Series
that indicates whether an element is not an NA value.
Examples
--------
Show which entries in a Series are not NA.
>>> ser = ps.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
>>> ser.rename("a").to_frame().set_index("a").index.notna()
Index([True, True, False], dtype='object', name='a')
"""
from pyspark.pandas.indexes import MultiIndex
if isinstance(self, MultiIndex):
raise NotImplementedError("notna is not defined for MultiIndex")
return (~self.isnull()).rename(self.name) # type: ignore
notna = notnull
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Axis = 0) -> bool:
"""
Return whether all elements are True.
Returns True unless there is at least one element within the series that is
False or equivalent (e.g. zero or empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
>>> ps.Series([True, True]).all()
True
>>> ps.Series([True, False]).all()
False
>>> ps.Series([0, 1]).all()
False
>>> ps.Series([1, 2, 3]).all()
True
>>> ps.Series([True, True, None]).all()
True
>>> ps.Series([True, False, None]).all()
False
>>> ps.Series([]).all()
True
>>> ps.Series([np.nan]).all()
True
>>> df = ps.Series([True, False, None]).rename("a").to_frame()
>>> df.set_index("a").index.all()
False
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
sdf = self._internal.spark_frame.select(self.spark.column)
col = scol_for(sdf, sdf.columns[0])
# Note that we're ignoring `None`s here for now.
# `any` and `every` were added in Spark 3.0
# ret = sdf.select(F.expr("every(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
# Here we use min as its alternative:
ret = sdf.select(F.min(F.coalesce(col.cast("boolean"), SF.lit(True)))).collect()[0][0]
if ret is None:
return True
else:
return ret
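# Worked sketch (hypothetical values, not part of the original code): for
# [True, None, False] the coalesce maps None -> True, and min over booleans acts as
# a logical AND (False < True in Spark), giving False; for [True, True, None] it
# gives True, matching the doctests above.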
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Axis = 0) -> bool:
"""
Return whether any element is True.
Returns False unless there is at least one element within the series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
>>> ps.Series([False, False]).any()
False
>>> ps.Series([True, False]).any()
True
>>> ps.Series([0, 0]).any()
False
>>> ps.Series([0, 1, 2]).any()
True
>>> ps.Series([False, False, None]).any()
False
>>> ps.Series([True, False, None]).any()
True
>>> ps.Series([]).any()
False
>>> ps.Series([np.nan]).any()
False
>>> df = ps.Series([True, False, None]).rename("a").to_frame()
>>> df.set_index("a").index.any()
True
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
sdf = self._internal.spark_frame.select(self.spark.column)
col = scol_for(sdf, sdf.columns[0])
# Note that we're ignoring `None`s here for now.
# `any` and `every` were added in Spark 3.0
# ret = sdf.select(F.expr("any(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
# Here we use max as its alternative:
ret = sdf.select(F.max(F.coalesce(col.cast("boolean"), SF.lit(False)))).collect()[0][0]
if ret is None:
return False
else:
return ret
# TODO: add freq and axis parameters
def shift(
self: IndexOpsLike, periods: int = 1, fill_value: Optional[Any] = None
) -> IndexOpsLike:
"""
Shift Series/Index by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance degradation.
Avoid this method with very large datasets.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input Series/Index, shifted.
Examples
--------
>>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.Col1.shift(periods=3)
0 NaN
1 NaN
2 NaN
3 10.0
4 20.0
Name: Col1, dtype: float64
>>> df.Col2.shift(periods=3, fill_value=0)
0 0
1 0
2 0
3 13
4 23
Name: Col2, dtype: int64
>>> df.index.shift(periods=3, fill_value=0)
Int64Index([0, 0, 0, 0, 1], dtype='int64')
"""
return self._shift(periods, fill_value).spark.analyzed
def _shift(
self: IndexOpsLike,
periods: int,
fill_value: Any,
*,
part_cols: Sequence["ColumnOrName"] = ()
) -> IndexOpsLike:
if not isinstance(periods, int):
raise TypeError("periods should be an int; however, got [%s]" % type(periods).__name__)
col = self.spark.column
window = (
Window.partitionBy(*part_cols)
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-periods, -periods)
)
lag_col = F.lag(col, periods).over(window)
col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)
return self._with_new_scol(col, field=self._internal.data_fields[0].copy(nullable=True))
# TODO: Update Documentation for Bins Parameter when its supported
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins: None = None,
dropna: bool = True,
) -> "Series":
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : Not Yet Supported
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
Examples
--------
For Series
>>> df = ps.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})
>>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
Name: x, dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE
1.0 0.6
0.0 0.4
Name: x, dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
NaN 1
Name: x, dtype: int64
For Index
>>> idx = ps.Index([3, 1, 2, 3, 4, np.nan])
>>> idx
Float64Index([3.0, 1.0, 2.0, 3.0, 4.0, nan], dtype='float64')
>>> idx.value_counts().sort_index()
1.0 1
2.0 1
3.0 2
4.0 1
dtype: int64
**sort**
With `sort` set to `False`, the result is not sorted by count.
>>> idx.value_counts(sort=False).sort_index()
1.0 1
2.0 1
3.0 2
4.0 1
dtype: int64
**normalize**
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> idx.value_counts(normalize=True).sort_index()
1.0 0.2
2.0 0.2
3.0 0.4
4.0 0.2
dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> idx.value_counts(dropna=False).sort_index() # doctest: +SKIP
1.0 1
2.0 1
3.0 2
4.0 1
NaN 1
dtype: int64
For MultiIndex.
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [1, 1, 1, 1, 1, 2, 1, 2, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s.index # doctest: +SKIP
MultiIndex([( 'lama', 'weight'),
( 'lama', 'weight'),
( 'lama', 'weight'),
( 'cow', 'weight'),
( 'cow', 'weight'),
( 'cow', 'length'),
('falcon', 'weight'),
('falcon', 'length'),
('falcon', 'length')],
)
>>> s.index.value_counts().sort_index()
(cow, length) 1
(cow, weight) 2
(falcon, length) 2
(falcon, weight) 1
(lama, weight) 3
dtype: int64
>>> s.index.value_counts(normalize=True).sort_index()
(cow, length) 0.111111
(cow, weight) 0.222222
(falcon, length) 0.222222
(falcon, weight) 0.111111
(lama, weight) 0.333333
dtype: float64
If the Index has a name, the name is kept in the result.
>>> idx = ps.Index([0, 0, 0, 1, 1, 2, 3], name='pandas-on-Spark')
>>> idx.value_counts().sort_index()
0 3
1 2
2 1
3 1
Name: pandas-on-Spark, dtype: int64
"""
from pyspark.pandas.series import first_series
if bins is not None:
raise NotImplementedError("value_counts currently does not support bins")
if dropna:
sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna()
else:
sdf_dropna = self._internal.spark_frame.select(self.spark.column)
index_name = SPARK_DEFAULT_INDEX_NAME
column_name = self._internal.data_spark_column_names[0]
sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count()
if sort:
if ascending:
sdf = sdf.orderBy(F.col("count"))
else:
sdf = sdf.orderBy(F.col("count").desc())
if normalize:
sum = sdf_dropna.count()
sdf = sdf.withColumn("count", F.col("count") / SF.lit(sum))
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, index_name)],
column_labels=self._internal.column_labels,
data_spark_columns=[scol_for(sdf, "count")],
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal))
def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique values.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amounts of data.
Note: This parameter is specific to pandas-on-Spark and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to pandas-on-Spark.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> ps.Series([1, 2, 3, np.nan]).nunique()
3
>>> ps.Series([1, 2, 3, np.nan]).nunique(dropna=False)
4
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> ps.Series([1, 2, 3, np.nan]).nunique(approx=True)
3
>>> idx = ps.Index([1, 1, 2, None])
>>> idx
Float64Index([1.0, 1.0, 2.0, nan], dtype='float64')
>>> idx.nunique()
2
>>> idx.nunique(dropna=False)
3
"""
res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)])
return res.collect()[0][0]
def _nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> Column:
colname = self._internal.data_spark_column_names[0]
count_fn = cast(
Callable[[Column], Column],
partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct,
)
if dropna:
return count_fn(self.spark.column).alias(colname)
else:
return (
count_fn(self.spark.column)
+ F.when(
F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1, 1
).otherwise(0)
).alias(colname)
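# Worked sketch (hypothetical values, not part of the original code): for the column
# [1, 1, 2, None] with dropna=False, countDistinct gives 2 and the F.when(...) branch
# adds 1 because at least one null exists, for a total of 3, matching the
# idx.nunique(dropna=False) example in the docstring above.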
def take(self: IndexOpsLike, indices: Sequence[int]) -> IndexOpsLike:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
Series
>>> psser = ps.Series([100, 200, 300, 400, 500])
>>> psser
0 100
1 200
2 300
3 400
4 500
dtype: int64
>>> psser.take([0, 2, 4]).sort_index()
0 100
2 300
4 500
dtype: int64
Index
>>> psidx = ps.Index([100, 200, 300, 400, 500])
>>> psidx
Int64Index([100, 200, 300, 400, 500], dtype='int64')
>>> psidx.take([0, 2, 4]).sort_values()
Int64Index([100, 300, 500], dtype='int64')
MultiIndex
>>> psmidx = ps.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "c")])
>>> psmidx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('x', 'c')],
)
>>> psmidx.take([0, 2]) # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'c')],
)
"""
if not is_list_like(indices) or isinstance(indices, (dict, set)):
raise TypeError("`indices` must be a list-like except dict or set")
if isinstance(self, ps.Series):
return cast(IndexOpsLike, self.iloc[indices])
else:
return cast(IndexOpsLike, self._psdf.iloc[indices].index)
def factorize(
self: IndexOpsLike, sort: bool = True, na_sentinel: Optional[int] = -1
) -> Tuple[IndexOpsLike, pd.Index]:
"""
Encode the object as an enumerated type or categorical variable.
This method is useful for obtaining a numeric representation of an
array when all that matters is identifying distinct values.
Parameters
----------
sort : bool, default True
na_sentinel : int or None, default -1
Value to mark "not found". If None, will not drop the NaN
from the uniques of the values.
Returns
-------
codes : Series or Index
A Series or Index that's an indexer into `uniques`.
``uniques.take(codes)`` will have the same values as `values`.
uniques : pd.Index
The unique valid values.
.. note::
Even if there's a missing value in `values`, `uniques` will
*not* contain an entry for it.
Examples
--------
>>> psser = ps.Series(['b', None, 'a', 'c', 'b'])
>>> codes, uniques = psser.factorize()
>>> codes
0 1
1 -1
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
>>> codes, uniques = psser.factorize(na_sentinel=None)
>>> codes
0 1
1 3
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c', None], dtype='object')
>>> codes, uniques = psser.factorize(na_sentinel=-2)
>>> codes
0 1
1 -2
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
For Index:
>>> psidx = ps.Index(['b', None, 'a', 'c', 'b'])
>>> codes, uniques = psidx.factorize()
>>> codes
Int64Index([1, -1, 0, 2, 1], dtype='int64')
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
"""
from pyspark.pandas.series import first_series
assert (na_sentinel is None) or isinstance(na_sentinel, int)
assert sort is True
if isinstance(self.dtype, CategoricalDtype):
categories = self.dtype.categories
if len(categories) == 0:
scol = SF.lit(None)
else:
kvs = list(
chain(
*[
(SF.lit(code), SF.lit(category))
for code, category in enumerate(categories)
]
)
)
map_scol = F.create_map(*kvs)
scol = map_scol.getItem(self.spark.column)
codes, uniques = self._with_new_scol(
scol.alias(self._internal.data_spark_column_names[0])
).factorize(na_sentinel=na_sentinel)
return codes, uniques.astype(self.dtype)
uniq_sdf = self._internal.spark_frame.select(self.spark.column).distinct()
# Check the number of uniques and construct a sorted `uniques_list`
max_compute_count = get_option("compute.max_rows")
if max_compute_count is not None:
uniq_pdf = uniq_sdf.limit(max_compute_count + 1).toPandas()
if len(uniq_pdf) > max_compute_count:
raise ValueError(
"Current Series has more than {0} unique values. "
"Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' "
"to more than {0} rows. Note that, before changing the "
"'compute.max_rows', this operation is considerably expensive.".format(
max_compute_count
)
)
else:
uniq_pdf = uniq_sdf.toPandas()
# pandas maps both NaN and null from Spark to np.nan, so de-duplication is required
uniq_series = first_series(uniq_pdf).drop_duplicates()
uniques_list = uniq_series.tolist()
uniques_list = sorted(uniques_list, key=lambda x: (pd.isna(x), x))
# Construct `unique_to_code`, mapping each non-NA unique value to its code
unique_to_code = {}
if na_sentinel is not None:
na_sentinel_code = na_sentinel
code = 0
for unique in uniques_list:
if pd.isna(unique):
if na_sentinel is None:
na_sentinel_code = code
else:
unique_to_code[unique] = code
code += 1
kvs = list(
chain(*([(SF.lit(unique), SF.lit(code)) for unique, code in unique_to_code.items()]))
)
if len(kvs) == 0: # uniques are all missing values
new_scol = SF.lit(na_sentinel_code)
else:
scol = self.spark.column
if isinstance(self.spark.data_type, (FloatType, DoubleType)):
cond = scol.isNull() | F.isnan(scol)
else:
cond = scol.isNull()
map_scol = F.create_map(*kvs)
null_scol = F.when(cond, SF.lit(na_sentinel_code))
new_scol = null_scol.otherwise(map_scol.getItem(scol))
codes = self._with_new_scol(new_scol.alias(self._internal.data_spark_column_names[0]))
if na_sentinel is not None:
# Drops the NaN from the uniques of the values
uniques_list = [x for x in uniques_list if not pd.isna(x)]
uniques = pd.Index(uniques_list)
return codes, uniques
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.base
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.base.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.base tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.base,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| []
| []
| [
"SPARK_HOME"
]
| [] | ["SPARK_HOME"] | python | 1 | 0 | |
flash/text/seq2seq/core/data.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Union
import datasets
import torch
from datasets import DatasetDict, load_dataset
from torch import Tensor
from transformers import AutoTokenizer, default_data_collator
from flash.data.data_module import DataModule
from flash.data.data_source import DataSource, DefaultDataSources
from flash.data.process import Preprocess
class Seq2SeqDataSource(DataSource):
def __init__(
self,
backbone: str,
max_source_length: int = 128,
max_target_length: int = 128,
padding: Union[str, bool] = 'max_length'
):
super().__init__()
self.tokenizer = AutoTokenizer.from_pretrained(backbone, use_fast=True)
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.padding = padding
def _tokenize_fn(
self,
ex: Union[Dict[str, str], str],
input: Optional[str] = None,
target: Optional[str] = None,
) -> Any:
if isinstance(ex, dict):
ex_input = ex[input]
ex_target = ex[target] if target else None
else:
ex_input = ex
ex_target = None
return self.tokenizer.prepare_seq2seq_batch(
src_texts=ex_input,
tgt_texts=ex_target,
max_length=self.max_source_length,
max_target_length=self.max_target_length,
padding=self.padding,
)
class Seq2SeqFileDataSource(Seq2SeqDataSource):
def __init__(
self,
filetype: str,
backbone: str,
max_source_length: int = 128,
max_target_length: int = 128,
padding: Union[str, bool] = 'max_length',
):
super().__init__(backbone, max_source_length, max_target_length, padding)
self.filetype = filetype
def load_data(
self,
data: Any,
use_full: bool = False,
columns: List[str] = ["input_ids", "attention_mask", "labels"]
) -> 'datasets.Dataset':
file, input, target = data
data_files = {}
stage = self._running_stage.value
data_files[stage] = str(file)
# FLASH_TESTING is set in the CI to run faster.
if use_full and os.getenv("FLASH_TESTING", "0") == "0":
dataset_dict = load_dataset(self.filetype, data_files=data_files)
else:
# used for debugging. Avoid processing the entire dataset # noqa E265
try:
dataset_dict = DatasetDict({
stage: load_dataset(self.filetype, data_files=data_files, split=[f'{stage}[:20]'])[0]
})
except AssertionError:
dataset_dict = load_dataset(self.filetype, data_files=data_files)
dataset_dict = dataset_dict.map(partial(self._tokenize_fn, input=input, target=target), batched=True)
dataset_dict.set_format(columns=columns)
return dataset_dict[stage]
def predict_load_data(self, data: Any) -> Union['datasets.Dataset', List[Dict[str, torch.Tensor]]]:
return self.load_data(data, use_full=False, columns=["input_ids", "attention_mask"])
class Seq2SeqCSVDataSource(Seq2SeqFileDataSource):
def __init__(
self,
backbone: str,
max_source_length: int = 128,
max_target_length: int = 128,
padding: Union[str, bool] = 'max_length',
):
super().__init__(
"csv",
backbone,
max_source_length=max_source_length,
max_target_length=max_target_length,
padding=padding,
)
class Seq2SeqJSONDataSource(Seq2SeqFileDataSource):
def __init__(
self,
backbone: str,
max_source_length: int = 128,
max_target_length: int = 128,
padding: Union[str, bool] = 'max_length',
):
super().__init__(
"json",
backbone,
max_source_length=max_source_length,
max_target_length=max_target_length,
padding=padding,
)
class Seq2SeqSentencesDataSource(Seq2SeqDataSource):
def load_data(
self,
data: Union[str, List[str]],
dataset: Optional[Any] = None,
) -> List[Any]:
if isinstance(data, str):
data = [data]
return [self._tokenize_fn(s) for s in data]
class Seq2SeqPreprocess(Preprocess):
def __init__(
self,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
backbone: str = "sshleifer/tiny-mbart",
max_source_length: int = 128,
max_target_length: int = 128,
padding: Union[str, bool] = 'max_length'
):
self.backbone = backbone
self.max_target_length = max_target_length
self.max_source_length = max_source_length
self.padding = padding
super().__init__(
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_sources={
DefaultDataSources.CSV: Seq2SeqCSVDataSource(
self.backbone,
max_source_length=max_source_length,
max_target_length=max_target_length,
padding=padding,
),
DefaultDataSources.JSON: Seq2SeqJSONDataSource(
self.backbone,
max_source_length=max_source_length,
max_target_length=max_target_length,
padding=padding,
),
"sentences": Seq2SeqSentencesDataSource(
self.backbone,
max_source_length=max_source_length,
max_target_length=max_target_length,
padding=padding,
),
},
default_data_source="sentences",
)
def get_state_dict(self) -> Dict[str, Any]:
return {
**self.transforms,
"backbone": self.backbone,
"max_source_length": self.max_source_length,
"max_target_length": self.max_target_length,
"padding": self.padding,
}
@classmethod
def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool):
return cls(**state_dict)
def collate(self, samples: Any) -> Tensor:
"""Override to convert a set of samples to a batch"""
return default_data_collator(samples)
class Seq2SeqData(DataModule):
"""Data module for Seq2Seq tasks."""
preprocess_cls = Seq2SeqPreprocess
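# Minimal usage sketch (hypothetical, not part of the original module): tokenizing raw
# sentences with the data source defined above. Assumes the `transformers` backbone
# "sshleifer/tiny-mbart" can be downloaded.
if __name__ == "__main__":
    _source = Seq2SeqSentencesDataSource(backbone="sshleifer/tiny-mbart")
    _samples = _source.load_data(["translate this sentence", "and this one too"])
    # each sample is a tokenized batch with "input_ids" and "attention_mask" entries
    print(list(_samples[0].keys()))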
| []
| []
| [
"FLASH_TESTING"
]
| [] | ["FLASH_TESTING"] | python | 1 | 0 | |
_tools/src/github.com/gomeet/gomeet/utils/project/helpers/helpers.go | package helpers
import (
"log"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
"strings"
"github.com/fatih/color"
"github.com/xtgo/set"
)
const (
GomeetDefaultPrefixes = "svc-,gomeet-svc-"
GomeetRetoolRev = "64982169262e2ced1b9d0e9e328fe5fdd7a336f9"
)
type Empty struct{}
type PkgNfo struct {
goPkg string
name string
path string
shortName string
prefix string
defaultPrefixes string
projectGroupGoPkg string
projectGroupName string
}
type LogType int
const (
LogError LogType = iota - 1 // -1
LogDangerous // 0
LogSkipping // 1
LogReplacing // 2
LogCreating // 3
LogInfo // 4
)
func Log(t LogType, msg string) {
var p, head string
switch t {
case LogError:
p, head = "%s - %s\n", color.RedString("[Error]")
case LogDangerous:
p, head = "%s - %s\n", color.RedString("[Dangerous]")
case LogSkipping:
p, head = "%s - %s\n", color.YellowString("[Skipping]")
case LogReplacing:
p, head = "%s - %s\n", color.YellowString("[Replacing]")
case LogCreating:
p, head = "%s - %s\n", color.GreenString("[Creating]")
case LogInfo:
p, head = "%s - %s\n", color.CyanString("[Info]")
default:
p, head = "%s - %s\n", "[Unknow]"
}
log.Printf(p, head, msg)
}
func NewPkgNfo(goPkg, defaultPrefixes string) (*PkgNfo, error) {
pNfo := &PkgNfo{}
if err := pNfo.setGoPkg(goPkg); err != nil {
return nil, err
}
if err := pNfo.SetDefaultPrefixes(defaultPrefixes); err != nil {
return nil, err
}
return pNfo, nil
}
func (pNfo *PkgNfo) setGoPkg(goPkg string) (err error) {
if pNfo.path, err = Path(goPkg); err != nil {
return err
}
pNfo.goPkg = goPkg
pNfo.name = strings.ToLower(LastFromSplit(pNfo.GoPkg(), "/"))
if err = pNfo.SetDefaultPrefixes(pNfo.DefaultPrefixes()); err != nil {
return err
}
pNfo.projectGroupGoPkg = filepath.Dir(pNfo.GoPkg())
splitProjectGoPkg := strings.Split(pNfo.projectGroupGoPkg, string(filepath.Separator))
pNfo.projectGroupName = pNfo.name
if l := len(splitProjectGoPkg); l > 1 {
switch len(splitProjectGoPkg) {
case 1:
pNfo.projectGroupName = splitProjectGoPkg[0]
case 2:
pNfo.projectGroupName = splitProjectGoPkg[1]
default:
pNfo.projectGroupName = splitProjectGoPkg[l-1]
}
}
remplacer := strings.NewReplacer(".", "", "-", "")
pNfo.projectGroupName = remplacer.Replace(pNfo.projectGroupName)
return nil
}
func (pNfo *PkgNfo) setShortNameAndPrefix() {
pNfo.prefix, pNfo.shortName = ExtractPrefix(pNfo.Name(), pNfo.DefaultPrefixes())
}
func (pNfo *PkgNfo) SetDefaultPrefixes(s string) error {
pNfo.defaultPrefixes = NormalizeDefaultPrefixes(s)
pNfo.setShortNameAndPrefix()
return nil
}
func (pNfo PkgNfo) DefaultPrefixes() string { return pNfo.defaultPrefixes }
func (pNfo PkgNfo) Prefix() string { return pNfo.prefix }
func (pNfo PkgNfo) Name() string { return pNfo.name }
func (pNfo PkgNfo) ShortName() string { return pNfo.shortName }
func (pNfo PkgNfo) GoPkg() string { return pNfo.goPkg }
func (pNfo PkgNfo) ProjectGroupGoPkg() string { return pNfo.projectGroupGoPkg }
func (pNfo PkgNfo) ProjectGroupName() string { return pNfo.projectGroupName }
func (pNfo PkgNfo) Path() string { return pNfo.path }
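// illustratePkgNfo is a minimal sketch (hypothetical package path, not part of the
// original file) showing how NewPkgNfo derives the prefix and short name from a
// service's Go package path using the gomeet default prefixes.
func illustratePkgNfo() {
	pNfo, err := NewPkgNfo("github.com/gomeet/svc-echo", "")
	if err != nil {
		log.Fatal(err)
	}
	// Name() == "svc-echo", Prefix() == "svc-", ShortName() == "echo"
	log.Printf("name=%s prefix=%s short=%s", pNfo.Name(), pNfo.Prefix(), pNfo.ShortName())
}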
// Copied and re-worked from
// https://github.com/spf13/cobra/blob/master/cobra/cmd/helpers.go
func Path(inputPath string) (string, error) {
// if no path is provided... assume CWD.
if inputPath == "" {
x, err := os.Getwd()
if err != nil {
return "", err
}
return x, nil
}
var projectPath string
var projectBase string
srcPath := SrcPath()
// if provided, inspect for logical locations
if strings.ContainsRune(inputPath, os.PathSeparator) {
if filepath.IsAbs(inputPath) || filepath.HasPrefix(inputPath, string(os.PathSeparator)) {
// if Absolute, use it
projectPath = filepath.Clean(inputPath)
return projectPath, nil
}
// If not absolute but contains slashes,
// assuming it means create it from $GOPATH
count := strings.Count(inputPath, string(os.PathSeparator))
if count == 1 {
projectPath = filepath.Join(srcPath, "github.com", inputPath)
} else {
projectPath = filepath.Join(srcPath, inputPath)
}
return projectPath, nil
}
// hardest case.. just a word.
if projectBase == "" {
x, err := os.Getwd()
if err == nil {
projectPath = filepath.Join(x, inputPath)
return projectPath, nil
}
return "", err
}
projectPath = filepath.Join(srcPath, projectBase, inputPath)
return projectPath, nil
}
func NormalizeDefaultPrefixes(s string) string {
if s != "" {
prefixes := strings.Split(GomeetDefaultPrefixes+","+s, ",")
data := sort.StringSlice(prefixes)
sort.Sort(data)
n := set.Uniq(data)
prefixes = data[:n]
return strings.Join(prefixes, ",")
}
return GomeetDefaultPrefixes
}
func GomeetPkg() string {
return strings.TrimSuffix(reflect.TypeOf(Empty{}).PkgPath(), "/utils/project/helpers")
}
func ParseCmd(s string) []string {
r := regexp.MustCompile(`'.*?'|".*?"|\S+`)
res := r.FindAllString(s, -1)
for k, v := range res {
mod := strings.Trim(v, " ")
mod = strings.Trim(mod, "'")
mod = strings.Trim(mod, `"`)
mod = strings.Trim(mod, " ")
res[k] = mod
}
return res
}
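// illustrateParseCmd is a minimal sketch (hypothetical command line, not part of the
// original file): ParseCmd keeps quoted arguments together and strips the surrounding
// quotes from each token.
func illustrateParseCmd() {
	args := ParseCmd(`protoc --go_out=. "my file.proto"`)
	log.Printf("%q", args) // ["protoc" "--go_out=." "my file.proto"]
}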
func Base(absPath string) string {
rel, err := filepath.Rel(SrcPath(), absPath)
if err != nil {
return filepath.ToSlash(absPath)
}
return filepath.ToSlash(rel)
}
func ExtractPrefix(name, prefix string) (string, string) {
prefix = NormalizeDefaultPrefixes(prefix)
if prefix != "" {
prefixes := strings.Split(prefix, ",")
tv := false
for _, v := range prefixes {
v = strings.Trim(v, " ")
if strings.HasPrefix(name, v) {
name = strings.Replace(name, v, "", -1)
prefix = v
tv = true
break
}
}
if !tv {
prefix = ""
}
}
return prefix, name
}
func LastFromSplit(input, split string) string {
rel := strings.Split(input, split)
return rel[len(rel)-1]
}
func SrcPath() string {
return filepath.Join(os.Getenv("GOPATH"), "src") + string(os.PathSeparator)
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
tenant/pkg/webhook/default_server/server.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaultserver
import (
"fmt"
"os"
"sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission/builder"
)
var (
log = logf.Log.WithName("default_server")
builderMap = map[string]*builder.WebhookBuilder{}
// HandlerMap contains all admission webhook handlers.
HandlerMap = map[string][]admission.Handler{}
)
// Add adds itself to the manager
func Add(mgr manager.Manager) error {
ns := os.Getenv("POD_NAMESPACE")
if len(ns) == 0 {
ns = "default"
}
secretName := os.Getenv("SECRET_NAME")
if len(secretName) == 0 {
secretName = "webhook-server-secret"
}
temp_bool := true
svr, err := webhook.NewServer("foo-admission-server", mgr, webhook.ServerOptions{
// TODO(user): change the configuration of ServerOptions based on your need.
Port: 9876,
DisableWebhookConfigInstaller: &temp_bool,
})
if err != nil {
return err
}
var webhooks []webhook.Webhook
for k, builder := range builderMap {
handlers, ok := HandlerMap[k]
if !ok {
log.V(1).Info(fmt.Sprintf("can't find handlers for builder: %v", k))
handlers = []admission.Handler{}
}
wh, err := builder.
Handlers(handlers...).
WithManager(mgr).
Build()
if err != nil {
return err
}
webhooks = append(webhooks, wh)
}
return svr.Register(webhooks...)
}
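// Deployment sketch (hypothetical values, not part of the original file): the server
// reads its namespace and secret name from the environment and falls back to
// "default" / "webhook-server-secret" when they are unset, e.g.
//
//	POD_NAMESPACE=istio-system SECRET_NAME=my-webhook-secret <manager binary>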
| [
"\"POD_NAMESPACE\"",
"\"SECRET_NAME\""
]
| []
| [
"POD_NAMESPACE",
"SECRET_NAME"
]
| [] | ["POD_NAMESPACE", "SECRET_NAME"] | go | 2 | 0 | |
app.py | from flask import Flask, render_template, request
from sklearn.externals import joblib
import os
import pandas as pd
import numpy as np
app = Flask(__name__, static_url_path='/static/')
@app.route('/')
def form():
return render_template('index.html')
@app.route('/future_rate_prediction')
def firstForm():
return render_template('firstForm.html')
@app.route('/future_rate_prediction/overall_prediction',methods=['POST', 'GET'])
def overall_prediction():
#get the parameters
position = int(request.form['name'])-1
year = float(request.form['year'])
df=pd.read_csv('Data/Processed/Data_with_Potential.csv')
name = str(df.iloc[position,0])
df=df[df['Name']==name]
age = year + df['Age'].tolist()[0]
potential = df['Potential'].tolist()[0]
current_rate = df['Overall'].tolist()[0]
# group similar positions together
forward = ['RS', 'LS', 'RF', 'LF', 'CF', 'ST']
attack_mid = ['RAM', 'LAM', 'CAM']
wings = ['RM', 'RW', 'LM', 'LW']
central_mid = ['CM', 'LCM', 'RCM']
defensive_mid = ['CDM', 'LDM', 'RDM']
fullback = ['RB', 'RWB', 'LB', 'LWB']
cb_def = ['CB', 'LCB', 'RCB']
gk = ['GK']
position = df['Position'].tolist()[0]
if position in forward:
model = joblib.load('Data/Model/Future/Forward_Model.pkl')
elif position in attack_mid:
model = joblib.load('Data/Model/Future/am_Model.pkl')
elif position in wings:
model = joblib.load('Data/Model/Future/Wings_Model.pkl')
elif position in central_mid:
model = joblib.load('Data/Model/Future/Cm_Model.pkl')
elif position in defensive_mid:
model = joblib.load('Data/Model/Future/Dm_Model.pkl')
elif position in fullback:
model = joblib.load('Data/Model/Future/Fullback_Model.pkl')
elif position in cb_def:
model = joblib.load('Data/Model/Future/Cb_Model.pkl')
elif position in gk:
model = joblib.load('Data/Model/Future/Gk_Model.pkl')
prediction = model.predict([[age,potential]])
predicted_overall = prediction.round(1)[0]
return render_template('future_rate_prediction.html', name=str(name), age=int(age), potential=int(potential), current_rate = current_rate, predicted_rate=int(predicted_overall))
@app.route('/current_rate_prediction')
def secondForm():
return render_template('secondForm.html')
@app.route('/current_rate_prediction/overall_prediction', methods=['POST', 'GET'])
def current_overall_prediction():
#get the parameters
Dribbling = float(request.form['Dribbling'])
SprintSpeed = float(request.form['SprintSpeed'])
ShortPassing = float(request.form['ShortPassing'])
LongPassing = float(request.form['LongPassing'])
Strength = float(request.form['Strength'])
position = str(request.form['Position'])
# group similar positions together
forward = ['RS', 'LS', 'RF', 'LF', 'CF', 'ST']
attack_mid = ['RAM', 'LAM', 'CAM']
wings = ['RM', 'RW', 'LM', 'LW']
central_mid = ['CM', 'LCM', 'RCM']
defensive_mid = ['CDM', 'LDM', 'RDM']
fullback = ['RB', 'RWB', 'LB', 'LWB']
cb_def = ['CB', 'LCB', 'RCB']
gk = ['GK']
if position in forward:
model = joblib.load('Data/Model/Current/Forward_Model.pkl')
elif position in attack_mid:
model = joblib.load('Data/Model/Current/am_Model.pkl')
elif position in wings:
model = joblib.load('Data/Model/Current/Wings_Model.pkl')
elif position in central_mid:
model = joblib.load('Data/Model/Current/Cm_Model.pkl')
elif position in defensive_mid:
model = joblib.load('Data/Model/Current/Dm_Model.pkl')
elif position in fullback:
model = joblib.load('Data/Model/Current/Fullback_Model.pkl')
elif position in cb_def:
model = joblib.load('Data/Model/Current/Cb_Model.pkl')
elif position in gk:
model = joblib.load('Data/Model/Current/Gk_Model.pkl')
prediction = model.predict([[Dribbling,SprintSpeed,ShortPassing,LongPassing,Strength]])
predicted_overall = prediction.round(1)[0]
return render_template('current_rate_prediction.html',position=str(position), Dribbling=int(Dribbling), SprintSpeed=int(SprintSpeed), ShortPassing=int(ShortPassing), LongPassing = int(LongPassing), Strength=int(Strength),predicted_overall=predicted_overall)
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, debug = True)
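# Example client call (hypothetical values, not part of the original app): posting the
# five attributes and a position to the current-rating endpoint once the server is up.
#
#   import requests
#   requests.post(
#       "http://localhost:5000/current_rate_prediction/overall_prediction",
#       data={"Dribbling": 80, "SprintSpeed": 85, "ShortPassing": 78,
#             "LongPassing": 72, "Strength": 70, "Position": "ST"},
#   )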
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
mixer/adapter/kubernetesenv/kubernetesenv_test.go | // Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetesenv
import (
"context"
"errors"
"net"
"os"
"testing"
"time"
messagediff "gopkg.in/d4l3k/messagediff.v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"istio.io/istio/mixer/adapter/kubernetesenv/config"
kubernetes_apa_tmpl "istio.io/istio/mixer/adapter/kubernetesenv/template"
"istio.io/istio/mixer/pkg/adapter"
"istio.io/istio/mixer/pkg/adapter/test"
"istio.io/istio/pkg/kube/secretcontroller"
pkgtest "istio.io/istio/pkg/test"
)
const (
testSecretName = "testSecretName"
testSecretNameSpace = "istio-system"
)
type fakeK8sBuilder struct {
calledPath string
calledEnv adapter.Env
}
func (b *fakeK8sBuilder) build(path string, env adapter.Env) (kubernetes.Interface, error) {
b.calledPath = path
b.calledEnv = env
return fake.NewSimpleClientset(), nil
}
func errorClientBuilder(_ string, _ adapter.Env) (kubernetes.Interface, error) {
return nil, errors.New("can't build k8s client")
}
func newFakeBuilder() *builder {
fb := &fakeK8sBuilder{}
return newBuilder(fb.build)
}
// note: not using TestAdapterInvariants here because of kubernetes dependency.
// we are aiming for simple unit testing. a larger, more involved integration
// test / e2e test must be written to validate the builder in relation to a
// real kubernetes cluster.
func TestBuilder(t *testing.T) {
b := newFakeBuilder()
if err := b.Validate(); err != nil {
t.Errorf("ValidateConfig() => builder can't validate its default configuration: %v", err)
}
}
func TestBuilder_BuildAttributesGenerator(t *testing.T) {
tests := []struct {
name string
clientFn clientFactoryFn
conf adapter.Config
wantErr bool
}{
{"success", (&fakeK8sBuilder{}).build, conf, false},
{"builder error", errorClientBuilder, conf, true},
}
for _, v := range tests {
t.Run(v.name, func(t *testing.T) {
b := newBuilder(v.clientFn)
b.SetAdapterConfig(v.conf)
toClose, err := b.Build(context.Background(), test.NewEnv(t))
if err == nil && v.wantErr {
t.Fatal("Expected error building adapter")
}
if err != nil && !v.wantErr {
t.Fatalf("Got error, wanted none: %v", err)
}
if toClose != nil {
if err := toClose.Close(); err != nil {
t.Fatalf("Close() => %v, want success", err)
}
}
})
}
}
func TestBuilder_ControllerCache(t *testing.T) {
b := newFakeBuilder()
for i := 0; i < 10; i++ {
if _, err := b.Build(context.Background(), test.NewEnv(t)); err != nil {
t.Errorf("error in builder: %v", err)
}
}
b.Lock()
defer b.Unlock()
if len(b.controllers) != 1 {
t.Errorf("Got %v controllers, want 1", len(b.controllers))
}
}
// tests closing and rebuilding a handler
func TestHandler_Close(t *testing.T) {
b := newFakeBuilder()
handler, err := b.Build(context.Background(), test.NewEnv(t))
if err != nil {
t.Fatalf("error in builder: %v", err)
}
b.Lock()
if got, want := len(b.controllers), 1; got != want {
t.Errorf("Got %d controllers, want %d", got, want)
}
b.Unlock()
err = handler.Close()
if err != nil {
t.Fatalf("Close() returned unexpected error: %v", err)
}
b.Lock()
// should always have the local controller
if got, want := len(b.controllers), 1; got != want {
t.Errorf("Got %d controllers, want %d", got, want)
}
b.Unlock()
_, err = b.Build(context.Background(), test.NewEnv(t))
if err != nil {
t.Fatalf("error in builder: %v", err)
}
b.Lock()
if got, want := len(b.controllers), 1; got != want {
t.Errorf("Got %d controllers, want %d", got, want)
}
b.Unlock()
}
func TestBuilder_BuildAttributesGeneratorWithEnvVar(t *testing.T) {
testConf := *conf
testConf.KubeconfigPath = "please/override"
tests := []struct {
name string
clientFactory *fakeK8sBuilder
conf adapter.Config
wantOK bool
}{
{"success", &fakeK8sBuilder{}, &testConf, true},
}
wantPath := "/want/kubeconfig"
if err := os.Setenv("KUBECONFIG", wantPath); err != nil {
t.Fatalf("Could not set KUBECONFIG environment var")
}
for _, v := range tests {
t.Run(v.name, func(t *testing.T) {
b := newBuilder(v.clientFactory.build)
b.SetAdapterConfig(v.conf)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
{
_, err := b.Build(ctx, test.NewEnv(t))
gotOK := err == nil
if gotOK != v.wantOK {
t.Fatalf("Got %v, Want %v", err, v.wantOK)
}
if v.clientFactory.calledPath != wantPath {
t.Errorf("Bad kubeconfig path; got %s, want %s", v.clientFactory.calledPath, wantPath)
}
}
})
}
}
func TestKubegen_Generate(t *testing.T) {
builder := newBuilder(func(string, adapter.Env) (kubernetes.Interface, error) {
return fake.NewSimpleClientset(k8sobjs...), nil
})
testPodToNoControllerPodIn := &kubernetes_apa_tmpl.Instance{
SourceUid: "kubernetes://test-pod.testns",
DestinationUid: "kubernetes://no-controller-pod.testns",
}
testPodToNoControllerPodOut := kubernetes_apa_tmpl.NewOutput()
testPodToNoControllerPodOut.SetSourceLabels(map[string]string{"app": "test", "something": ""})
testPodToNoControllerPodOut.SetSourcePodIp(net.ParseIP("10.1.10.1"))
testPodToNoControllerPodOut.SetSourceHostIp(net.ParseIP("10.1.1.10"))
testPodToNoControllerPodOut.SetSourceNamespace("testns")
testPodToNoControllerPodOut.SetSourcePodName("test-pod")
testPodToNoControllerPodOut.SetSourcePodUid("kubernetes://test-pod.testns")
testPodToNoControllerPodOut.SetSourceServiceAccountName("test")
testPodToNoControllerPodOut.SetSourceOwner("kubernetes://apis/apps/v1/namespaces/testns/deployments/test-deployment")
testPodToNoControllerPodOut.SetSourceWorkloadName("test-deployment")
testPodToNoControllerPodOut.SetSourceWorkloadNamespace("testns")
testPodToNoControllerPodOut.SetSourceWorkloadUid("istio://testns/workloads/test-deployment")
testPodToNoControllerPodOut.SetDestinationPodName("no-controller-pod")
testPodToNoControllerPodOut.SetDestinationNamespace("testns")
testPodToNoControllerPodOut.SetDestinationPodUid("kubernetes://no-controller-pod.testns")
testPodToNoControllerPodOut.SetDestinationLabels(map[string]string{"app": "some-app"})
// TODO: Is this correct? For non-controlled pods, should we derive workloads at all?
testPodToNoControllerPodOut.SetDestinationWorkloadName("no-controller-pod")
testPodToNoControllerPodOut.SetDestinationWorkloadNamespace("testns")
testPodToNoControllerPodOut.SetDestinationWorkloadUid("istio://testns/workloads/no-controller-pod")
altTestPodToAltTestPod2In := &kubernetes_apa_tmpl.Instance{
SourceUid: "kubernetes://alt-test-pod.testns",
DestinationUid: "kubernetes://alt-test-pod-2.testns",
}
altTestPodToAltTestPod2Out := kubernetes_apa_tmpl.NewOutput()
altTestPodToAltTestPod2Out.SetSourceLabels(map[string]string{"app": "some-app"})
altTestPodToAltTestPod2Out.SetSourceNamespace("testns")
altTestPodToAltTestPod2Out.SetSourcePodName("alt-test-pod")
altTestPodToAltTestPod2Out.SetSourcePodUid("kubernetes://alt-test-pod.testns")
altTestPodToAltTestPod2Out.SetSourceOwner("kubernetes://apis/apps/v1/namespaces/testns/deployments/test-deployment")
altTestPodToAltTestPod2Out.SetSourceWorkloadName("test-deployment")
altTestPodToAltTestPod2Out.SetSourceWorkloadNamespace("testns")
altTestPodToAltTestPod2Out.SetSourceWorkloadUid("istio://testns/workloads/test-deployment")
altTestPodToAltTestPod2Out.SetDestinationLabels(map[string]string{"app": "some-app"})
altTestPodToAltTestPod2Out.SetDestinationPodName("alt-test-pod-2")
altTestPodToAltTestPod2Out.SetDestinationNamespace("testns")
altTestPodToAltTestPod2Out.SetDestinationPodUid("kubernetes://alt-test-pod-2.testns")
altTestPodToAltTestPod2Out.SetDestinationOwner("kubernetes://apis/apps/v1/namespaces/testns/deployments/test-deployment")
altTestPodToAltTestPod2Out.SetDestinationWorkloadName("test-deployment")
altTestPodToAltTestPod2Out.SetDestinationWorkloadNamespace("testns")
altTestPodToAltTestPod2Out.SetDestinationWorkloadUid("istio://testns/workloads/test-deployment")
daemonsetToReplicationControllerIn := &kubernetes_apa_tmpl.Instance{
SourceUid: "kubernetes://pod-daemonset.testns",
DestinationUid: "kubernetes://pod-replicationcontroller.testns",
}
daemonsetToReplicaControllerOut := kubernetes_apa_tmpl.NewOutput()
daemonsetToReplicaControllerOut.SetSourceLabels(map[string]string{"app": "some-app"})
daemonsetToReplicaControllerOut.SetSourceNamespace("testns")
daemonsetToReplicaControllerOut.SetSourcePodName("pod-daemonset")
daemonsetToReplicaControllerOut.SetSourcePodUid("kubernetes://pod-daemonset.testns")
daemonsetToReplicaControllerOut.SetSourceOwner("kubernetes://apis/apps/v1/namespaces/testns/daemonsets/test-daemonset")
daemonsetToReplicaControllerOut.SetSourceWorkloadName("test-daemonset")
daemonsetToReplicaControllerOut.SetSourceWorkloadNamespace("testns")
daemonsetToReplicaControllerOut.SetSourceWorkloadUid("istio://testns/workloads/test-daemonset")
daemonsetToReplicaControllerOut.SetDestinationPodName("pod-replicationcontroller")
daemonsetToReplicaControllerOut.SetDestinationNamespace("testns")
daemonsetToReplicaControllerOut.SetDestinationPodUid("kubernetes://pod-replicationcontroller.testns")
daemonsetToReplicaControllerOut.SetDestinationOwner("kubernetes://apis/core/v1/namespaces/testns/replicationcontrollers/test-replicationcontroller")
daemonsetToReplicaControllerOut.SetDestinationLabels(map[string]string{"app": "some-app"})
daemonsetToReplicaControllerOut.SetDestinationWorkloadName("test-replicationcontroller")
daemonsetToReplicaControllerOut.SetDestinationWorkloadNamespace("testns")
daemonsetToReplicaControllerOut.SetDestinationWorkloadUid("istio://testns/workloads/test-replicationcontroller")
ipDestinationSvcIn := &kubernetes_apa_tmpl.Instance{
SourceUid: "kubernetes://pod-job.testns",
DestinationIp: net.ParseIP("192.168.234.3"),
}
ipDestinationOut := kubernetes_apa_tmpl.NewOutput()
ipDestinationOut.SetSourceNamespace("testns")
ipDestinationOut.SetSourcePodName("pod-job")
ipDestinationOut.SetSourcePodUid("kubernetes://pod-job.testns")
ipDestinationOut.SetSourceWorkloadName("test-job")
ipDestinationOut.SetSourceWorkloadNamespace("testns")
ipDestinationOut.SetSourceWorkloadUid("istio://testns/workloads/test-job")
ipDestinationOut.SetSourceOwner("kubernetes://apis/batch/v1/namespaces/testns/jobs/test-job")
ipDestinationOut.SetDestinationLabels(map[string]string{"app": "ipAddr"})
ipDestinationOut.SetDestinationNamespace("testns")
ipDestinationOut.SetDestinationPodName("ip-svc-pod")
ipDestinationOut.SetDestinationPodUid("kubernetes://ip-svc-pod.testns")
ipDestinationOut.SetDestinationPodIp(net.ParseIP("192.168.234.3"))
ipDestinationOut.SetDestinationOwner("kubernetes://apis/apps/v1/namespaces/testns/deployments/test-deployment")
ipDestinationOut.SetDestinationWorkloadName("test-deployment")
ipDestinationOut.SetDestinationWorkloadNamespace("testns")
ipDestinationOut.SetDestinationWorkloadUid("istio://testns/workloads/test-deployment")
notFoundToNoControllerIn := &kubernetes_apa_tmpl.Instance{
SourceUid: "kubernetes://not-found-pod.testns",
DestinationUid: "kubernetes://test-pod.testns",
}
notFoundToNoControllerOut := kubernetes_apa_tmpl.NewOutput()
notFoundToNoControllerOut.SetDestinationLabels(map[string]string{"app": "test", "something": ""})
notFoundToNoControllerOut.SetDestinationPodIp(net.ParseIP("10.1.10.1"))
notFoundToNoControllerOut.SetDestinationHostIp(net.ParseIP("10.1.1.10"))
notFoundToNoControllerOut.SetDestinationNamespace("testns")
notFoundToNoControllerOut.SetDestinationPodName("test-pod")
notFoundToNoControllerOut.SetDestinationPodUid("kubernetes://test-pod.testns")
notFoundToNoControllerOut.SetDestinationServiceAccountName("test")
notFoundToNoControllerOut.SetDestinationOwner("kubernetes://apis/apps/v1/namespaces/testns/deployments/test-deployment")
notFoundToNoControllerOut.SetDestinationWorkloadName("test-deployment")
notFoundToNoControllerOut.SetDestinationWorkloadNamespace("testns")
notFoundToNoControllerOut.SetDestinationWorkloadUid("istio://testns/workloads/test-deployment")
notKubernetesIn := &kubernetes_apa_tmpl.Instance{
SourceUid: "something-else://other-scheme",
}
ipToReplicaSetSvcIn := &kubernetes_apa_tmpl.Instance{
DestinationUid: "kubernetes://replicaset-with-no-deployment-pod.testns",
SourceIp: net.ParseIP("192.168.234.3"),
}
ipToReplicaSetSvcOut := kubernetes_apa_tmpl.NewOutput()
ipToReplicaSetSvcOut.SetSourceLabels(map[string]string{"app": "ipAddr"})
ipToReplicaSetSvcOut.SetSourceNamespace("testns")
ipToReplicaSetSvcOut.SetSourcePodName("ip-svc-pod")
ipToReplicaSetSvcOut.SetSourcePodUid("kubernetes://ip-svc-pod.testns")
ipToReplicaSetSvcOut.SetSourcePodIp(net.ParseIP("192.168.234.3"))
ipToReplicaSetSvcOut.SetSourceOwner("kubernetes://apis/apps/v1/namespaces/testns/deployments/test-deployment")
ipToReplicaSetSvcOut.SetSourceWorkloadName("test-deployment")
ipToReplicaSetSvcOut.SetSourceWorkloadNamespace("testns")
ipToReplicaSetSvcOut.SetSourceWorkloadUid("istio://testns/workloads/test-deployment")
ipToReplicaSetSvcOut.SetDestinationLabels(map[string]string{"app": "some-app"})
ipToReplicaSetSvcOut.SetDestinationNamespace("testns")
ipToReplicaSetSvcOut.SetDestinationOwner("kubernetes://apis/apps/v1/namespaces/testns/replicasets/not-found-replicaset")
ipToReplicaSetSvcOut.SetDestinationPodName("replicaset-with-no-deployment-pod")
ipToReplicaSetSvcOut.SetDestinationPodUid("kubernetes://replicaset-with-no-deployment-pod.testns")
ipToReplicaSetSvcOut.SetDestinationWorkloadName("not-found-replicaset")
ipToReplicaSetSvcOut.SetDestinationWorkloadNamespace("testns")
ipToReplicaSetSvcOut.SetDestinationWorkloadUid("istio://testns/workloads/not-found-replicaset")
replicasetToReplicaSetIn := &kubernetes_apa_tmpl.Instance{
DestinationUid: "kubernetes://extv1beta1-replicaset-with-no-deployment-pod.testns",
SourceUid: "kubernetes://appsv1beta2-replicaset-with-no-deployment-pod.testns",
}
replicaSetToReplicaSetOut := kubernetes_apa_tmpl.NewOutput()
replicaSetToReplicaSetOut.SetSourceLabels(map[string]string{"app": "some-app"})
replicaSetToReplicaSetOut.SetSourceNamespace("testns")
replicaSetToReplicaSetOut.SetSourceOwner("kubernetes://apis/apps/v1/namespaces/testns/replicasets/not-found-replicaset")
replicaSetToReplicaSetOut.SetSourcePodName("appsv1beta2-replicaset-with-no-deployment-pod")
replicaSetToReplicaSetOut.SetSourcePodUid("kubernetes://appsv1beta2-replicaset-with-no-deployment-pod.testns")
replicaSetToReplicaSetOut.SetSourceWorkloadName("not-found-replicaset")
replicaSetToReplicaSetOut.SetSourceWorkloadNamespace("testns")
replicaSetToReplicaSetOut.SetSourceWorkloadUid("istio://testns/workloads/not-found-replicaset")
replicaSetToReplicaSetOut.SetDestinationLabels(map[string]string{"app": "some-app"})
replicaSetToReplicaSetOut.SetDestinationNamespace("testns")
replicaSetToReplicaSetOut.SetDestinationOwner("kubernetes://apis/apps/v1/namespaces/testns/replicasets/test-replicaset-without-deployment")
replicaSetToReplicaSetOut.SetDestinationPodName("extv1beta1-replicaset-with-no-deployment-pod")
replicaSetToReplicaSetOut.SetDestinationPodUid("kubernetes://extv1beta1-replicaset-with-no-deployment-pod.testns")
replicaSetToReplicaSetOut.SetDestinationWorkloadName("test-replicaset-without-deployment")
replicaSetToReplicaSetOut.SetDestinationWorkloadNamespace("testns")
replicaSetToReplicaSetOut.SetDestinationWorkloadUid("istio://testns/workloads/test-replicaset-without-deployment")
containerNameIn := &kubernetes_apa_tmpl.Instance{
DestinationUid: "kubernetes://pod-with-container.testns",
DestinationPort: 234,
SourceIp: net.ParseIP("192.168.234.3"),
}
containerNameOut := kubernetes_apa_tmpl.NewOutput()
containerNameOut.SetSourceLabels(map[string]string{"app": "ipAddr"})
containerNameOut.SetSourceNamespace("testns")
containerNameOut.SetSourcePodName("ip-svc-pod")
containerNameOut.SetSourcePodUid("kubernetes://ip-svc-pod.testns")
containerNameOut.SetSourcePodIp(net.ParseIP("192.168.234.3"))
containerNameOut.SetSourceOwner("kubernetes://apis/apps/v1/namespaces/testns/deployments/test-deployment")
containerNameOut.SetSourceWorkloadName("test-deployment")
containerNameOut.SetSourceWorkloadNamespace("testns")
containerNameOut.SetSourceWorkloadUid("istio://testns/workloads/test-deployment")
containerNameOut.SetDestinationLabels(map[string]string{"app": "container"})
containerNameOut.SetDestinationNamespace("testns")
containerNameOut.SetDestinationOwner("kubernetes://apis/apps/v1/namespaces/testns/deployments/test-container-deployment")
containerNameOut.SetDestinationPodName("pod-with-container")
containerNameOut.SetDestinationPodUid("kubernetes://pod-with-container.testns")
containerNameOut.SetDestinationContainerName("container1")
containerNameOut.SetDestinationWorkloadName("test-container-deployment")
containerNameOut.SetDestinationWorkloadNamespace("testns")
containerNameOut.SetDestinationWorkloadUid("istio://testns/workloads/test-container-deployment")
ipToDeploymentConfigIn := &kubernetes_apa_tmpl.Instance{
SourceIp: net.ParseIP("192.168.234.3"),
DestinationUid: "kubernetes://pod-deploymentconfig.testns",
}
ipToDeploymentConfigOut := kubernetes_apa_tmpl.NewOutput()
ipToDeploymentConfigOut.SetSourceLabels(map[string]string{"app": "ipAddr"})
ipToDeploymentConfigOut.SetSourceNamespace("testns")
ipToDeploymentConfigOut.SetSourcePodName("ip-svc-pod")
ipToDeploymentConfigOut.SetSourcePodUid("kubernetes://ip-svc-pod.testns")
ipToDeploymentConfigOut.SetSourcePodIp(net.ParseIP("192.168.234.3"))
ipToDeploymentConfigOut.SetSourceOwner("kubernetes://apis/apps/v1/namespaces/testns/deployments/test-deployment")
ipToDeploymentConfigOut.SetSourceWorkloadName("test-deployment")
ipToDeploymentConfigOut.SetSourceWorkloadNamespace("testns")
ipToDeploymentConfigOut.SetSourceWorkloadUid("istio://testns/workloads/test-deployment")
ipToDeploymentConfigOut.SetDestinationPodName("pod-deploymentconfig")
ipToDeploymentConfigOut.SetDestinationNamespace("testns")
ipToDeploymentConfigOut.SetDestinationPodUid("kubernetes://pod-deploymentconfig.testns")
ipToDeploymentConfigOut.SetDestinationOwner("kubernetes://apis/apps.openshift.io/v1/namespaces/testns/deploymentconfigs/test-deploymentconfig")
ipToDeploymentConfigOut.SetDestinationLabels(map[string]string{"app": "some-app"})
ipToDeploymentConfigOut.SetDestinationWorkloadName("test-deploymentconfig")
ipToDeploymentConfigOut.SetDestinationWorkloadNamespace("testns")
ipToDeploymentConfigOut.SetDestinationWorkloadUid("istio://testns/workloads/test-deploymentconfig")
tests := []struct {
name string
inputs *kubernetes_apa_tmpl.Instance
want *kubernetes_apa_tmpl.Output
params *config.Params
}{
{"test-pod to no-controller-pod", testPodToNoControllerPodIn, testPodToNoControllerPodOut, conf},
{"alt-test-pod to alt-test-pod-2", altTestPodToAltTestPod2In, altTestPodToAltTestPod2Out, conf},
{"pod-daemonset to pod-replicacontroller", daemonsetToReplicationControllerIn, daemonsetToReplicaControllerOut, conf},
{"not-found-pod to test-pod", notFoundToNoControllerIn, notFoundToNoControllerOut, conf},
{"pod-job to ip-svc-pod", ipDestinationSvcIn, ipDestinationOut, conf},
{"ip-svc-pod to replicaset", ipToReplicaSetSvcIn, ipToReplicaSetSvcOut, conf},
{"replicasets with no deployments", replicasetToReplicaSetIn, replicaSetToReplicaSetOut, conf},
{"not-k8s", notKubernetesIn, kubernetes_apa_tmpl.NewOutput(), conf},
{"ip-svc-pod to pod-with-container", containerNameIn, containerNameOut, conf},
{"ip-svc-pod to deploymentconfig", ipToDeploymentConfigIn, ipToDeploymentConfigOut, conf},
}
for _, v := range tests {
t.Run(v.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
builder.SetAdapterConfig(v.params)
kg, err := builder.Build(ctx, test.NewEnv(t))
if err != nil {
t.Fatal(err)
}
got, err := kg.(*handler).GenerateKubernetesAttributes(ctx, v.inputs)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if diff, equal := messagediff.PrettyDiff(v.want, got); !equal {
t.Errorf("Generate() => %#v\n%s", got, diff)
}
})
}
}
func createMultiClusterSecret(k8s *fake.Clientset) error {
data := map[string][]byte{}
secret := v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: testSecretName,
Namespace: testSecretNameSpace,
Labels: map[string]string{
secretcontroller.MultiClusterSecretLabel: "true",
},
},
Data: map[string][]byte{},
}
data["testRemoteCluster"] = []byte("Test")
secret.Data = data
_, err := k8s.CoreV1().Secrets(testSecretNameSpace).Create(&secret)
return err
}
func deleteMultiClusterSecret(k8s *fake.Clientset) error {
var immediate int64
return k8s.CoreV1().Secrets(testSecretNameSpace).Delete(
testSecretName, &metav1.DeleteOptions{GracePeriodSeconds: &immediate})
}
func verifyControllers(t *testing.T, b *builder, expectedControllerCount int, timeoutName string) {
pkgtest.NewEventualOpts(10*time.Millisecond, 5*time.Second).Eventually(t, timeoutName, func() bool {
b.Lock()
defer b.Unlock()
return len(b.controllers) == expectedControllerCount
})
}
func mockLoadKubeConfig(_ []byte) (*clientcmdapi.Config, error) {
return &clientcmdapi.Config{}, nil
}
func mockValidateClientConfig(_ clientcmdapi.Config) error {
return nil
}
func mockCreateInterfaceFromClusterConfig(_ *clientcmdapi.Config) (kubernetes.Interface, error) {
return fake.NewSimpleClientset(), nil
}
func Test_KubeSecretController(t *testing.T) {
if len(os.Getenv("RACE_TEST")) > 0 {
t.Skip("https://github.com/istio/istio/issues/15610")
}
secretcontroller.LoadKubeConfig = mockLoadKubeConfig
secretcontroller.ValidateClientConfig = mockValidateClientConfig
secretcontroller.CreateInterfaceFromClusterConfig = mockCreateInterfaceFromClusterConfig
clientset := fake.NewSimpleClientset()
b := newBuilder(func(string, adapter.Env) (kubernetes.Interface, error) {
return clientset, nil
})
// Call kube Build function which will start the secret controller.
// Sleep to allow secret process to start.
_, err := b.Build(context.Background(), test.NewEnv(t))
if err != nil {
t.Fatalf("error building adapter: %v", err)
}
time.Sleep(10 * time.Millisecond)
// Create the multicluster secret.
err = createMultiClusterSecret(clientset)
if err != nil {
t.Fatalf("Unexpected error on secret create: %v", err)
}
// Test - Verify that the remote controller has been added.
verifyControllers(t, b, 2, "create remote controller")
// Delete the multicluster secret.
err = deleteMultiClusterSecret(clientset)
if err != nil {
t.Fatalf("Unexpected error on secret delete: %v", err)
}
// Test - Verify that the remote controller has been removed.
verifyControllers(t, b, 1, "delete remote controller")
}
// Kubernetes Runtime Object for Tests
var trueVar = true
var falseVar = false
var k8sobjs = []runtime.Object{
// replicasets
&appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-replicaset-with-deployment",
Namespace: "testns",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "Deployment",
Name: "test-deployment",
},
{
APIVersion: "apps/v1",
Controller: &falseVar,
Kind: "Deployment",
Name: "not-exist-deployment",
},
},
},
},
&appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-replicaset-without-deployment",
Namespace: "testns",
},
},
&appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-appsv1beta2-replicaset-with-deployment",
Namespace: "testns",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "Deployment",
Name: "test-deployment",
},
},
},
},
&appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-appsv1-replicaset-with-deployment",
Namespace: "testns",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "Deployment",
Name: "test-deployment",
},
},
},
},
&appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-container-name",
Namespace: "testns",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "Deployment",
Name: "test-container-deployment",
},
},
},
},
// replicationcontrollers
&v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: "test-replicationcontroller-with-deploymentconfig",
Namespace: "testns",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps.openshift.io/v1",
Controller: &trueVar,
Kind: "DeploymentConfig",
Name: "test-deploymentconfig",
},
},
},
},
// pods
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Namespace: "testns",
Labels: map[string]string{
"app": "test",
"something": "",
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "ReplicaSet",
Name: "test-replicaset-with-deployment",
},
},
},
Status: v1.PodStatus{
HostIP: "10.1.1.10",
PodIP: "10.1.10.1",
},
Spec: v1.PodSpec{
ServiceAccountName: "test",
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "alt-test-pod",
Namespace: "testns",
Labels: map[string]string{"app": "some-app"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "ReplicaSet",
Name: "test-appsv1-replicaset-with-deployment",
},
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "alt-test-pod-2",
Namespace: "testns",
Labels: map[string]string{"app": "some-app"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "ReplicaSet",
Name: "test-appsv1beta2-replicaset-with-deployment",
},
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "extv1beta1-replicaset-with-no-deployment-pod",
Namespace: "testns",
Labels: map[string]string{"app": "some-app"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "ReplicaSet",
Name: "test-replicaset-without-deployment",
},
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "appsv1beta2-replicaset-with-no-deployment-pod",
Namespace: "testns",
Labels: map[string]string{"app": "some-app"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "ReplicaSet",
Name: "not-found-replicaset",
},
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "replicaset-with-no-deployment-pod",
Namespace: "testns",
Labels: map[string]string{"app": "some-app"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "ReplicaSet",
Name: "not-found-replicaset",
},
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "no-controller-pod",
Namespace: "testns",
Labels: map[string]string{"app": "some-app"},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-daemonset",
Namespace: "testns",
Labels: map[string]string{"app": "some-app"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "DaemonSet",
Name: "test-daemonset",
},
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-replicationcontroller",
Namespace: "testns",
Labels: map[string]string{"app": "some-app"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "core/v1",
Controller: &trueVar,
Kind: "ReplicationController",
Name: "test-replicationcontroller",
},
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-job",
Namespace: "testns",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "batch/v1",
Controller: &trueVar,
Kind: "Job",
Name: "test-job",
},
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "ip-svc-pod",
Namespace: "testns",
Labels: map[string]string{"app": "ipAddr"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "ReplicaSet",
Name: "test-appsv1-replicaset-with-deployment",
},
},
},
Status: v1.PodStatus{PodIP: "192.168.234.3"},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-with-container",
Namespace: "testns",
Labels: map[string]string{"app": "container"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Controller: &trueVar,
Kind: "ReplicaSet",
Name: "test-container-name",
},
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "container1", Ports: []v1.ContainerPort{{ContainerPort: 123}, {ContainerPort: 234}}},
{Name: "container2", Ports: []v1.ContainerPort{{ContainerPort: 80}}},
},
},
},
&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-deploymentconfig",
Namespace: "testns",
Labels: map[string]string{"app": "some-app"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "core/v1",
Controller: &trueVar,
Kind: "ReplicationController",
Name: "test-replicationcontroller-with-deploymentconfig",
},
},
},
},
}
| ["\"RACE_TEST\""] | [] | ["RACE_TEST"] | [] | ["RACE_TEST"] | go | 1 | 0 | |
wl/client.go | package wl
import (
"io/ioutil"
"net"
"os"
"path/filepath"
"sync"
"syscall"
"github.com/elliotmr/gdl/wl/wlp"
"github.com/pkg/errors"
"fmt"
)
type Client struct {
ctx *wlp.Context
compositor *wlp.Compositor
shm *wlp.Shm
shell *wlp.ZxdgShellV6
Screens []*Screen
Windows []*Window
}
type cbListener struct {
*sync.Cond
Data uint32
}
func NewCallbackListener() *cbListener {
return &cbListener{Cond: sync.NewCond(&sync.Mutex{})}
}
func (cbl *cbListener) Done(callbackData uint32) {
cbl.Data = callbackData
cbl.Broadcast()
return
}
// TODO: multiple fds
// Implements ZxdgShellV6Listener
func (c *Client) Ping(serial uint32) {
c.shell.Pong(serial)
}
// Implements Shm Listener
func (c *Client) Format(format uint32) {
fmt.Println("Valid Format: ", format)
}
func (c *Client) Connect(sockName string) error {
// TODO: Add support for connecting to an open file descriptor
if sockName == "" {
sockName = os.Getenv("WAYLAND_DISPLAY")
}
if sockName == "" {
sockName = "wayland-0"
}
pathIsAbsolute := sockName[0] == '/'
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if !pathIsAbsolute && runtimeDir == "" {
return errors.New("XDG_RUNTIME_DIR is not set in environment")
}
absSockName := filepath.Join(runtimeDir, sockName)
addr, err := net.ResolveUnixAddr("unix", absSockName)
if err != nil {
return errors.Wrapf(err, "unable to resolve unix socket address (%s)", absSockName)
}
conn, err := net.DialUnix("unix", nil, addr)
if err != nil {
return errors.Wrapf(err, "unable to connect to wayland server at (%s)", absSockName)
}
c.ctx = wlp.NewContext(conn)
c.ctx.Start()
err = c.Roundtrip()
if err != nil {
return errors.Wrap(err, "starting context failed")
}
cmp, err := c.ctx.BindGlobal("wl_compositor", c)
if err != nil {
return errors.Wrap(err, "unable to bind wl_compositor")
}
c.compositor = cmp.(*wlp.Compositor)
shm, err := c.ctx.BindGlobal("wl_shm", c)
if err != nil {
return errors.Wrap(err, "unable to bind wl_shm")
}
c.shm = shm.(*wlp.Shm)
for i := 0; i < c.ctx.NumGlobals("wl_output"); i++ {
scr := &Screen{
Mu: &sync.RWMutex{},
Factor: 1,
}
output, err := c.ctx.BindGlobalIndex("wl_output", scr, i)
if err != nil {
return errors.Wrap(err, "unable to bind wl_output")
}
scr.output = output.(*wlp.Output)
c.Screens = append(c.Screens, scr)
}
shell, err := c.ctx.BindGlobal("zxdg_shell_v6", c)
if err != nil {
return errors.Wrap(err, "unable to bind zxdg_shell_v6")
}
c.shell = shell.(*wlp.ZxdgShellV6)
return c.Roundtrip()
}
// Roundtrip is a convenience wrapper around Sync. It blocks the calling
// goroutine until all pending Wayland requests have been processed.
func (c *Client) Roundtrip() error {
cbl := NewCallbackListener()
cbl.L.Lock()
_, err := c.ctx.Display.Sync(cbl)
if err != nil {
return errors.Wrap(err, "unable to create display sync")
}
cbl.Wait()
return c.ctx.Err
}
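// Illustrative usage (a minimal sketch, not part of the original file; it
// assumes a compositor is reachable via $WAYLAND_DISPLAY or the default
// "wayland-0" socket under $XDG_RUNTIME_DIR):
//
//	c := &Client{}
//	if err := c.Connect(""); err != nil {
//		panic(err)
//	}
//	// Connect already performs roundtrips internally; further explicit
//	// syncs can be issued whenever pending requests must be flushed:
//	if err := c.Roundtrip(); err != nil {
//		panic(err)
//	}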
func (c *Client) CreateWindow() (*Window, error) {
var err error
w := &Window{c: c}
w.buffers[0].w = w
w.buffers[1].w = w
w.scb = &surfaceCb{w: w}
w.Surface, err = c.compositor.CreateSurface(w)
if err != nil {
return nil, errors.Wrap(err, "unable to create surface")
}
w.ZxdgSurfaceV6, err = c.shell.GetXdgSurface(w.scb, w.Surface.ID())
if err != nil {
return nil, errors.Wrap(err, "unable to create xdg_surface")
}
w.ZxdgToplevelV6, err = w.ZxdgSurfaceV6.GetToplevel(w)
if err != nil {
return nil, errors.Wrap(err, "unable to create xdg_toplevel")
}
err = w.InitGraphics()
if err != nil {
return nil, errors.Wrap(err, "unable to init graphics on window")
}
return w, c.Roundtrip()
}
func (c *Client) CreateMemoryPool(size uint32) (*wlp.ShmPool, []byte, error) {
f, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), "shm")
if err != nil {
return nil, nil, errors.Wrap(err, "unable to create backing file")
}
// unlink the backing file right away; comment this out to keep the file around for debugging
syscall.Unlink(f.Name())
err = f.Truncate(int64(size))
if err != nil {
return nil, nil, errors.Wrap(err, "unable to resize backing file")
}
data, err := syscall.Mmap(int(f.Fd()), 0, int(size), syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
if err != nil {
return nil, nil, errors.Wrap(err, "unable to mmap temp file")
}
pool, err := c.shm.CreatePool(c, f, int32(size * 2))
if err != nil {
return nil, nil, errors.Wrap(err, "unable to create shm pool")
}
return pool, data, nil
}
| ["\"WAYLAND_DISPLAY\"", "\"XDG_RUNTIME_DIR\"", "\"XDG_RUNTIME_DIR\""] | [] | ["XDG_RUNTIME_DIR", "WAYLAND_DISPLAY"] | [] | ["XDG_RUNTIME_DIR", "WAYLAND_DISPLAY"] | go | 2 | 0 | |
models/unet.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
class UNet3D(nn.Module):
def __init__(self, in_channels, out_channels, interpolate=True, conv_layer_order='cbr', init_ch=16):
super(UNet3D, self).__init__()
self.no_class = out_channels
# number of groups for the GroupNorm
# num_groups = min(init_ch // 2, 32)
# the encoder path consists of 4 subsequent Encoder modules
# the number of feature maps is the same as in the paper
self.encoders = nn.ModuleList([
Encoder(in_channels, init_ch, is_max_pool=False, conv_layer_order=conv_layer_order),
Encoder(init_ch, 2 * init_ch, conv_layer_order=conv_layer_order),
Encoder(2 * init_ch, 4 * init_ch, conv_layer_order=conv_layer_order),
Encoder(4 * init_ch, 8 * init_ch, conv_layer_order=conv_layer_order),
])
self.decoders = nn.ModuleList([
Decoder(4 * init_ch + 8 * init_ch, 4 * init_ch, interpolate, conv_layer_order=conv_layer_order),
Decoder(2 * init_ch + 4 * init_ch, 2 * init_ch, interpolate, conv_layer_order=conv_layer_order),
Decoder(init_ch + 2 * init_ch, init_ch, interpolate, conv_layer_order=conv_layer_order)
])
self.final_conv = nn.Sequential(nn.Dropout3d(0.1, False),
nn.Conv3d(init_ch, self.no_class, 1))
def forward(self, x):
# encoder part
encoders_features = []
enc1 = self.encoders[0](x)
enc2 = self.encoders[1](enc1)
enc3 = self.encoders[2](enc2)
mid = self.encoders[3](enc3)
encoders_features = [enc3, enc2, enc1]
dec3 = self.decoders[0](enc3, mid)
dec2 = self.decoders[1](enc2, dec3)
dec1 = self.decoders[2](enc1, dec2)
final = self.final_conv(dec1)
return final
# Some correctly implemented utilities from a github code repository,
# but I don't like them.
class Encoder(nn.Module):
def __init__(self, in_channels, out_channels, conv_kernel_size=3, is_max_pool=True,
max_pool_kernel_size=(2, 2, 2), conv_layer_order='cbr', num_groups=32):
super(Encoder, self).__init__()
self.max_pool = nn.MaxPool3d(kernel_size=max_pool_kernel_size, padding=0) if is_max_pool else None
self.double_conv = DoubleConv(in_channels, out_channels,
kernel_size=conv_kernel_size,
order=conv_layer_order,
num_groups=num_groups)
def forward(self, x):
if self.max_pool is not None:
x = self.max_pool(x)
x = self.double_conv(x)
return x
class Decoder(nn.Module):
def __init__(self, in_channels, out_channels, interpolate, kernel_size=3,
scale_factor=(2, 2, 2), conv_layer_order='cbr', num_groups=32):
super(Decoder, self).__init__()
if interpolate:
self.upsample = None
else:
self.upsample = nn.ConvTranspose3d(2 * out_channels,
2 * out_channels,
kernel_size=kernel_size,
stride=scale_factor,
padding=1,
output_padding=0)
self.double_conv = DoubleConv(in_channels, out_channels,
kernel_size=kernel_size,
order=conv_layer_order,
num_groups=num_groups)
def forward(self, encoder_features, x):
if self.upsample is None:
output_size = encoder_features.size()[2:]
x = F.interpolate(x, size=output_size, mode='trilinear')
else:
x = self.upsample(x)
x = torch.cat((encoder_features, x), dim=1)
x = self.double_conv(x)
return x
class DoubleConv(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, order='cbr', num_groups=32):
super(DoubleConv, self).__init__()
if in_channels < out_channels:
# if in_channels < out_channels we're in the encoder path
conv1_in_channels, conv1_out_channels = in_channels, out_channels // 2
conv2_in_channels, conv2_out_channels = conv1_out_channels, out_channels
else:
# otherwise we're in the decoder path
conv1_in_channels, conv1_out_channels = in_channels, out_channels
conv2_in_channels, conv2_out_channels = out_channels, out_channels
# conv1
self._add_conv(1, conv1_in_channels, conv1_out_channels, kernel_size, order, num_groups)
# conv2
self._add_conv(2, conv2_in_channels, conv2_out_channels, kernel_size, order, num_groups)
def _add_conv(self, pos, in_channels, out_channels, kernel_size, order, num_groups):
assert pos in [1, 2], 'pos MUST be either 1 or 2'
assert 'c' in order, "'c' (conv layer) MUST be present"
assert 'r' in order, "'r' (ReLU layer) MUST be present"
for i, char in enumerate(order):
if char == 'r':
self.add_module(f'relu{pos}', nn.ReLU(inplace=True))
elif char == 'c':
self.add_module(f'conv{pos}', nn.Conv3d(in_channels,
out_channels,
kernel_size,
padding=1))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
self.add_module(f'norm{pos}', nn.GroupNorm(num_groups=num_groups, num_channels=out_channels))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
self.add_module(f'norm{pos}', nn.BatchNorm3d(in_channels))
else:
self.add_module(f'norm{pos}', nn.BatchNorm3d(out_channels))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of 'b', 'r', 'c'")
if __name__ == '__main__':
import time
model = UNet3D(1, 9, init_ch=16, conv_layer_order='cbr', interpolate=True)
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
start = time.time()
summary(model, (1, 160, 160, 64))
print("take {:f} s".format(time.time() - start)) | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
core/core.go | // Package core provides common utilities to be used throughout various Lever
// binaries and libraries. These utilities are very specific to Lever.
package core
import (
"os"
"strings"
"github.com/leveros/leveros/config"
)
// PackageName is the name of this package.
const PackageName = "core"
const (
// RPCMethodHandler represents the gRPC method handling Lever RPCs.
RPCMethodHandler = "HandleRPC"
// StreamingRPCMethodHandler represents the gRPC method handling streaming
// Lever RPCs.
StreamingRPCMethodHandler = "HandleStreamingRPC"
)
var (
// InstanceListenPortFlag is the port Lever instances listen on for
// Lever RPCs.
InstanceListenPortFlag = config.DeclareString(
PackageName, "instanceListenPort", "3837")
// InternalEnvironmentSuffixFlag represents the ending of the environment
// host name to which RPCs can be routed directly (via internal proxies).
InternalEnvironmentSuffixFlag = config.DeclareString(
PackageName, "internalEnvSufix", ".lever")
// DefaultDevAliasFlag is the actual address of the default Lever
// environment used for local development.
DefaultDevAliasFlag = config.DeclareString(
PackageName, "defaultDevAlias", getDefaultLeverOSIPPort())
// DefaultDevEnvFlag is the default Lever environment used for local
// development.
DefaultDevEnvFlag = config.DeclareString(
PackageName, "defaultDevEnv", "dev.lever")
// AdminEnvFlag is the admin Lever environment.
AdminEnvFlag = config.DeclareString(
PackageName, "adminEnv", "admin.lever")
)
func getDefaultLeverOSIPPort() string {
ipPort := os.Getenv("LEVEROS_IP_PORT")
if ipPort != "" {
return ipPort
}
return "127.0.0.1:8080"
}
// IsInternalEnvironment returns true iff the provided environment is part of
// the same Lever deployment (RPCs can be routed internally).
func IsInternalEnvironment(environment string) bool {
suffix := InternalEnvironmentSuffixFlag.Get()
if suffix == "" {
return false
}
return strings.HasSuffix(environment, suffix)
}
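// Illustrative behaviour (a sketch, assuming the default ".lever" suffix
// configured above is left unchanged):
//
//	IsInternalEnvironment("dev.lever")   // true  - routable via internal proxies
//	IsInternalEnvironment("example.com") // false - treated as an external environment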
// IsAdmin returns true iff the env + service represent the admin service.
func IsAdmin(leverURL *LeverURL) bool {
return leverURL.Environment == AdminEnvFlag.Get() &&
leverURL.Service == "admin"
}
| ["\"LEVEROS_IP_PORT\""] | [] | ["LEVEROS_IP_PORT"] | [] | ["LEVEROS_IP_PORT"] | go | 1 | 0 | |
main.go | package main
import (
"github.com/ngs/ts-dakoku/app"
"github.com/aws/aws-lambda-go/lambda"
)
func main() {
server, err := app.Run()
if err != nil {
panic(err)
}
lambda.Start(server)
}
| [] | [] | [] | [] | [] | go | null | null | null
settings.py | """
Settings for the model.
"""
import os
# to set the gpu device
os.environ["CUDA_VISIBLE_DEVICES"]="1"
# disable tensorflow compilation warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#path to the preprocessed dataset for training
DIR_TRAIN = './train_initialized'
#path to the preprocessed dataset for testing
DIR_TEST = './test_initialized'
#path to store the trained model
MODEL_DIR = './model_saved'
#the number of batches for training the KGC model
NB_BATCH_TRIPLE = 200
#batch size for training the Distantly Supervised RE model
BATCH_SIZE = 50
#batch size for testing
TESTING_BATCH_SIZE = 50
#epochs for training
MAX_EPOCH = 100
#the maximum number of words in a path
MAX_LENGTH = 120
#hidden feature size
HIDDEN_SIZE = 100
#position embedding size
POSI_SIZE = 5
#learning rate for RE model
LR = 0.02
#learning rate for KGC model
LR_KGC = 0.02
#dropout rate
KEEP_PROB = 0.5
#margin for training KGC model
MARGIN = 1.0
#random seed for initializing weights
SEED = 123
#training strategy: none, pretrain, ranking, pretrain_ranking, locloss
STRATEGY = 'locloss'
#evaluate and save model every n-epoch
CHECKPOINT_EVERY = 2
#ranking attention over top or last n complex paths
RANK_TOPN = 5
#path to store the results
RESULT_DIR = './results'
#precision@top_n prediction
P_AT_N = [500, 1000, 1500]
#address of KG triplets for training
ADDR_KG_Train = 'data/kg_train.txt'
#address of KG trplets for testing
ADDR_KG_Test = 'data/kg_test.txt'
#address of textual triplets
ADDR_TX = 'data/tx.txt'
#address of pretrained word embeddings
ADDR_EMB = 'data/vec.txt'
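# Illustrative usage elsewhere in the project (a sketch; the importing module
# is an assumption, not taken from this repository):
#   from settings import BATCH_SIZE, DIR_TRAIN, MODEL_DIR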
| [] | [] | ["CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"] | [] | ["CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"] | python | 2 | 0 | |
resnet3d/resnet3d.py | """A vanilla 3D resnet implementation.
Based on Raghavendra Kotikalapudi's 2D implementation
keras-resnet (See https://github.com/raghakot/keras-resnet.)
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import six
from math import ceil
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers.convolutional import (
Conv3D,
AveragePooling3D,
MaxPooling3D
)
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
def _bn_relu(input):
"""Helper to build a BN -> relu block (by @raghakot)."""
norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
return Activation("relu")(norm)
def _conv_bn_relu3D(**conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1, 1))
kernel_initializer = conv_params.setdefault(
"kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer",
l2(1e-4))
def f(input):
conv = Conv3D(filters=filters, kernel_size=kernel_size,
strides=strides, kernel_initializer=kernel_initializer,
padding=padding,
kernel_regularizer=kernel_regularizer)(input)
return _bn_relu(conv)
return f
def _bn_relu_conv3d(**conv_params):
"""Helper to build a BN -> relu -> conv3d block."""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer",
"he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer",
l2(1e-4))
def f(input):
activation = _bn_relu(input)
return Conv3D(filters=filters, kernel_size=kernel_size,
strides=strides, kernel_initializer=kernel_initializer,
padding=padding,
kernel_regularizer=kernel_regularizer)(activation)
return f
def _shortcut3d(input, residual):
"""3D shortcut to match input and residual and merges them with "sum"."""
stride_dim1 = ceil(input._keras_shape[DIM1_AXIS] \
/ residual._keras_shape[DIM1_AXIS])
stride_dim2 = ceil(input._keras_shape[DIM2_AXIS] \
/ residual._keras_shape[DIM2_AXIS])
stride_dim3 = ceil(input._keras_shape[DIM3_AXIS] \
/ residual._keras_shape[DIM3_AXIS])
equal_channels = residual._keras_shape[CHANNEL_AXIS] \
== input._keras_shape[CHANNEL_AXIS]
shortcut = input
if stride_dim1 > 1 or stride_dim2 > 1 or stride_dim3 > 1 \
or not equal_channels:
shortcut = Conv3D(
filters=residual._keras_shape[CHANNEL_AXIS],
kernel_size=(1, 1, 1),
strides=(stride_dim1, stride_dim2, stride_dim3),
kernel_initializer="he_normal", padding="valid",
kernel_regularizer=l2(1e-4)
)(input)
return add([shortcut, residual])
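# Illustrative note (a sketch): the 1x1x1 projection above is only inserted when
# the residual branch changed the spatial extent or the channel count, e.g.
#   input:    (N, 24, 24, 24, 64)
#   residual: (N, 12, 12, 12, 128)  -> strides (2, 2, 2) plus a channel projection
# otherwise the raw input is summed with the residual unchanged.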
def _residual_block3d(block_function, filters, kernel_regularizer, repetitions,
is_first_layer=False):
def f(input):
for i in range(repetitions):
strides = (1, 1, 1)
if i == 0 and not is_first_layer:
strides = (2, 2, 2)
input = block_function(filters=filters, strides=strides,
kernel_regularizer=kernel_regularizer,
is_first_block_of_first_layer=(
is_first_layer and i == 0)
)(input)
return input
return f
def basic_block(filters, strides=(1, 1, 1), kernel_regularizer=l2(1e-4),
is_first_block_of_first_layer=False):
"""Basic 3 X 3 X 3 convolution blocks. Extended from raghakot's 2D impl."""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv1 = Conv3D(filters=filters, kernel_size=(3, 3, 3),
strides=strides, padding="same",
kernel_initializer="he_normal",
kernel_regularizer=kernel_regularizer
)(input)
else:
conv1 = _bn_relu_conv3d(filters=filters,
kernel_size=(3, 3, 3),
strides=strides,
kernel_regularizer=kernel_regularizer
)(input)
residual = _bn_relu_conv3d(filters=filters, kernel_size=(3, 3, 3),
kernel_regularizer=kernel_regularizer
)(conv1)
return _shortcut3d(input, residual)
return f
def bottleneck(filters, strides=(1, 1, 1), kernel_regularizer=l2(1e-4),
is_first_block_of_first_layer=False):
"""Basic 3 X 3 X 3 convolution blocks. Extended from raghakot's 2D impl."""
def f(input):
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
conv_1_1 = Conv3D(filters=filters, kernel_size=(1, 1, 1),
strides=strides, padding="same",
kernel_initializer="he_normal",
kernel_regularizer=kernel_regularizer
)(input)
else:
conv_1_1 = _bn_relu_conv3d(filters=filters, kernel_size=(1, 1, 1),
strides=strides,
kernel_regularizer=kernel_regularizer
)(input)
conv_3_3 = _bn_relu_conv3d(filters=filters, kernel_size=(3, 3, 3),
kernel_regularizer=kernel_regularizer
)(conv_1_1)
residual = _bn_relu_conv3d(filters=filters * 4, kernel_size=(1, 1, 1),
kernel_regularizer=kernel_regularizer
)(conv_3_3)
return _shortcut3d(input, residual)
return f
def _handle_data_format():
global DIM1_AXIS
global DIM2_AXIS
global DIM3_AXIS
global CHANNEL_AXIS
if K.image_data_format() == 'channels_last':
DIM1_AXIS = 1
DIM2_AXIS = 2
DIM3_AXIS = 3
CHANNEL_AXIS = 4
else:
CHANNEL_AXIS = 1
DIM1_AXIS = 2
DIM2_AXIS = 3
DIM3_AXIS = 4
def _get_block(identifier):
if isinstance(identifier, six.string_types):
res = globals().get(identifier)
if not res:
raise ValueError('Invalid {}'.format(identifier))
return res
return identifier
class Resnet3DBuilder(object):
"""ResNet3D."""
@staticmethod
def build(input_shape, num_outputs, block_fn, repetitions, reg_factor):
"""Instantiate a vanilla ResNet3D keras model.
# Arguments
input_shape: Tuple of input shape in the format
(conv_dim1, conv_dim2, conv_dim3, channels) if dim_ordering='tf'
(channels, conv_dim1, conv_dim2, conv_dim3) if dim_ordering='th'
num_outputs: The number of outputs at the final softmax layer
block_fn: Unit block to use {'basic_block', 'bottleneck'}
repetitions: Repetitions of unit blocks
# Returns
model: a 3D ResNet model that takes a 5D tensor (volumetric images
in batch) as input and returns a 1D vector (prediction) as output.
"""
_handle_data_format()
if len(input_shape) != 4:
raise ValueError("Input shape should be a tuple "
"(conv_dim1, conv_dim2, conv_dim3, channels) "
"for tensorflow as backend or "
"(channels, conv_dim1, conv_dim2, conv_dim3) "
"for theano as backend")
block_fn = _get_block(block_fn)
input = Input(shape=input_shape)
# first conv
conv1 = _conv_bn_relu3D(filters=64, kernel_size=(7, 7, 7),
strides=(2, 2, 2),
kernel_regularizer=l2(reg_factor)
)(input)
pool1 = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2),
padding="same")(conv1)
# repeat blocks
block = pool1
filters = 64
for i, r in enumerate(repetitions):
block = _residual_block3d(block_fn, filters=filters,
kernel_regularizer=l2(reg_factor),
repetitions=r, is_first_layer=(i == 0)
)(block)
filters *= 2
# last activation
block_output = _bn_relu(block)
# average pool and classification
pool2 = AveragePooling3D(pool_size=(block._keras_shape[DIM1_AXIS],
block._keras_shape[DIM2_AXIS],
block._keras_shape[DIM3_AXIS]),
strides=(1, 1, 1))(block_output)
flatten1 = Flatten()(pool2)
if num_outputs > 1:
dense = Dense(units=num_outputs,
kernel_initializer="he_normal",
activation="softmax",
kernel_regularizer=l2(reg_factor))(flatten1)
else:
dense = Dense(units=num_outputs,
kernel_initializer="he_normal",
activation="sigmoid",
kernel_regularizer=l2(reg_factor))(flatten1)
model = Model(inputs=input, outputs=dense)
return model
@staticmethod
def build_resnet_18(input_shape, num_outputs, reg_factor=1e-4):
"""Build resnet 18."""
return Resnet3DBuilder.build(input_shape, num_outputs, basic_block,
[2, 2, 2, 2], reg_factor=reg_factor)
@staticmethod
def build_resnet_34(input_shape, num_outputs, reg_factor=1e-4):
"""Build resnet 34."""
return Resnet3DBuilder.build(input_shape, num_outputs, basic_block,
[3, 4, 6, 3], reg_factor=reg_factor)
@staticmethod
def build_resnet_50(input_shape, num_outputs, reg_factor=1e-4):
"""Build resnet 50."""
return Resnet3DBuilder.build(input_shape, num_outputs, bottleneck,
[3, 4, 6, 3], reg_factor=reg_factor)
@staticmethod
def build_resnet_101(input_shape, num_outputs, reg_factor=1e-4):
"""Build resnet 101."""
return Resnet3DBuilder.build(input_shape, num_outputs, bottleneck,
[3, 4, 23, 3], reg_factor=reg_factor)
@staticmethod
def build_resnet_152(input_shape, num_outputs, reg_factor=1e-4):
"""Build resnet 152."""
return Resnet3DBuilder.build(input_shape, num_outputs, bottleneck,
[3, 8, 36, 3], reg_factor=reg_factor)
if __name__ == '__main__':
from keras.optimizers import Adam
import numpy as np
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
os.environ['KERAS_BACKEND'] = 'tensorflow'
target_shape = (151, 139, 139, 1)
data_size = 10
num_outputs = 2
model = Resnet3DBuilder.build_resnet_18(target_shape, num_outputs)
adam = Adam(lr=1e-3, amsgrad=True)
model.compile(loss="binary_crossentropy", optimizer=adam, metrics=['accuracy'])
print(model.summary())
# Mimic training
## Data Preparation
sample_data = np.random.random((data_size, *target_shape))
sample_raw_labels = np.random.randint(0, num_outputs, data_size, dtype=int)
sample_labels = np.zeros((data_size, num_outputs))
sample_labels[np.arange(data_size), sample_raw_labels] = 1
## Training
model.fit(sample_data, sample_labels, epochs=2, batch_size=1) | [] | [] | ["CUDA_VISIBLE_DEVICES", "KERAS_BACKEND"] | [] | ["CUDA_VISIBLE_DEVICES", "KERAS_BACKEND"] | python | 2 | 0 | |
microservice.go | package main
import (
"fmt"
"net/http"
"os"
"github.com/imrancluster/Cloud-Native-Go/api"
)
func main() {
http.HandleFunc("/", index)
http.HandleFunc("/api/echo", api.EchoHandleFunc)
http.HandleFunc("/api/hello", api.HelloHandleFunc)
http.HandleFunc("/api/books", api.BooksHandleFunc)
http.HandleFunc("/api/books/", api.BookHandleFunc)
http.ListenAndServe(port(), nil)
}
func port() string {
port := os.Getenv("PORT")
if len(port) == 0 {
port = "8080"
}
return ":" + port
}
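// Illustrative behaviour (a sketch; PORT is the only override consulted here):
//
//	PORT=9090 ./microservice   -> ListenAndServe(":9090", nil)
//	./microservice             -> ListenAndServe(":8080", nil) by default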
func index(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Welcome to Cloud Native Go (Update).")
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
main_test.go | package gorm_test
import (
"database/sql"
"database/sql/driver"
"fmt"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/erikstmartin/go-testdb"
"github.com/bookreport/gorm"
_ "github.com/bookreport/gorm/dialects/mssql"
_ "github.com/bookreport/gorm/dialects/mysql"
"github.com/bookreport/gorm/dialects/postgres"
_ "github.com/bookreport/gorm/dialects/sqlite"
"github.com/jinzhu/now"
)
var (
DB *gorm.DB
t1, t2, t3, t4, t5 time.Time
)
func init() {
var err error
if DB, err = OpenTestConnection(); err != nil {
panic(fmt.Sprintf("No error should happen when connecting to test database, but got err=%+v", err))
}
runMigration()
}
func OpenTestConnection() (db *gorm.DB, err error) {
dbDSN := os.Getenv("GORM_DSN")
switch os.Getenv("GORM_DIALECT") {
case "mysql":
fmt.Println("testing mysql...")
if dbDSN == "" {
dbDSN = "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True"
}
db, err = gorm.Open("mysql", dbDSN)
case "postgres":
fmt.Println("testing postgres...")
if dbDSN == "" {
dbDSN = "user=gorm password=gorm DB.name=gorm port=9920 sslmode=disable"
}
db, err = gorm.Open("postgres", dbDSN)
case "mssql":
// CREATE LOGIN gorm WITH PASSWORD = 'LoremIpsum86';
// CREATE DATABASE gorm;
// USE gorm;
// CREATE USER gorm FROM LOGIN gorm;
// sp_changedbowner 'gorm';
fmt.Println("testing mssql...")
if dbDSN == "" {
dbDSN = "sqlserver://gorm:LoremIpsum86@localhost:9930?database=gorm"
}
db, err = gorm.Open("mssql", dbDSN)
default:
fmt.Println("testing sqlite3...")
db, err = gorm.Open("sqlite3", filepath.Join(os.TempDir(), "gorm.db"))
}
// db.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)})
// db.SetLogger(log.New(os.Stdout, "\r\n", 0))
if debug := os.Getenv("DEBUG"); debug == "true" {
db.LogMode(true)
} else if debug == "false" {
db.LogMode(false)
}
db.DB().SetMaxIdleConns(10)
return
}
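// Illustrative invocation (a sketch of how the suite can be driven; the DSNs
// below are only examples mirroring the defaults above):
//
//	go test ./...                                   // falls back to a temporary sqlite3 file
//	GORM_DIALECT=postgres GORM_DSN="user=gorm password=gorm DB.name=gorm port=9920 sslmode=disable" DEBUG=true go test ./...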
func TestOpen_ReturnsError_WithBadArgs(t *testing.T) {
stringRef := "foo"
testCases := []interface{}{42, time.Now(), &stringRef}
for _, tc := range testCases {
t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) {
_, err := gorm.Open("postgresql", tc)
if err == nil {
t.Error("Should got error with invalid database source")
}
if !strings.HasPrefix(err.Error(), "invalid database source:") {
t.Errorf("Should got error starting with \"invalid database source:\", but got %q", err.Error())
}
})
}
}
func TestStringPrimaryKey(t *testing.T) {
type UUIDStruct struct {
ID string `gorm:"primary_key"`
Name string
}
DB.DropTable(&UUIDStruct{})
DB.AutoMigrate(&UUIDStruct{})
data := UUIDStruct{ID: "uuid", Name: "hello"}
if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello" {
t.Errorf("string primary key should not be populated")
}
data = UUIDStruct{ID: "uuid", Name: "hello world"}
if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello world" {
t.Errorf("string primary key should not be populated")
}
}
func TestExceptionsWithInvalidSql(t *testing.T) {
var columns []string
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
var count1, count2 int64
DB.Model(&User{}).Count(&count1)
if count1 <= 0 {
t.Errorf("Should find some users")
}
if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
DB.Model(&User{}).Count(&count2)
if count1 != count2 {
t.Errorf("No user should not be deleted by invalid SQL")
}
}
func TestSetTable(t *testing.T) {
DB.Create(getPreparedUser("pluck_user1", "pluck_user"))
DB.Create(getPreparedUser("pluck_user2", "pluck_user"))
DB.Create(getPreparedUser("pluck_user3", "pluck_user"))
if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil {
t.Error("No errors should happen if set table for pluck", err)
}
var users []User
if DB.Table("users").Find(&[]User{}).Error != nil {
t.Errorf("No errors should happen if set table for find")
}
if DB.Table("invalid_table").Find(&users).Error == nil {
t.Errorf("Should got error when table is set to an invalid table")
}
DB.Exec("drop table deleted_users;")
if DB.Table("deleted_users").CreateTable(&User{}).Error != nil {
t.Errorf("Create table with specified table")
}
DB.Table("deleted_users").Save(&User{Name: "DeletedUser"})
var deletedUsers []User
DB.Table("deleted_users").Find(&deletedUsers)
if len(deletedUsers) != 1 {
t.Errorf("Query from specified table")
}
DB.Save(getPreparedUser("normal_user", "reset_table"))
DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table"))
var user1, user2, user3 User
DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3)
if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") {
t.Errorf("unset specified table with blank string")
}
}
type Order struct {
}
type Cart struct {
}
func (c Cart) TableName() string {
return "shopping_cart"
}
func TestHasTable(t *testing.T) {
type Foo struct {
Id int
Stuff string
}
DB.DropTable(&Foo{})
// Table should not exist at this point, HasTable should return false
if ok := DB.HasTable("foos"); ok {
t.Errorf("Table should not exist, but does")
}
if ok := DB.HasTable(&Foo{}); ok {
t.Errorf("Table should not exist, but does")
}
// We create the table
if err := DB.CreateTable(&Foo{}).Error; err != nil {
t.Errorf("Table should be created")
}
// And now it should exits, and HasTable should return true
if ok := DB.HasTable("foos"); !ok {
t.Errorf("Table should exist, but HasTable informs it does not")
}
if ok := DB.HasTable(&Foo{}); !ok {
t.Errorf("Table should exist, but HasTable informs it does not")
}
}
func TestTableName(t *testing.T) {
DB := DB.Model("")
if DB.NewScope(Order{}).TableName() != "orders" {
t.Errorf("Order's table name should be orders")
}
if DB.NewScope(&Order{}).TableName() != "orders" {
t.Errorf("&Order's table name should be orders")
}
if DB.NewScope([]Order{}).TableName() != "orders" {
t.Errorf("[]Order's table name should be orders")
}
if DB.NewScope(&[]Order{}).TableName() != "orders" {
t.Errorf("&[]Order's table name should be orders")
}
DB.SingularTable(true)
if DB.NewScope(Order{}).TableName() != "order" {
t.Errorf("Order's singular table name should be order")
}
if DB.NewScope(&Order{}).TableName() != "order" {
t.Errorf("&Order's singular table name should be order")
}
if DB.NewScope([]Order{}).TableName() != "order" {
t.Errorf("[]Order's singular table name should be order")
}
if DB.NewScope(&[]Order{}).TableName() != "order" {
t.Errorf("&[]Order's singular table name should be order")
}
if DB.NewScope(&Cart{}).TableName() != "shopping_cart" {
t.Errorf("&Cart's singular table name should be shopping_cart")
}
if DB.NewScope(Cart{}).TableName() != "shopping_cart" {
t.Errorf("Cart's singular table name should be shopping_cart")
}
if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" {
t.Errorf("&[]Cart's singular table name should be shopping_cart")
}
if DB.NewScope([]Cart{}).TableName() != "shopping_cart" {
t.Errorf("[]Cart's singular table name should be shopping_cart")
}
DB.SingularTable(false)
}
func TestTableNameConcurrently(t *testing.T) {
DB := DB.Model("")
if DB.NewScope(Order{}).TableName() != "orders" {
t.Errorf("Order's table name should be orders")
}
var wg sync.WaitGroup
wg.Add(10)
for i := 1; i <= 10; i++ {
go func(db *gorm.DB) {
DB.SingularTable(true)
wg.Done()
}(DB)
}
wg.Wait()
if DB.NewScope(Order{}).TableName() != "order" {
t.Errorf("Order's singular table name should be order")
}
DB.SingularTable(false)
}
func TestNullValues(t *testing.T) {
DB.DropTable(&NullValue{})
DB.AutoMigrate(&NullValue{})
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello", Valid: true},
Gender: &sql.NullString{String: "M", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: true},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: true},
}).Error; err != nil {
t.Errorf("Not error should raise when test null value")
}
var nv NullValue
DB.First(&nv, "name = ?", "hello")
if nv.Name.String != "hello" || nv.Gender.String != "M" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello-2", Valid: true},
Gender: &sql.NullString{String: "F", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error; err != nil {
t.Errorf("Not error should raise when test null value")
}
var nv2 NullValue
DB.First(&nv2, "name = ?", "hello-2")
if nv2.Name.String != "hello-2" || nv2.Gender.String != "F" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello-3", Valid: false},
Gender: &sql.NullString{String: "M", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error; err == nil {
t.Errorf("Can't save because of name can't be null")
}
}
func TestNullValuesWithFirstOrCreate(t *testing.T) {
var nv1 = NullValue{
Name: sql.NullString{String: "first_or_create", Valid: true},
Gender: &sql.NullString{String: "M", Valid: true},
}
var nv2 NullValue
result := DB.Where(nv1).FirstOrCreate(&nv2)
if result.RowsAffected != 1 {
t.Errorf("RowsAffected should be 1 after create some record")
}
if result.Error != nil {
t.Errorf("Should not raise any error, but got %v", result.Error)
}
if nv2.Name.String != "first_or_create" || nv2.Gender.String != "M" {
t.Errorf("first or create with nullvalues")
}
if err := DB.Where(nv1).Assign(NullValue{Age: sql.NullInt64{Int64: 18, Valid: true}}).FirstOrCreate(&nv2).Error; err != nil {
t.Errorf("Should not raise any error, but got %v", err)
}
if nv2.Age.Int64 != 18 {
t.Errorf("should update age to 18")
}
}
func TestTransaction(t *testing.T) {
tx := DB.Begin()
u := User{Name: "transcation"}
if err := tx.Save(&u).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil {
t.Errorf("Should find saved record")
}
if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil {
t.Errorf("Should return the underlying sql.Tx")
}
tx.Rollback()
if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil {
t.Errorf("Should not find record after rollback")
}
tx2 := DB.Begin()
u2 := User{Name: "transcation-2"}
if err := tx2.Save(&u2).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx2.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should find saved record")
}
tx2.Commit()
if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should be able to find committed record")
}
}
func TestRow(t *testing.T) {
user1 := User{Name: "RowUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "RowUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "RowUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row()
var age int64
row.Scan(&age)
if age != 10 {
t.Errorf("Scan with Row")
}
}
func TestRows(t *testing.T) {
user1 := User{Name: "RowsUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "RowsUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "RowsUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
if err != nil {
t.Errorf("Not error should happen, got %v", err)
}
count := 0
for rows.Next() {
var name string
var age int64
rows.Scan(&name, &age)
count++
}
if count != 2 {
t.Errorf("Should found two records")
}
}
func TestScanRows(t *testing.T) {
user1 := User{Name: "ScanRowsUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ScanRowsUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ScanRowsUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
if err != nil {
t.Errorf("Not error should happen, got %v", err)
}
type Result struct {
Name string
Age int
}
var results []Result
for rows.Next() {
var result Result
if err := DB.ScanRows(rows, &result); err != nil {
t.Errorf("should get no error, but got %v", err)
}
results = append(results, result)
}
if !reflect.DeepEqual(results, []Result{{Name: "ScanRowsUser2", Age: 10}, {Name: "ScanRowsUser3", Age: 20}}) {
t.Errorf("Should find expected results")
}
}
func TestScan(t *testing.T) {
user1 := User{Name: "ScanUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ScanUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ScanUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Age int
}
var res result
DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res)
if res.Name != user3.Name {
t.Errorf("Scan into struct should work")
}
var doubleAgeRes = &result{}
if err := DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes).Error; err != nil {
t.Errorf("Scan to pointer of pointer")
}
if doubleAgeRes.Age != res.Age*2 {
t.Errorf("Scan double age as age")
}
var ress []result
DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Scan into struct map")
}
}
func TestRaw(t *testing.T) {
user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Email string
}
var ress []result
DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Raw with scan")
}
rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows()
count := 0
for rows.Next() {
count++
}
if count != 1 {
t.Errorf("Raw with Rows should find one record with name 3")
}
DB.Exec("update users set name=? where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name})
if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != gorm.ErrRecordNotFound {
t.Error("Raw sql to update records")
}
}
func TestGroup(t *testing.T) {
rows, err := DB.Select("name").Table("users").Group("name").Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
rows.Scan(&name)
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestJoins(t *testing.T) {
var user = User{
Name: "joins",
CreditCard: CreditCard{Number: "411111111111"},
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var users1 []User
DB.Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Find(&users1)
if len(users1) != 2 {
t.Errorf("should find two users using left join")
}
var users2 []User
DB.Joins("left join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Where("name = ?", "joins").First(&users2)
if len(users2) != 1 {
t.Errorf("should find one users using left join with conditions")
}
var users3 []User
DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where("name = ?", "joins").First(&users3)
if len(users3) != 1 {
t.Errorf("should find one users using multiple left join conditions")
}
var users4 []User
DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "422222222222").Where("name = ?", "joins").First(&users4)
if len(users4) != 0 {
t.Errorf("should find no user when searching with unexisting credit card")
}
var users5 []User
db5 := DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where(User{Id: 1}).Where(Email{Id: 1}).Not(Email{Id: 10}).First(&users5)
if db5.Error != nil {
t.Errorf("Should not raise error for join where identical fields in different tables. Error: %s", db5.Error.Error())
}
}
type JoinedIds struct {
UserID int64 `gorm:"column:id"`
BillingAddressID int64 `gorm:"column:id"`
EmailID int64 `gorm:"column:id"`
}
func TestScanIdenticalColumnNames(t *testing.T) {
var user = User{
Name: "joinsIds",
Email: "[email protected]",
BillingAddress: Address{
Address1: "One Park Place",
},
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var users []JoinedIds
DB.Select("users.id, addresses.id, emails.id").Table("users").
Joins("left join addresses on users.billing_address_id = addresses.id").
Joins("left join emails on emails.user_id = users.id").
Where("name = ?", "joinsIds").Scan(&users)
if len(users) != 2 {
t.Fatal("should find two rows using left join")
}
if user.Id != users[0].UserID {
t.Errorf("Expected result row to contain UserID %d, but got %d", user.Id, users[0].UserID)
}
if user.Id != users[1].UserID {
t.Errorf("Expected result row to contain UserID %d, but got %d", user.Id, users[1].UserID)
}
if user.BillingAddressID.Int64 != users[0].BillingAddressID {
t.Errorf("Expected result row to contain BillingAddressID %d, but got %d", user.BillingAddressID.Int64, users[0].BillingAddressID)
}
if user.BillingAddressID.Int64 != users[1].BillingAddressID {
t.Errorf("Expected result row to contain BillingAddressID %d, but got %d", user.BillingAddressID.Int64, users[0].BillingAddressID)
}
if users[0].EmailID == users[1].EmailID {
t.Errorf("Email ids should be unique. Got %d and %d", users[0].EmailID, users[1].EmailID)
}
if int64(user.Emails[0].Id) != users[0].EmailID && int64(user.Emails[1].Id) != users[0].EmailID {
t.Errorf("Expected result row ID to be either %d or %d, but was %d", user.Emails[0].Id, user.Emails[1].Id, users[0].EmailID)
}
if int64(user.Emails[0].Id) != users[1].EmailID && int64(user.Emails[1].Id) != users[1].EmailID {
t.Errorf("Expected result row ID to be either %d or %d, but was %d", user.Emails[0].Id, user.Emails[1].Id, users[1].EmailID)
}
}
func TestJoinsWithSelect(t *testing.T) {
type result struct {
Name string
Email string
}
user := User{
Name: "joins_with_select",
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var results []result
DB.Table("users").Select("name, emails.email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins_with_select").Scan(&results)
if len(results) != 2 || results[0].Email != "[email protected]" || results[1].Email != "[email protected]" {
t.Errorf("Should find all two emails with Join select")
}
}
func TestHaving(t *testing.T) {
rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
var total int64
rows.Scan(&name, &total)
if name == "2" && total != 1 {
t.Errorf("Should have one user having name 2")
}
if name == "3" && total != 2 {
t.Errorf("Should have two users having name 3")
}
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestQueryBuilderSubselectInWhere(t *testing.T) {
user := User{Name: "query_expr_select_ruser1", Email: "[email protected]", Age: 32}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser2", Email: "[email protected]", Age: 16}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser3", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser4", Email: "[email protected]", Age: 128}
DB.Save(&user)
var users []User
DB.Select("*").Where("name IN (?)", DB.
Select("name").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users)
if len(users) != 4 {
t.Errorf("Four users should be found, instead found %d", len(users))
}
DB.Select("*").Where("name LIKE ?", "query_expr_select%").Where("age >= (?)", DB.
Select("AVG(age)").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users)
if len(users) != 2 {
t.Errorf("Two users should be found, instead found %d", len(users))
}
}
func TestQueryBuilderRawQueryWithSubquery(t *testing.T) {
user := User{Name: "subquery_test_user1", Age: 10}
DB.Save(&user)
user = User{Name: "subquery_test_user2", Age: 11}
DB.Save(&user)
user = User{Name: "subquery_test_user3", Age: 12}
DB.Save(&user)
var count int
err := DB.Raw("select count(*) from (?) tmp",
DB.Table("users").
Select("name").
Where("age >= ? and name in (?)", 10, []string{"subquery_test_user1", "subquery_test_user2"}).
Group("name").
QueryExpr(),
).Count(&count).Error
if err != nil {
t.Errorf("Expected to get no errors, but got %v", err)
}
if count != 2 {
t.Errorf("Row count must be 2, instead got %d", count)
}
err = DB.Raw("select count(*) from (?) tmp",
DB.Table("users").
Select("name").
Where("name LIKE ?", "subquery_test%").
Not("age <= ?", 10).Not("name in (?)", []string{"subquery_test_user1", "subquery_test_user2"}).
Group("name").
QueryExpr(),
).Count(&count).Error
if err != nil {
t.Errorf("Expected to get no errors, but got %v", err)
}
if count != 1 {
t.Errorf("Row count must be 1, instead got %d", count)
}
}
func TestQueryBuilderSubselectInHaving(t *testing.T) {
user := User{Name: "query_expr_having_ruser1", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser2", Email: "[email protected]", Age: 128}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser3", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser4", Email: "[email protected]", Age: 128}
DB.Save(&user)
var users []User
DB.Select("AVG(age) as avgage").Where("name LIKE ?", "query_expr_having_%").Group("email").Having("AVG(age) > (?)", DB.
Select("AVG(age)").Where("name LIKE ?", "query_expr_having_%").Table("users").QueryExpr()).Find(&users)
if len(users) != 1 {
t.Errorf("Two user group should be found, instead found %d", len(users))
}
}
func DialectHasTzSupport() bool {
// NB: mssql and FoundationDB do not support time zones.
if dialect := os.Getenv("GORM_DIALECT"); dialect == "foundation" {
return false
}
return true
}
func TestTimeWithZone(t *testing.T) {
var format = "2006-01-02 15:04:05 -0700"
var times []time.Time
GMT8, _ := time.LoadLocation("Asia/Shanghai")
times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8))
times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC))
for index, vtime := range times {
name := "time_with_zone_" + strconv.Itoa(index)
user := User{Name: name, Birthday: &vtime}
if !DialectHasTzSupport() {
// If our driver dialect doesn't support TZ's, just use UTC for everything here.
utcBirthday := user.Birthday.UTC()
user.Birthday = &utcBirthday
}
DB.Save(&user)
expectedBirthday := "2013-02-18 17:51:49 +0000"
foundBirthday := user.Birthday.UTC().Format(format)
if foundBirthday != expectedBirthday {
t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
}
var findUser, findUser2, findUser3 User
DB.First(&findUser, "name = ?", name)
foundBirthday = findUser.Birthday.UTC().Format(format)
if foundBirthday != expectedBirthday {
t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
}
if DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() {
t.Errorf("User should be found")
}
if !DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() {
t.Errorf("User should not be found")
}
}
}
func TestHstore(t *testing.T) {
type Details struct {
Id int64
Bulk postgres.Hstore
}
if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" {
t.Skip()
}
if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil {
fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m")
panic(fmt.Sprintf("No error should happen when create hstore extension, but got %+v", err))
}
DB.Exec("drop table details")
if err := DB.CreateTable(&Details{}).Error; err != nil {
panic(fmt.Sprintf("No error should happen when create table, but got %+v", err))
}
bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait"
bulk := map[string]*string{
"bankAccountId": &bankAccountId,
"phoneNumber": &phoneNumber,
"opinion": &opinion,
}
d := Details{Bulk: bulk}
DB.Save(&d)
var d2 Details
if err := DB.First(&d2).Error; err != nil {
t.Errorf("Got error when tried to fetch details: %+v", err)
}
for k := range bulk {
if r, ok := d2.Bulk[k]; ok {
if res, _ := bulk[k]; *res != *r {
t.Errorf("Details should be equal")
}
} else {
t.Errorf("Details should be existed")
}
}
}
func TestSetAndGet(t *testing.T) {
if value, ok := DB.Set("hello", "world").Get("hello"); !ok {
t.Errorf("Should be able to get setting after set")
} else {
if value.(string) != "world" {
t.Errorf("Setted value should not be changed")
}
}
if _, ok := DB.Get("non_existing"); ok {
t.Errorf("Get non existing key should return error")
}
}
func TestCompatibilityMode(t *testing.T) {
DB, _ := gorm.Open("testdb", "")
testdb.SetQueryFunc(func(query string) (driver.Rows, error) {
columns := []string{"id", "name", "age"}
result := `
1,Tim,20
2,Joe,25
3,Bob,30
`
return testdb.RowsFromCSVString(columns, result), nil
})
var users []User
DB.Find(&users)
if (users[0].Name != "Tim") || len(users) != 3 {
t.Errorf("Unexcepted result returned")
}
}
func TestOpenExistingDB(t *testing.T) {
DB.Save(&User{Name: "jnfeinstein"})
dialect := os.Getenv("GORM_DIALECT")
db, err := gorm.Open(dialect, DB.DB())
if err != nil {
t.Errorf("Should have wrapped the existing DB connection")
}
var user User
if db.Where("name = ?", "jnfeinstein").First(&user).Error == gorm.ErrRecordNotFound {
t.Errorf("Should have found existing record")
}
}
func TestDdlErrors(t *testing.T) {
var err error
if err = DB.Close(); err != nil {
t.Errorf("Closing DDL test db connection err=%s", err)
}
defer func() {
// Reopen DB connection.
if DB, err = OpenTestConnection(); err != nil {
t.Fatalf("Failed re-opening db connection: %s", err)
}
}()
if err := DB.Find(&User{}).Error; err == nil {
t.Errorf("Expected operation on closed db to produce an error, but err was nil")
}
}
func TestOpenWithOneParameter(t *testing.T) {
db, err := gorm.Open("dialect")
if db != nil {
t.Error("Open with one parameter returned non nil for db")
}
if err == nil {
t.Error("Open with one parameter returned err as nil")
}
}
func TestSaveAssociations(t *testing.T) {
db := DB.New()
deltaAddressCount := 0
if err := db.Model(&Address{}).Count(&deltaAddressCount).Error; err != nil {
t.Errorf("failed to fetch address count")
t.FailNow()
}
placeAddress := &Address{
Address1: "somewhere on earth",
}
ownerAddress1 := &Address{
Address1: "near place address",
}
ownerAddress2 := &Address{
Address1: "address2",
}
db.Create(placeAddress)
addressCountShouldBe := func(t *testing.T, expectedCount int) {
countFromDB := 0
t.Helper()
err := db.Model(&Address{}).Count(&countFromDB).Error
if err != nil {
t.Error("failed to fetch address count")
}
if countFromDB != expectedCount {
t.Errorf("address count mismatch: %d", countFromDB)
}
}
addressCountShouldBe(t, deltaAddressCount+1)
// owner address should be created, place address should be reused
place1 := &Place{
PlaceAddressID: placeAddress.ID,
PlaceAddress: placeAddress,
OwnerAddress: ownerAddress1,
}
err := db.Create(place1).Error
if err != nil {
t.Errorf("failed to store place: %s", err.Error())
}
addressCountShouldBe(t, deltaAddressCount+2)
// owner address should be created again, place address should be reused
place2 := &Place{
PlaceAddressID: placeAddress.ID,
PlaceAddress: &Address{
ID: 777,
Address1: "address1",
},
OwnerAddress: ownerAddress2,
OwnerAddressID: 778,
}
err = db.Create(place2).Error
if err != nil {
t.Errorf("failed to store place: %s", err.Error())
}
addressCountShouldBe(t, deltaAddressCount+3)
count := 0
db.Model(&Place{}).Where(&Place{
PlaceAddressID: placeAddress.ID,
OwnerAddressID: ownerAddress1.ID,
}).Count(&count)
if count != 1 {
t.Errorf("only one instance of (%d, %d) should be available, found: %d",
placeAddress.ID, ownerAddress1.ID, count)
}
db.Model(&Place{}).Where(&Place{
PlaceAddressID: placeAddress.ID,
OwnerAddressID: ownerAddress2.ID,
}).Count(&count)
if count != 1 {
t.Errorf("only one instance of (%d, %d) should be available, found: %d",
placeAddress.ID, ownerAddress2.ID, count)
}
db.Model(&Place{}).Where(&Place{
PlaceAddressID: placeAddress.ID,
}).Count(&count)
if count != 2 {
t.Errorf("two instances of (%d) should be available, found: %d",
placeAddress.ID, count)
}
}
func TestBlockGlobalUpdate(t *testing.T) {
db := DB.New()
db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
err := db.Model(&Toy{}).Update("OwnerType", "Human").Error
if err != nil {
t.Error("Unexpected error on global update")
}
err = db.Delete(&Toy{}).Error
if err != nil {
t.Error("Unexpected error on global delete")
}
db.BlockGlobalUpdate(true)
db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
err = db.Model(&Toy{}).Update("OwnerType", "Human").Error
if err == nil {
t.Error("Expected error on global update")
}
err = db.Model(&Toy{}).Where(&Toy{OwnerType: "Martian"}).Update("OwnerType", "Astronaut").Error
if err != nil {
t.Error("Unxpected error on conditional update")
}
err = db.Delete(&Toy{}).Error
if err == nil {
t.Error("Expected error on global delete")
}
err = db.Where(&Toy{OwnerType: "Martian"}).Delete(&Toy{}).Error
if err != nil {
t.Error("Unexpected error on conditional delete")
}
}
func TestCountWithHaving(t *testing.T) {
db := DB.New()
db.Delete(User{})
defer db.Delete(User{})
DB.Create(getPreparedUser("user1", "pluck_user"))
DB.Create(getPreparedUser("user2", "pluck_user"))
user3 := getPreparedUser("user3", "pluck_user")
user3.Languages = []Language{}
DB.Create(user3)
var count int
err := db.Model(User{}).Select("users.id").
Joins("LEFT JOIN user_languages ON user_languages.user_id = users.id").
Joins("LEFT JOIN languages ON user_languages.language_id = languages.id").
Group("users.id").Having("COUNT(languages.id) > 1").Count(&count).Error
if err != nil {
t.Error("Unexpected error on query count with having")
}
if count != 2 {
t.Error("Unexpected result on query count with having")
}
}
func BenchmarkGorm(b *testing.B) {
b.N = 2000
for x := 0; x < b.N; x++ {
e := strconv.Itoa(x) + "[email protected]"
now := time.Now()
email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now}
// Insert
DB.Save(&email)
// Query
DB.First(&EmailWithIdx{}, "email = ?", e)
// Update
DB.Model(&email).UpdateColumn("email", "new-"+e)
// Delete
DB.Delete(&email)
}
}
func BenchmarkRawSql(b *testing.B) {
DB, _ := sql.Open("postgres", "user=gorm DB.ame=gorm sslmode=disable")
DB.SetMaxIdleConns(10)
insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id"
querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1"
updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3"
deleteSql := "DELETE FROM orders WHERE id = $1"
b.N = 2000
for x := 0; x < b.N; x++ {
var id int64
e := strconv.Itoa(x) + "[email protected]"
now := time.Now()
email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now}
// Insert
DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id)
// Query
rows, _ := DB.Query(querySql, email.Email)
rows.Close()
// Update
DB.Exec(updateSql, "new-"+e, time.Now(), id)
// Delete
DB.Exec(deleteSql, id)
}
}
func parseTime(str string) *time.Time {
t := now.New(time.Now().UTC()).MustParse(str)
return &t
}
| [
"\"GORM_DSN\"",
"\"GORM_DIALECT\"",
"\"DEBUG\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\""
]
| []
| [
"GORM_DIALECT",
"GORM_DSN",
"DEBUG"
]
| [] | ["GORM_DIALECT", "GORM_DSN", "DEBUG"] | go | 3 | 0 | |
scripts/ensemble.py | import os
from pathlib import Path
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from nltk import word_tokenize
import pandas as pd
import numpy as np
from sklearn import tree
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier, RandomForestClassifier, VotingClassifier
from sklearn.metrics import confusion_matrix, classification_report
# Set environment variable in Docker to use correct directory
# if None specified, then default to local machine
try:
in_docker = os.environ["DOCKER"]
except KeyError:
in_docker = None
def get_predictive_model():
if in_docker == 'True':
model_dir = '/data/models/'
data_dir = '/data/data/'
else:
model_dir = 'models/'
data_dir = 'data/'
# get model and convert to w2v
glove_input_file = model_dir + 'w2v_glove_300.txt'
word2vec_output_file = '/tmp/w2v.txt'
glove2word2vec(glove_input_file, word2vec_output_file)
model = KeyedVectors.load_word2vec_format(word2vec_output_file, binary=False)
# get stop words
sw = data_dir + "stopwords.txt"
with open(sw) as f:
stop_words = f.read().splitlines()
def get_sentence_vector(sentence):
word_list = word_tokenize(sentence)
word_list = [word.lower() for word in word_list if word.lower() not in stop_words]
word_vectors = []
for x in word_list:
try:
w_vec = model.get_vector(x)
word_vectors.append(w_vec)
except KeyError:
pass
return sum(word_vectors) / len(word_vectors)
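# Illustrative usage only (hypothetical sentence): get_sentence_vector tokenizes,
# drops stop words, looks up each remaining token in the GloVe model, and returns
# the element-wise mean of the word vectors, e.g.
#   vec = get_sentence_vector("patient denies chest pain")
#   vec.shape  # (300,) for the 300-dimensional GloVe vectors assumed above
# Note that a sentence with no in-vocabulary tokens would divide by zero here.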
# load pre-partitioned train/test sets
test = pd.read_csv(data_dir + "test.csv")
train = pd.read_csv(data_dir + "train.csv")
test['vec'] = [get_sentence_vector(x) for x in test.text]
train['vec'] = [get_sentence_vector(x) for x in train.text]
train_grouped_abbr = train.groupby('abbrev')
test_grouped_abbr = test.groupby('abbrev')
# load full data set
frames = [test, train]
df = pd.concat(frames)
print("running voting for each acronym")
# Loop through different abbreviations
for abbr in train.abbrev.unique():
train_abbr = train_grouped_abbr.get_group(abbr)
test_abbr = test_grouped_abbr.get_group(abbr)
X_train = np.array(list(train_abbr.vec))
y_train = train_abbr.expansion
X_test = np.array(list(test_abbr.vec))
y_test = test_abbr.expansion
# Support Vector Machine
svm = SVC(C=1.0, kernel='linear', degree=1)
# Logistic Regression
lr = LogisticRegression()
# Multilayer Perceptron
mlp = MLPClassifier()
# Bagging
bag = BaggingClassifier(tree.DecisionTreeClassifier(random_state=1))
# Boosting
num_trees = 70
boost = AdaBoostClassifier(n_estimators=num_trees, random_state=1032).fit(X_train, y_train)
# Random Forest
rf = RandomForestClassifier()
estimators = [('svm', svm), ('logistic_regression', lr), ('mlp', mlp), ('bagging', bag), ('boosting', boost), ('random_forest', rf)]
# ensembled classifier
ensemble = VotingClassifier(estimators).fit(X_train, y_train)
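# Note: with no `voting` argument, sklearn's VotingClassifier defaults to hard
# (majority-vote) voting across the six estimators above; soft voting would also
# require probability estimates from every estimator (e.g. SVC(probability=True)).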
pred = ensemble.predict(X_test)
output_dir = Path(data_dir + "output")
output_dir.mkdir(parents=True, exist_ok=True)
(pd.DataFrame({'predictions':pred})).to_csv(output_dir / "ensemble_{}.csv".format(abbr))
cm = confusion_matrix(y_test, pred, labels=list(set(df.expansion)))
print()
print("MODEL -> ENSEMBLE")
print("##" * 20)
print(" " * 20 + abbr)
print("##" * 20)
print(classification_report(y_test, pred))
print()
print(f'examples (first 5 cases)\t\t\t\t\t\ttrue_abbr\t\t\tpred_abbr')
# Print first 5 cases
i = 0
for input_row, true_abbr, pred_abbr in zip(test_abbr.iterrows(), y_test, pred):
sn_start = max(input_row[1].start - 25, 0)
sn_end = min(input_row[1].end + 25, len(input_row[1].text))
example_text = input_row[1].text[sn_start: sn_end]
print(f'... {example_text} ...\t{true_abbr:<35}\t{pred_abbr}')
if i == 4:  # stop after printing the 5th case
break
i += 1
if __name__ == '__main__':
print("Running ensemble")
get_predictive_model()
| []
| []
| [
"DOCKER"
]
| [] | ["DOCKER"] | python | 1 | 0 | |
coffeebot/config.py | import os
__DIR__ = os.path.dirname(__file__)
if os.getenv('COFFEEBOT_DATABASE_URI'):
DATABASE_URI = os.getenv('COFFEEBOT_DATABASE_URI')
else:
DATABASE_URI = 'sqlite:////' + __DIR__ + '/../' + \
os.getenv('COFFEEBOT_DATABASE_FILENAME')
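# Illustrative example (hypothetical values): with COFFEEBOT_DATABASE_URI unset and
# COFFEEBOT_DATABASE_FILENAME=coffeebot.db, DATABASE_URI resolves to
# 'sqlite:////<package dir>/../coffeebot.db'; if COFFEEBOT_DATABASE_URI is set, it is
# used verbatim.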
DEBUG = int(os.getenv('COFFEEBOT_DEBUG', 0))
URL = os.getenv('COFFEEBOT_MATTERMOST_URL')
PORT = int(os.getenv('COFFEEBOT_MATTERMOST_PORT'))
USERNAME = os.getenv('COFFEEBOT_MATTERMOST_USERNAME')
PASSWORD = os.getenv('COFFEEBOT_MATTERMOST_PASSWORD')
TOKEN = os.getenv('COFFEEBOT_MATTERMOST_TOKEN')
TEAM_NAME = os.getenv('COFFEEBOT_MATTERMOST_TEAM')
CHANNEL_NAME = os.getenv('COFFEEBOT_MATTERMOST_CHANNEL')
IGNORED_USER_IDS = str(os.getenv('COFFEEBOT_MATTERMOST_IGNORE_USER_IDS')).split(',')
MESSAGE = """
You have been matched to meet up! Please respond with your availabilities :)
"""
| []
| []
| [
"COFFEEBOT_DATABASE_FILENAME",
"COFFEEBOT_MATTERMOST_TOKEN",
"COFFEEBOT_MATTERMOST_PORT",
"COFFEEBOT_DATABASE_URI",
"COFFEEBOT_DEBUG",
"COFFEEBOT_MATTERMOST_PASSWORD",
"COFFEEBOT_MATTERMOST_CHANNEL",
"COFFEEBOT_MATTERMOST_TEAM",
"COFFEEBOT_MATTERMOST_USERNAME",
"COFFEEBOT_MATTERMOST_URL",
"COFFEEBOT_MATTERMOST_IGNORE_USER_IDS"
]
| [] | ["COFFEEBOT_DATABASE_FILENAME", "COFFEEBOT_MATTERMOST_TOKEN", "COFFEEBOT_MATTERMOST_PORT", "COFFEEBOT_DATABASE_URI", "COFFEEBOT_DEBUG", "COFFEEBOT_MATTERMOST_PASSWORD", "COFFEEBOT_MATTERMOST_CHANNEL", "COFFEEBOT_MATTERMOST_TEAM", "COFFEEBOT_MATTERMOST_USERNAME", "COFFEEBOT_MATTERMOST_URL", "COFFEEBOT_MATTERMOST_IGNORE_USER_IDS"] | python | 11 | 0 | |
doc/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ArviZ documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 11 18:33:59 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import sys
from typing import Dict
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import arviz
arviz.rcParams["data.load"] = "eager"
arviz.Numba.disable_numba()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.insert(0, os.path.abspath("../sphinxext"))
thumb_directory = "example_thumbs"
if not os.path.isdir(thumb_directory):
os.mkdir(thumb_directory)
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.coverage",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"matplotlib.sphinxext.plot_directive",
"bokeh.sphinxext.bokeh_plot",
"numpydoc",
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"gallery_generator",
"myst_nb",
"sphinx_panels",
"notfound.extension",
]
# ipython directive configuration
ipython_warning_is_error = False
# Copy plot options from Seaborn
# Include the example source for plots in API docs
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# Generate API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["../_templates"]
#
# MyST related params
jupyter_execute_notebooks = "auto"
execution_excludepatterns = ["*.ipynb"]
myst_heading_anchors = 3
panels_add_bootstrap_css = False
# The base toctree document.
master_doc = "index"
# General information about the project.
project = "ArviZ"
copyright = "2018, ArviZ devs"
author = "ArviZ devs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
branch_name = os.environ.get("BUILD_SOURCEBRANCHNAME", "")
if branch_name == "main":
version = "dev"
else:
# The short X.Y version.
version = arviz.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "build", "Thumbs.db", ".DS_Store", "notebooks/.ipynb_checkpoints"]
# configure notfound extension to not add any prefix to the urls
notfound_urls_prefix = "/arviz/"
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"icon_links": [
{
"name": "GitHub",
"url": "https://github.com/arviz-devs/arviz",
"icon": "fab fa-github-square",
},
{
"name": "Twitter",
"url": "https://twitter.com/arviz_devs",
"icon": "fab fa-twitter-square",
},
],
"navbar_start": ["navbar-logo", "navbar-version"],
"use_edit_page_button": False, # TODO: see how to skip of fix for generated pages
"google_analytics_id": "G-W1G68W77YV",
}
html_context = {
"github_user": "arviz-devs",
"github_repo": "arviz",
"github_version": "main",
"doc_path": "doc/source/",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_static_path = ["_static", thumb_directory]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
# html_sidebars = {}
# use additional pages to add a 404 page
html_additional_pages = {
"404": "404.html",
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "ArviZdoc"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "ArviZ"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# -- Options for LaTeX output ---------------------------------------------
latex_elements: Dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "ArviZ.tex", "ArviZ Documentation", "ArviZ devs", "manual")]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "arviz", "ArviZ Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"ArviZ",
"ArviZ Documentation",
author,
"ArviZ",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# Example configuration for intersphinx
intersphinx_mapping = {
"xarray": ("http://xarray.pydata.org/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"pymc3": ("https://docs.pymc.io/", None),
"mpl": ("https://matplotlib.org/", None),
"bokeh": ("https://docs.bokeh.org/en/latest/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"zarr": ("https://zarr.readthedocs.io/en/stable/", None),
}
| []
| []
| [
"BUILD_SOURCEBRANCHNAME"
]
| [] | ["BUILD_SOURCEBRANCHNAME"] | python | 1 | 0 | |
src/java/API/NTHAPI/SitesResource.java | /**.Copyright 2016, University of Messina.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package API.NTHAPI;
//import API.NTHAPI.SiteResource;
import OSFFMIDM.SimpleIDM;
import java.io.File;
import javax.servlet.ServletContext;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.UriInfo;
import org.apache.log4j.Logger;
import org.jdom2.Element;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import utils.ParserXML;
/**
* REST Web Service for Site information management
*
* @author Giuseppe Tricomi
*/
@Path("/fednet/northBr/site")
public class SitesResource {
@Context
private UriInfo context;
private ParserXML parser;
private String fedSDNTarget; //it will be used to make request to web service with Client4WS class
static final Logger LOGGER = Logger.getLogger(SitesResource.class);
/**
* Creates a new instance of SitesResource
*/
public SitesResource() {
//String file=System.getenv("HOME");
//this.init(file+"/webapps/OSFFM/WEB-INF/Configuration_NTHBR_WS.xml");
this.init("/home/beacon/beaconConf/Configuration_NTHBR_WS.xml");
}
/**
*
* @param file File that contains the FEDSDN URI.
* @author gtricomi
*/
public void init(String file) {
Element params;
try {
parser = new ParserXML(new File(file));
params = parser.getRootElement().getChild("pluginParams");
fedSDNTarget = params.getChildText("fedSDNTarget");
} //init();
catch (Exception ex) {
ex.printStackTrace();
}
}
/**
* Retrieves the list of federated OS sites
* @return an instance of java.lang.String
* @author gtricomi
*/
@GET
@Produces("application/json")
public String getFederatedSites() {
//TODO return proper representation object
JSONArray arr=new JSONArray();
//while(??){
JSONObject element=new JSONObject();//BEACON>>> ADD LOGIC HERE
element.put("uuid", "Value1");
element.put("name", "Value2");
element.put("Available4Tenant",true); // or false
arr.add(element);
//}
JSONObject reply=new JSONObject();
reply.put("response", arr);
reply.put("returncode", 0); // or reply.put("returncode", 1);
reply.put("errormesg", "None"); //or reply.put("errormesg", "Mesg");
return reply.toJSONString();
}
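// Illustrative only: a successful reply from getFederatedSites is shaped like
// {"response":[{"uuid":"...","name":"...","Available4Tenant":true}],
//  "returncode":0,"errormesg":"None"}; returncode 1 with an error message signals a failure.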
/**
*
* @param siteid
* @return
* @author gtricomi
*/
@GET
@Path("/{site_id}")
@Produces("application/json")
public String getSiteInfo(@PathParam("site_id") String siteid) {
JSONObject reply=new JSONObject();
//TODO return proper representation object
LOGGER.error("This is a logging statement from log4j");
try{
SimpleIDM si=new SimpleIDM(); //BEACON>>> The logic needs to be implemented
}
catch(Exception ec){
LOGGER.error(ec.getMessage());
ec.printStackTrace();
}
reply.put("uuid", "Value1_"+siteid);
reply.put("name", "Value2");
reply.put("location", "Value3");
reply.put("Available4Tenant",true); // or false
reply.put("returncode", 0); //or reply.put("returncode", 1);
reply.put("errormesg", "None"); //or reply.put("errormesg", "Mesg");
return reply.toJSONString();
}
/**
* PUT method to insert a new site on Federation data space
* @return
* @author gtricomi
*/
@PUT
@Produces("application/json")
@Consumes(MediaType.APPLICATION_JSON)
public String putSite(String value){
//the logic and the information have to be designed
JSONObject j=new JSONObject();
JSONParser jp=new JSONParser();
try{
j=(JSONObject)jp.parse(value); //BEACON>>> The logic needs to be implemented
}catch(Exception e){}
return j.toJSONString(); // this is only a skeleton and must be modified
}
/**
* DELETE method for resource Site
* @param siteid
* @return
* @author gtricomi
*/
@DELETE
@Path("/{site_id}")
@Produces("application/json")
public String deleteSite(@PathParam("site_id") String siteid) {
JSONObject reply=new JSONObject();
//something TODO for the delete logic //BEACON>>> The logic needs to be implemented
reply.put("returncode", 0); // or reply.put("returncode", 1);
reply.put("errormesg", "None"); //or reply.put("errormesg", "Mesg");
return reply.toJSONString();
}
/**
* Sub-resource locator method for site
*/
@Path("site")
public SiteResource getSiteResource() {
return SiteResource.getInstance();
}
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | java | 1 | 0 | |
src/test/java/org/verdictdb/VerdictDBAggNullValueTest.java | package org.verdictdb;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import org.apache.commons.lang3.RandomStringUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.verdictdb.commons.DatabaseConnectionHelpers;
import org.verdictdb.commons.VerdictOption;
import org.verdictdb.connection.CachedDbmsConnection;
import org.verdictdb.connection.DbmsConnection;
import org.verdictdb.connection.JdbcConnection;
import org.verdictdb.coordinator.ScramblingCoordinator;
import org.verdictdb.coordinator.SelectQueryCoordinator;
import org.verdictdb.coordinator.VerdictResultStreamFromExecutionResultReader;
import org.verdictdb.core.resulthandler.ExecutionResultReader;
import org.verdictdb.core.scrambling.ScrambleMeta;
import org.verdictdb.core.scrambling.ScrambleMetaSet;
import org.verdictdb.exception.VerdictDBException;
import org.verdictdb.sqlsyntax.MysqlSyntax;
/**
 * This test checks that NULL is returned when no rows are selected by sum() or avg().
*/
public class VerdictDBAggNullValueTest {
// lineitem has 10 blocks, orders has 3 blocks;
// lineitem join orders has 12 blocks
static final int blockSize = 100;
static ScrambleMetaSet meta = new ScrambleMetaSet();
static VerdictOption options = new VerdictOption();
static Connection conn;
private static Statement stmt;
private static final String MYSQL_HOST;
static {
String env = System.getenv("BUILD_ENV");
if (env != null && env.equals("GitLab")) {
MYSQL_HOST = "mysql";
} else {
MYSQL_HOST = "localhost";
}
}
private static final String MYSQL_DATABASE =
"mysql_test_" + RandomStringUtils.randomAlphanumeric(8).toLowerCase();
private static final String MYSQL_USER = "root";
private static final String MYSQL_PASSWORD = "";
@BeforeClass
public static void setupMySqlDatabase() throws SQLException, VerdictDBException {
String mysqlConnectionString =
String.format("jdbc:mysql://%s?autoReconnect=true&useSSL=false", MYSQL_HOST);
conn =
DatabaseConnectionHelpers.setupMySql(
mysqlConnectionString, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DATABASE);
conn.setCatalog(MYSQL_DATABASE);
stmt = conn.createStatement();
stmt.execute(String.format("use `%s`", MYSQL_DATABASE));
DbmsConnection dbmsConn = JdbcConnection.create(conn);
// Create Scramble table
dbmsConn.execute(
String.format("DROP TABLE IF EXISTS `%s`.`lineitem_scrambled`", MYSQL_DATABASE));
dbmsConn.execute(String.format("DROP TABLE IF EXISTS `%s`.`orders_scrambled`", MYSQL_DATABASE));
ScramblingCoordinator scrambler =
new ScramblingCoordinator(dbmsConn, MYSQL_DATABASE, MYSQL_DATABASE, (long) 100);
ScrambleMeta meta1 =
scrambler.scramble(
MYSQL_DATABASE, "lineitem", MYSQL_DATABASE, "lineitem_scrambled", "uniform");
ScrambleMeta meta2 =
scrambler.scramble(MYSQL_DATABASE, "orders", MYSQL_DATABASE, "orders_scrambled", "uniform");
meta.addScrambleMeta(meta1);
meta.addScrambleMeta(meta2);
stmt.execute(String.format("drop schema if exists `%s`", options.getVerdictTempSchemaName()));
stmt.execute(
String.format("create schema if not exists `%s`", options.getVerdictTempSchemaName()));
}
@Test
public void testAvg() throws VerdictDBException {
// This query doesn't select any rows.
String sql = String.format(
"select avg(l_extendedprice) from " +
"%s.lineitem, %s.customer, %s.orders " +
"where c_mktsegment='AAAAAA' and c_custkey=o_custkey and o_orderkey=l_orderkey",
MYSQL_DATABASE, MYSQL_DATABASE, MYSQL_DATABASE);
JdbcConnection jdbcConn = new JdbcConnection(conn, new MysqlSyntax());
jdbcConn.setOutputDebugMessage(true);
DbmsConnection dbmsconn = new CachedDbmsConnection(jdbcConn);
dbmsconn.setDefaultSchema(MYSQL_DATABASE);
SelectQueryCoordinator coordinator = new SelectQueryCoordinator(dbmsconn);
coordinator.setScrambleMetaSet(meta);
ExecutionResultReader reader = coordinator.process(sql);
VerdictResultStream stream = new VerdictResultStreamFromExecutionResultReader(reader);
try {
while (stream.hasNext()) {
VerdictSingleResult rs = stream.next();
rs.next();
assertNull(rs.getValue(0));
assertEquals(0, rs.getDouble(0), 0);
assertEquals(0, rs.getInt(0));
}
} catch (RuntimeException e) {
throw e;
}
}
@Test
public void testSum() throws VerdictDBException {
// This query doesn't select any rows.
String sql = String.format(
"select sum(l_extendedprice) from " +
"%s.lineitem, %s.customer, %s.orders " +
"where c_mktsegment='AAAAAA' and c_custkey=o_custkey and o_orderkey=l_orderkey",
MYSQL_DATABASE, MYSQL_DATABASE, MYSQL_DATABASE);
JdbcConnection jdbcConn = new JdbcConnection(conn, new MysqlSyntax());
jdbcConn.setOutputDebugMessage(true);
DbmsConnection dbmsconn = new CachedDbmsConnection(jdbcConn);
dbmsconn.setDefaultSchema(MYSQL_DATABASE);
SelectQueryCoordinator coordinator = new SelectQueryCoordinator(dbmsconn);
coordinator.setScrambleMetaSet(meta);
ExecutionResultReader reader = coordinator.process(sql);
VerdictResultStream stream = new VerdictResultStreamFromExecutionResultReader(reader);
try {
while (stream.hasNext()) {
VerdictSingleResult rs = stream.next();
rs.next();
assertNull(rs.getValue(0));
assertEquals(0, rs.getDouble(0), 0);
assertEquals(0, rs.getInt(0));
}
} catch (RuntimeException e) {
throw e;
}
}
@Test
public void testSumAvg() throws VerdictDBException {
// This query doesn't select any rows.
String sql = String.format(
"select sum(l_extendedprice), avg(l_extendedprice) from " +
"%s.lineitem, %s.customer, %s.orders " +
"where c_mktsegment='AAAAAA' and c_custkey=o_custkey and o_orderkey=l_orderkey",
MYSQL_DATABASE, MYSQL_DATABASE, MYSQL_DATABASE);
JdbcConnection jdbcConn = new JdbcConnection(conn, new MysqlSyntax());
jdbcConn.setOutputDebugMessage(true);
DbmsConnection dbmsconn = new CachedDbmsConnection(jdbcConn);
dbmsconn.setDefaultSchema(MYSQL_DATABASE);
SelectQueryCoordinator coordinator = new SelectQueryCoordinator(dbmsconn);
coordinator.setScrambleMetaSet(meta);
ExecutionResultReader reader = coordinator.process(sql);
VerdictResultStream stream = new VerdictResultStreamFromExecutionResultReader(reader);
try {
while (stream.hasNext()) {
VerdictSingleResult rs = stream.next();
rs.next();
assertNull(rs.getValue(0));
assertEquals(0, rs.getDouble(0), 0);
assertEquals(0, rs.getInt(0));
assertNull(rs.getValue(1));
assertEquals(0, rs.getDouble(1), 0);
assertEquals(0, rs.getInt(1));
}
} catch (RuntimeException e) {
throw e;
}
}
@Test
public void testCount() throws VerdictDBException {
// This query doesn't select any rows.
String sql = String.format(
"select count(l_orderkey) from " +
"%s.lineitem, %s.customer, %s.orders " +
"where c_mktsegment='AAAAAA' and c_custkey=o_custkey and o_orderkey=l_orderkey",
MYSQL_DATABASE, MYSQL_DATABASE, MYSQL_DATABASE);
JdbcConnection jdbcConn = new JdbcConnection(conn, new MysqlSyntax());
jdbcConn.setOutputDebugMessage(true);
DbmsConnection dbmsconn = new CachedDbmsConnection(jdbcConn);
dbmsconn.setDefaultSchema(MYSQL_DATABASE);
SelectQueryCoordinator coordinator = new SelectQueryCoordinator(dbmsconn);
coordinator.setScrambleMetaSet(meta);
ExecutionResultReader reader = coordinator.process(sql);
VerdictResultStream stream = new VerdictResultStreamFromExecutionResultReader(reader);
try {
while (stream.hasNext()) {
VerdictSingleResult rs = stream.next();
rs.next();
assertEquals(0, rs.getDouble(0), 0);
assertEquals(0, rs.getInt(0));
}
} catch (RuntimeException e) {
throw e;
}
}
@AfterClass
public static void tearDown() throws SQLException {
stmt.execute(String.format("DROP SCHEMA IF EXISTS `%s`", MYSQL_DATABASE));
}
}
| [
"\"BUILD_ENV\""
]
| []
| [
"BUILD_ENV"
]
| [] | ["BUILD_ENV"] | java | 1 | 0 | |
client/ddexClient.go | package client
import (
"auctionBidder/utils"
"auctionBidder/web3"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/shopspring/decimal"
"github.com/sirupsen/logrus"
"os"
"strconv"
"strings"
"time"
)
type SimpleOrder struct {
Amount decimal.Decimal
Price decimal.Decimal
Side string
}
type OrderRes struct {
Id string
Status string
Side string
Amount decimal.Decimal
Price decimal.Decimal
AvailableAmount decimal.Decimal
FilledAmount decimal.Decimal
AvgPrice decimal.Decimal
}
type Balance struct {
Free decimal.Decimal
Lock decimal.Decimal
Total decimal.Decimal
}
type Inventory map[string]*Balance // symbol -> Balance
type Asset struct {
Symbol string
Address string
Decimal int32
}
type Market struct {
Base Asset
Quote Asset
PricePrecision int
PriceDecimal int
AmountDecimal int
}
type DdexClient struct {
Address string
Assets map[string]*Asset // symbol -> Asset
Markets map[string]*Market // "ETH-DAI" -> Market
hydroContract *web3.Contract
privateKey string
signCache string
lastSignTime int64
baseUrl string
}
func NewDdexClient(privateKey string) (client *DdexClient, err error) {
ethereumNodeUrl := os.Getenv("ETHEREUM_NODE_URL")
ddexBaseUrl := os.Getenv("DDEX_URL")
hydroContractAddress := os.Getenv("HYDRO_CONTRACT_ADDRESS")
web3 := web3.NewWeb3(ethereumNodeUrl)
address, err := web3.AddPrivateKey(privateKey)
if err != nil {
return
}
contract, err := web3.NewContract(utils.HydroAbi, hydroContractAddress)
if err != nil {
return
}
// get market meta data
var dataContainer IMarkets
resp, err := utils.Get(
utils.JoinUrlPath(ddexBaseUrl, fmt.Sprintf("markets")),
"",
utils.EmptyKeyPairList,
[]utils.KeyPair{{"Content-Type", "application/json"}})
if err != nil {
logrus.Error("call " + utils.JoinUrlPath(ddexBaseUrl, fmt.Sprintf("markets")) + " failed")
return
}
json.Unmarshal([]byte(resp), &dataContainer)
if dataContainer.Desc != "success" {
err = errors.New(dataContainer.Desc)
return
}
assets := map[string]*Asset{}
markets := map[string]*Market{}
for _, market := range dataContainer.Data.Markets {
if _, ok := assets[market.BaseAssetName]; !ok {
assets[market.BaseAssetName] = &Asset{market.BaseAssetName, market.BaseAssetAddress, int32(market.BaseAssetDecimals)}
}
if _, ok := assets[market.QuoteAssetName]; !ok {
assets[market.QuoteAssetName] = &Asset{market.QuoteAssetName, market.QuoteAssetAddress, int32(market.QuoteAssetDecimals)}
}
markets[fmt.Sprintf("%s-%s", market.BaseAssetName, market.QuoteAssetName)] = &Market{
*assets[market.BaseAssetName],
*assets[market.QuoteAssetName],
market.PricePrecision,
market.PriceDecimals,
market.AmountDecimals,
}
}
client = &DdexClient{
address,
assets,
markets,
contract,
privateKey,
"",
0,
ddexBaseUrl,
}
return
}
func (client *DdexClient) updateSignCache() {
now := utils.MillisecondTimestamp()
if client.lastSignTime < now-200000 {
messageStr := "HYDRO-AUTHENTICATION@" + strconv.Itoa(int(now))
signRes, _ := utils.PersonalSign([]byte(messageStr), client.privateKey)
client.signCache = fmt.Sprintf("%s#%s#0x%x", strings.ToLower(client.Address), messageStr, signRes)
client.lastSignTime = now
}
}
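// Illustrative only: the cached value sent in the Hydro-Authentication header has the form
// "<lowercased address>#HYDRO-AUTHENTICATION@<millisecond timestamp>#0x<signature hex>",
// and updateSignCache only re-signs once the previous signature is older than roughly 200 seconds.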
func (client *DdexClient) signOrderId(orderId string) string {
orderIdBytes, _ := hex.DecodeString(strings.TrimPrefix(orderId, "0x"))
signature, _ := utils.PersonalSign(orderIdBytes, client.privateKey)
return "0x" + hex.EncodeToString(signature)
}
func (client *DdexClient) get(path string, params []utils.KeyPair) (string, error) {
client.updateSignCache()
return utils.Get(
utils.JoinUrlPath(client.baseUrl, path),
"",
params,
[]utils.KeyPair{
{"Hydro-Authentication", client.signCache},
{"Content-Type", "application/json"},
},
)
}
func (client *DdexClient) post(path string, body string, params []utils.KeyPair) (string, error) {
client.updateSignCache()
return utils.Post(
utils.JoinUrlPath(client.baseUrl, path),
body,
params,
[]utils.KeyPair{
{"Hydro-Authentication", client.signCache},
{"Content-Type", "application/json"},
},
)
}
func (client *DdexClient) delete(path string, params []utils.KeyPair) (string, error) {
client.updateSignCache()
return utils.Delete(
utils.JoinUrlPath(client.baseUrl, path),
"",
params,
[]utils.KeyPair{
{"Hydro-Authentication", client.signCache},
{"Content-Type", "application/json"},
},
)
}
func (client *DdexClient) buildUnsignedOrder(
tradingPair string,
price decimal.Decimal,
amount decimal.Decimal,
side string,
orderType string,
isMakerOnly bool,
expireTimeInSecond int64) (orderId string, err error) {
var dataContainer IBuildOrder
var body = struct {
MarketId string `json:"marketId"`
Side string `json:"side"`
OrderType string `json:"orderType"`
Price decimal.Decimal `json:"price"`
Amount decimal.Decimal `json:"amount"`
Expires int64 `json:"expires"`
IsMakerOnly bool `json:"isMakerOnly"`
WalletType string `json:"walletType"`
}{
tradingPair,
side,
orderType,
price,
amount,
expireTimeInSecond,
isMakerOnly,
"trading",
}
bodyBytes, _ := json.Marshal(body)
resp, err := client.post("orders/build", string(bodyBytes), utils.EmptyKeyPairList)
if err != nil {
return "", err
}
json.Unmarshal([]byte(resp), &dataContainer)
if dataContainer.Desc != "success" {
err = errors.New(dataContainer.Desc)
} else {
orderId = dataContainer.Data.Order.ID
}
return
}
func (client *DdexClient) placeOrder(orderId string) (res *OrderRes, err error) {
var body = struct {
OrderId string `json:"orderId"`
Signature string `json:"signature"`
}{orderId, client.signOrderId(orderId)}
bodyBytes, _ := json.Marshal(body)
resp, err := client.post("orders", string(bodyBytes), utils.EmptyKeyPairList)
if err != nil {
return
}
var dataContainer IPlaceOrderSync
json.Unmarshal([]byte(resp), &dataContainer)
if dataContainer.Desc != "success" {
err = errors.New(dataContainer.Desc)
} else {
res = client.parseDdexOrderResp(dataContainer.Data.Order)
}
return
}
func (client *DdexClient) CreateLimitOrder(
tradingPair string,
price decimal.Decimal,
amount decimal.Decimal,
side string,
isMakerOnly bool,
expireTimeInSecond int64) (orderId string, err error) {
validPrice := utils.SetDecimal(utils.SetPrecision(price, client.Markets[tradingPair].PricePrecision), client.Markets[tradingPair].PriceDecimal)
validAmount := utils.SetDecimal(amount, client.Markets[tradingPair].AmountDecimal)
orderId, err = client.buildUnsignedOrder(tradingPair, validPrice, validAmount, side, "limit", isMakerOnly, expireTimeInSecond)
if err != nil {
return
}
_, err = client.placeOrder(orderId)
if err == nil {
logrus.Debugf("create limit order at %s - price:%s amount:%s side:%s %s", tradingPair, validPrice, validAmount, side, orderId)
}
return
}
func (client *DdexClient) CreateMarketOrder(
tradingPair string,
priceLimit decimal.Decimal,
amount decimal.Decimal,
side string,
) (res *OrderRes, err error) {
validPrice := utils.SetDecimal(utils.SetPrecision(priceLimit, client.Markets[tradingPair].PricePrecision), client.Markets[tradingPair].PriceDecimal)
amount = utils.SetDecimal(amount, client.Markets[tradingPair].AmountDecimal)
orderId, err := client.buildUnsignedOrder(tradingPair, validPrice, amount, side, "market", false, 3600)
if err != nil {
return
}
res, err = client.placeOrder(orderId)
if err == nil {
logrus.Debugf("create market order at %s - price:%s amount:%s side:%s %s", tradingPair, validPrice, amount, side, orderId)
}
return
}
func (client *DdexClient) MarketSellAsset(
tradingPair string,
assetSymbol string,
amount decimal.Decimal,
maxSlippage decimal.Decimal,
) (
orderId string,
sellAmount decimal.Decimal,
receiveAmount decimal.Decimal,
err error,
) {
orderId = "0x0"
sellAmount = decimal.Zero
receiveAmount = decimal.Zero
_, _, midPrice, err := client.GetMarketPrice(tradingPair)
if err != nil {
return
}
var orderRes *OrderRes
if assetSymbol == strings.Split(tradingPair, "-")[0] {
orderRes, err = client.CreateMarketOrder(
tradingPair,
midPrice.Mul(decimal.New(1, 0).Sub(maxSlippage)),
amount,
utils.SELL,
)
if err != nil {
return
} else {
orderId = orderRes.Id
sellAmount = orderRes.FilledAmount
receiveAmount = orderRes.FilledAmount.Mul(orderRes.AvgPrice)
}
} else {
orderRes, err = client.CreateMarketOrder(
tradingPair,
midPrice.Mul(decimal.New(1, 0).Add(maxSlippage)),
amount,
utils.BUY,
)
if err != nil {
return
} else {
orderId = orderRes.Id
sellAmount = orderRes.FilledAmount
receiveAmount = orderRes.FilledAmount.Div(orderRes.AvgPrice)
}
}
return
}
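// Worked example (hypothetical numbers): with a mid price of 200 and maxSlippage of 0.01,
// MarketSellAsset submits a sell with a price limit of 198.0 (200 * 0.99) or a buy with a
// limit of 202.0 (200 * 1.01), then derives sellAmount and receiveAmount from the filled
// amount and average price of the resulting order.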
func (client *DdexClient) PromisedMarketSellAsset(
tradingPair string,
assetSymbol string,
amount decimal.Decimal,
maxSlippage decimal.Decimal,
) (
orderId string,
sellAmount decimal.Decimal,
receiveAmount decimal.Decimal,
err error,
) {
for {
orderId, sellAmount, receiveAmount, err = client.MarketSellAsset(tradingPair, assetSymbol, amount, maxSlippage)
if err != nil {
time.Sleep(time.Second)
} else {
return
}
}
}
func (client *DdexClient) CancelOrder(orderId string) error {
resp, err := client.delete("orders/"+orderId, utils.EmptyKeyPairList)
if err != nil {
return err
}
var dataContainer ICancelOrder
json.Unmarshal([]byte(resp), &dataContainer)
if dataContainer.Desc != "success" {
return errors.New(dataContainer.Desc)
} else {
logrus.Infof("cancel order %s ", orderId)
return nil
}
}
func (client *DdexClient) CancelAllPendingOrders() error {
for tradingPair := range client.Markets {
resp, err := client.delete("orders", []utils.KeyPair{{"marketId", tradingPair}})
if err != nil {
return err
}
var dataContainer ICancelOrder
json.Unmarshal([]byte(resp), &dataContainer)
if dataContainer.Desc != "success" {
return errors.New(dataContainer.Desc)
}
}
logrus.Infof("cancel all orders")
return nil
}
func (client *DdexClient) parseDdexOrderResp(orderInfo IOrderResp) *OrderRes {
var orderData = &OrderRes{}
orderData.Id = orderInfo.ID
orderData.Amount, _ = decimal.NewFromString(orderInfo.Amount)
orderData.AvailableAmount, _ = decimal.NewFromString(orderInfo.AvailableAmount)
orderData.Price, _ = decimal.NewFromString(orderInfo.Price)
orderData.AvgPrice, _ = decimal.NewFromString(orderInfo.AveragePrice)
pendingAmount, _ := decimal.NewFromString(orderInfo.PendingAmount)
confirmedAmount, _ := decimal.NewFromString(orderInfo.ConfirmedAmount)
orderData.FilledAmount = pendingAmount.Add(confirmedAmount)
if orderData.AvailableAmount.IsZero() {
orderData.Status = utils.ORDERCLOSE
} else {
orderData.Status = utils.ORDEROPEN
}
if orderInfo.Side == "sell" {
orderData.Side = utils.SELL
} else {
orderData.Side = utils.BUY
}
return orderData
}
func (client *DdexClient) GetOrder(orderId string) (res *OrderRes, err error) {
resp, err := client.get("orders/"+orderId, utils.EmptyKeyPairList)
if err != nil {
return
}
var dataContainer IOrder
json.Unmarshal([]byte(resp), &dataContainer)
if dataContainer.Desc != "success" {
err = errors.New(dataContainer.Desc)
} else {
res = client.parseDdexOrderResp(dataContainer.Data.Order)
}
return
}
func (client *DdexClient) GetInventory() (inventory Inventory, err error) {
inventory = map[string]*Balance{}
for symbol, asset := range client.Assets {
var amountHex string
amountHex, err = client.hydroContract.Call("balanceOf", common.HexToAddress(asset.Address), common.HexToAddress(client.Address))
if err != nil {
return
}
amount := utils.HexString2Decimal(amountHex, -1*asset.Decimal)
inventory[symbol] = &Balance{amount, decimal.Zero, amount}
}
resp, err := client.get("account/lockedBalances", utils.EmptyKeyPairList)
if err != nil {
return
}
var dataContainer ILockedBalance
json.Unmarshal([]byte(resp), &dataContainer)
if dataContainer.Desc != "success" {
err = errors.New(dataContainer.Desc)
return
}
for _, lockedBalance := range dataContainer.Data.LockedBalances {
if _, ok := inventory[lockedBalance.Symbol]; ok && lockedBalance.WalletType == "trading" {
amount, _ := decimal.NewFromString(lockedBalance.Amount)
inventory[lockedBalance.Symbol].Lock = amount.Mul(decimal.New(1, -1*client.Assets[lockedBalance.Symbol].Decimal))
inventory[lockedBalance.Symbol].Free = inventory[lockedBalance.Symbol].Total.Sub(inventory[lockedBalance.Symbol].Lock)
}
}
return
}
func (client *DdexClient) GetMarketPrice(tradingPair string) (
bestBidPrice decimal.Decimal,
bestAskPrice decimal.Decimal,
midPrice decimal.Decimal,
err error) {
resp, err := client.get(
fmt.Sprintf("markets/%s/orderbook", tradingPair),
[]utils.KeyPair{{"level", "1"}},
)
if err != nil {
return
}
var dataContainer IOrderbook
json.Unmarshal([]byte(resp), &dataContainer)
if len(dataContainer.Data.OrderBook.Asks) == 0 || len(dataContainer.Data.OrderBook.Bids) == 0 {
err = utils.OrderbookNotComplete
return
}
bestAskPrice, _ = decimal.NewFromString(dataContainer.Data.OrderBook.Asks[0].Price)
bestBidPrice, _ = decimal.NewFromString(dataContainer.Data.OrderBook.Bids[0].Price)
midPrice = bestAskPrice.Add(bestBidPrice).Div(decimal.New(2, 0))
return
}
func (client *DdexClient) QuerySellAssetReceiveAmount(
tradingPair string,
assetSymbol string,
payAmount decimal.Decimal,
) (receiveAmount decimal.Decimal, err error) {
resp, err := client.get(fmt.Sprintf("markets/%s/orderbook", tradingPair), []utils.KeyPair{{"level", "2"}})
if err != nil {
return
}
var dataContainer IOrderbook
json.Unmarshal([]byte(resp), &dataContainer)
if dataContainer.Desc != "success" {
err = errors.New(dataContainer.Desc)
return
}
receiveAmount = decimal.Zero
if assetSymbol == client.Markets[tradingPair].Quote.Symbol {
for _, ask := range dataContainer.Data.OrderBook.Asks {
price, _ := decimal.NewFromString(ask.Price)
amount, _ := decimal.NewFromString(ask.Amount)
if price.Mul(amount).GreaterThanOrEqual(payAmount) {
receiveAmount = receiveAmount.Add(payAmount.Div(price))
payAmount = decimal.Zero
break
} else {
receiveAmount = receiveAmount.Add(amount)
payAmount = payAmount.Sub(price.Mul(amount))
}
}
if payAmount.IsPositive() {
err = utils.OrderbookDepthNotEnough
}
} else {
for _, bid := range dataContainer.Data.OrderBook.Bids {
price, _ := decimal.NewFromString(bid.Price)
amount, _ := decimal.NewFromString(bid.Amount)
if amount.GreaterThanOrEqual(payAmount) {
receiveAmount = receiveAmount.Add(payAmount.Mul(price))
payAmount = decimal.Zero
break
} else {
receiveAmount = receiveAmount.Add(amount.Mul(price))
payAmount = payAmount.Sub(amount)
}
}
if payAmount.IsPositive() {
err = utils.OrderbookDepthNotEnough
}
}
return
}
func (client *DdexClient) GetAssetUSDPrice(assetSymbol string) (price decimal.Decimal, err error) {
price = decimal.New(-1, 0)
resp, err := client.get("assets", utils.EmptyKeyPairList)
if err != nil {
return
}
var dataContainer IAssets
json.Unmarshal([]byte(resp), &dataContainer)
if dataContainer.Desc != "success" {
err = errors.New(dataContainer.Desc)
return
}
for _, asset := range dataContainer.Data.Assets {
if assetSymbol == asset.Symbol {
price, _ = decimal.NewFromString(asset.OracleUSDPrice)
}
}
if price.LessThanOrEqual(decimal.Zero) {
err = errors.New("not find asset usd price")
}
return
}
| [
"\"ETHEREUM_NODE_URL\"",
"\"DDEX_URL\"",
"\"HYDRO_CONTRACT_ADDRESS\""
]
| []
| [
"DDEX_URL",
"ETHEREUM_NODE_URL",
"HYDRO_CONTRACT_ADDRESS"
]
| [] | ["DDEX_URL", "ETHEREUM_NODE_URL", "HYDRO_CONTRACT_ADDRESS"] | go | 3 | 0 | |
xyz-hub-test/src/test/java/com/here/xyz/hub/rest/RestAssuredConfig.java | /*
* Copyright (C) 2017-2019 HERE Europe B.V.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
* License-Filename: LICENSE
*/
package com.here.xyz.hub.rest;
public class RestAssuredConfig {
public String baseURI;
public int hubPort;
public int connectorPort;
public String fullHubUri;
public String fullHttpConnectorUri;
private RestAssuredConfig() {
}
private static RestAssuredConfig config = null;
public static RestAssuredConfig config() {
if (config == null) {
config = localConfig();
}
return config;
}
private static RestAssuredConfig localConfig() {
RestAssuredConfig config = new RestAssuredConfig();
String envPort = System.getenv("HTTP_PORT");
String host = System.getenv().containsKey("HTTP_HOST") ? System.getenv("HTTP_HOST") : "localhost";
String service = System.getenv().containsKey("HTTP_SERVICE") ? System.getenv("HTTP_SERVICE") : "hub";
config.baseURI = "http://"+host+"/" + service;
config.hubPort = 8080;
config.connectorPort = 9090;
config.fullHubUri = "http://"+host+":"+config.hubPort +"/" + service;
config.fullHttpConnectorUri = "http://"+host+":"+config.connectorPort +"/psql";
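// Illustrative only (hypothetical host): with HTTP_HOST=ci-runner and the default service "hub",
// fullHubUri becomes "http://ci-runner:8080/hub" and fullHttpConnectorUri "http://ci-runner:9090/psql".
// Note that these URIs are composed before HTTP_PORT is parsed below, so they use the default ports.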
try {
config.hubPort = Integer.parseInt(envPort);
}
catch (NumberFormatException ignore) {}
return config;
}
}
| [
"\"HTTP_PORT\"",
"\"HTTP_HOST\"",
"\"HTTP_SERVICE\""
]
| []
| [
"HTTP_SERVICE",
"HTTP_PORT",
"HTTP_HOST"
]
| [] | ["HTTP_SERVICE", "HTTP_PORT", "HTTP_HOST"] | java | 3 | 0 | |
vendor/github.com/btcsuite/btcutil/appdata.go | // Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcutil
import (
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"unicode"
)
// appDataDir returns an operating system specific directory to be used for
// storing application data for an application. See AppDataDir for more
// details. This unexported version takes an operating system argument
// primarily to enable the testing package to properly test the function by
// forcing an operating system that is not the currently one.
func appDataDir(goos, appName string, roaming bool) string {
if appName == "" || appName == "." {
return "."
}
// The caller really shouldn't prepend the appName with a period, but
// if they do, handle it gracefully by trimming it.
appName = strings.TrimPrefix(appName, ".")
appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]
// Get the OS specific home directory via the Go standard lib.
var homeDir string
usr, err := user.Current()
if err == nil {
homeDir = usr.HomeDir
}
// Fall back to standard HOME environment variable that works
// for most POSIX OSes if the directory from the Go standard
// lib failed.
if err != nil || homeDir == "" {
homeDir = os.Getenv("HOME")
}
switch goos {
// Attempt to use the LOCALAPPDATA or APPDATA environment variable on
// Windows.
case "windows":
// Windows XP and before didn't have a LOCALAPPDATA, so fallback
// to regular APPDATA when LOCALAPPDATA is not set.
appData := os.Getenv("LOCALAPPDATA")
if roaming || appData == "" {
appData = os.Getenv("APPDATA")
}
if appData != "" {
return filepath.Join(appData, appNameUpper)
}
case "darwin":
if homeDir != "" {
return filepath.Join(homeDir, "Library",
"Application Support", appNameUpper)
}
case "plan9":
if homeDir != "" {
return filepath.Join(homeDir, appNameLower)
}
default:
if homeDir != "" {
return filepath.Join(homeDir, "."+appNameLower)
}
}
// Fall back to the current directory if all else fails.
return "."
}
// AppDataDir returns an operating system specific directory to be used for
// storing application data for an application.
//
// The appName parameter is the name of the application the data directory is
// being requested for. This function will prepend a period to the appName for
// POSIX style operating systems since that is standard practice. An empty
// appName or one with a single dot is treated as requesting the current
// directory so only "." will be returned. Further, the first character
// of appName will be made lowercase for POSIX style operating systems and
// uppercase for Mac and Windows since that is standard practice.
//
// The roaming parameter only applies to Windows where it specifies the roaming
// application data profile (%APPDATA%) should be used instead of the local one
// (%LOCALAPPDATA%) that is used by default.
//
// Example results:
// dir := AppDataDir("myapp", false)
// POSIX (Linux/BSD): ~/.myapp
// Mac OS: $HOME/Library/Application Support/Myapp
// Windows: %LOCALAPPDATA%\Myapp
// Plan 9: $home/myapp
func AppDataDir(appName string, roaming bool) string {
return appDataDir(runtime.GOOS, appName, roaming)
}
| [
"\"HOME\"",
"\"LOCALAPPDATA\"",
"\"APPDATA\""
]
| []
| [
"APPDATA",
"HOME",
"LOCALAPPDATA"
]
| [] | ["APPDATA", "HOME", "LOCALAPPDATA"] | go | 3 | 0 | |
friend_fighter/asgi.py | """
ASGI config for friend_fighter project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'friend_fighter.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
build/lambda/schedule/schedule.go | package main
import (
"context"
"os"
"github.com/lendi-au/helm-janitor/cmd/scan"
"github.com/lendi-au/helm-janitor/internal/config"
log "github.com/sirupsen/logrus"
)
// runs the generic handler to execute helm delete...
// when the ttl expires.
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Output to stdout instead of the default stderr
// Can be any io.Writer, see below for File example
log.SetOutput(os.Stdout)
logLevel := "info"
if os.Getenv("LOG_LEVEL") != "" {
logLevel = os.Getenv("LOG_LEVEL")
}
level, err := log.ParseLevel(logLevel)
if err != nil {
log.Errorf("Dodgy log level set: %s", logLevel)
log.SetLevel(log.WarnLevel)
} else {
log.SetLevel(level)
}
}
// HandleRequest runs the scan package code to look for old releases and deletes them
func HandleRequest() error {
ctx := context.Background()
scanner := scan.NewScanClient()
scanner.Dryrun = config.GetenvWithDefaultBool("DRY_RUN", false)
scanner.AllNamespaces = config.GetenvWithDefaultBool("ALL_NAMESPACES", true)
scanner.Namespace = config.GetenvWithDefault("NAMESPACE", "")
scanner.IncludeNamespaces = config.GetenvWithDefault("INCLUDE_NAMESPACES", "")
scanner.ExcludeNamespaces = config.GetenvWithDefault("EXCLUDE_NAMESPACES", "")
scanner.Context = ctx
scanner.Init()
log.Info("starting...")
scan.RunV2(scanner)
return nil
}
func main() {
log.Infof("starting")
HandleRequest()
// lambda.Start(HandleRequest)
log.Infof("finished")
}
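// Illustrative invocation sketch (an assumption based on the Getenv calls
// above, not documentation of the real deployment): the scheduler is
// configured entirely through environment variables before it starts, e.g.
//
//	LOG_LEVEL=debug DRY_RUN=true ALL_NAMESPACES=false NAMESPACE=dev \
//	EXCLUDE_NAMESPACES=kube-system ./helm-janitor-schedule
//
// where "helm-janitor-schedule" is a hypothetical name for this binary.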
| [
"\"LOG_LEVEL\"",
"\"LOG_LEVEL\""
]
| []
| [
"LOG_LEVEL"
]
| [] | ["LOG_LEVEL"] | go | 1 | 0 | |
.vendor/db/tsdb/engine/tsm1/engine.go | // Package tsm1 provides a TSDB in the Time Structured Merge tree format.
package tsm1
import (
"archive/tar"
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cnosdb/cnosql"
"github.com/cnosdb/db/logger"
"github.com/cnosdb/db/models"
"github.com/cnosdb/db/pkg/bytesutil"
"github.com/cnosdb/db/pkg/estimator"
"github.com/cnosdb/db/pkg/file"
"github.com/cnosdb/db/pkg/limiter"
"github.com/cnosdb/db/pkg/metrics"
"github.com/cnosdb/db/pkg/radix"
intar "github.com/cnosdb/db/pkg/tar"
"github.com/cnosdb/db/pkg/tracing"
"github.com/cnosdb/db/query"
"github.com/cnosdb/db/tsdb"
_ "github.com/cnosdb/db/tsdb/index"
"github.com/cnosdb/db/tsdb/index/inmem"
"github.com/cnosdb/db/tsdb/index/tsi1"
"go.uber.org/zap"
)
//go:generate tmpl [email protected] iterator.gen.go.tmpl engine.gen.go.tmpl array_cursor.gen.go.tmpl array_cursor_iterator.gen.go.tmpl
//go:generate tmpl -data=@file_store.gen.go.tmpldata file_store.gen.go.tmpl file_store_array.gen.go.tmpl
//go:generate tmpl [email protected] encoding.gen.go.tmpl
//go:generate tmpl [email protected] compact.gen.go.tmpl
//go:generate tmpl [email protected] reader.gen.go.tmpl
func init() {
tsdb.RegisterEngine("tsm1", NewEngine)
}
var (
// Ensure Engine implements the interface.
_ tsdb.Engine = &Engine{}
// Static objects to prevent small allocs.
timeBytes = []byte("time")
keyFieldSeparatorBytes = []byte(keyFieldSeparator)
emptyBytes = []byte{}
)
var (
tsmGroup = metrics.MustRegisterGroup("tsm1")
numberOfRefCursorsCounter = metrics.MustRegisterCounter("cursors_ref", metrics.WithGroup(tsmGroup))
numberOfAuxCursorsCounter = metrics.MustRegisterCounter("cursors_aux", metrics.WithGroup(tsmGroup))
numberOfCondCursorsCounter = metrics.MustRegisterCounter("cursors_cond", metrics.WithGroup(tsmGroup))
planningTimer = metrics.MustRegisterTimer("planning_time", metrics.WithGroup(tsmGroup))
)
// NewContextWithMetricsGroup creates a new context with a tsm1 metrics.Group for tracking
// various metrics when accessing TSM data.
func NewContextWithMetricsGroup(ctx context.Context) context.Context {
group := metrics.NewGroup(tsmGroup)
return metrics.NewContextWithGroup(ctx, group)
}
// MetricsGroupFromContext returns the tsm1 metrics.Group associated with the context
// or nil if no group has been assigned.
func MetricsGroupFromContext(ctx context.Context) *metrics.Group {
return metrics.GroupFromContext(ctx)
}
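// Illustrative sketch (not from the original source): a caller that wants
// per-query TSM metrics attaches a group to the context before doing cursor
// work and reads it back afterwards, e.g.
//
//	ctx := NewContextWithMetricsGroup(context.Background())
//	// ... create cursors / run iterators with ctx ...
//	if grp := MetricsGroupFromContext(ctx); grp != nil {
//		// inspect the counters and timers registered under the "tsm1" group
//	}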
const (
// keyFieldSeparator separates the series key from the field name in the composite key
// that identifies a specific field in series
keyFieldSeparator = "#!~#"
// deleteFlushThreshold is the size in bytes of a batch of series keys to delete.
deleteFlushThreshold = 50 * 1024 * 1024
)
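// For example (illustrative only), the series key "cpu,host=server01" and the
// field "usage_idle" are stored under the composite key
// "cpu,host=server01#!~#usage_idle"; SeriesAndFieldFromCompositeKey splits such
// a key back into its series and field parts.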
// Statistics gathered by the engine.
const (
statCacheCompactions = "cacheCompactions"
statCacheCompactionsActive = "cacheCompactionsActive"
statCacheCompactionError = "cacheCompactionErr"
statCacheCompactionDuration = "cacheCompactionDuration"
statTSMLevel1Compactions = "tsmLevel1Compactions"
statTSMLevel1CompactionsActive = "tsmLevel1CompactionsActive"
statTSMLevel1CompactionError = "tsmLevel1CompactionErr"
statTSMLevel1CompactionDuration = "tsmLevel1CompactionDuration"
statTSMLevel1CompactionQueue = "tsmLevel1CompactionQueue"
statTSMLevel2Compactions = "tsmLevel2Compactions"
statTSMLevel2CompactionsActive = "tsmLevel2CompactionsActive"
statTSMLevel2CompactionError = "tsmLevel2CompactionErr"
statTSMLevel2CompactionDuration = "tsmLevel2CompactionDuration"
statTSMLevel2CompactionQueue = "tsmLevel2CompactionQueue"
statTSMLevel3Compactions = "tsmLevel3Compactions"
statTSMLevel3CompactionsActive = "tsmLevel3CompactionsActive"
statTSMLevel3CompactionError = "tsmLevel3CompactionErr"
statTSMLevel3CompactionDuration = "tsmLevel3CompactionDuration"
statTSMLevel3CompactionQueue = "tsmLevel3CompactionQueue"
statTSMOptimizeCompactions = "tsmOptimizeCompactions"
statTSMOptimizeCompactionsActive = "tsmOptimizeCompactionsActive"
statTSMOptimizeCompactionError = "tsmOptimizeCompactionErr"
statTSMOptimizeCompactionDuration = "tsmOptimizeCompactionDuration"
statTSMOptimizeCompactionQueue = "tsmOptimizeCompactionQueue"
statTSMFullCompactions = "tsmFullCompactions"
statTSMFullCompactionsActive = "tsmFullCompactionsActive"
statTSMFullCompactionError = "tsmFullCompactionErr"
statTSMFullCompactionDuration = "tsmFullCompactionDuration"
statTSMFullCompactionQueue = "tsmFullCompactionQueue"
)
// Engine represents a storage engine with compressed blocks.
type Engine struct {
mu sync.RWMutex
index tsdb.Index
// The following group of fields is used to track the state of level compactions within the
// Engine. The WaitGroup is used to monitor the compaction goroutines, the 'done' channel is
// used to signal those goroutines to shutdown. Every request to disable level compactions will
// call 'Wait' on 'wg', with the first goroutine to arrive (levelWorkers == 0 while holding the
// lock) will close the done channel and re-assign 'nil' to the variable. Re-enabling will
// decrease 'levelWorkers', and when it decreases to zero, level compactions will be started
// back up again.
wg *sync.WaitGroup // waitgroup for active level compaction goroutines
done chan struct{} // channel to signal level compactions to stop
levelWorkers int // Number of "workers" that expect compactions to be in a disabled state
snapDone chan struct{} // channel to signal snapshot compactions to stop
snapWG *sync.WaitGroup // waitgroup for running snapshot compactions
id uint64
path string
sfile *tsdb.SeriesFile
logger *zap.Logger // Logger to be used for important messages
traceLogger *zap.Logger // Logger to be used when trace-logging is on.
traceLogging bool
fieldset *tsdb.MeasurementFieldSet
WAL *WAL
Cache *Cache
Compactor *Compactor
CompactionPlan CompactionPlanner
FileStore *FileStore
MaxPointsPerBlock int
// CacheFlushMemorySizeThreshold specifies the minimum size threshold for
// the cache when the engine should write a snapshot to a TSM file
CacheFlushMemorySizeThreshold uint64
// CacheFlushWriteColdDuration specifies the length of time after which if
// no writes have been committed to the WAL, the engine will write
// a snapshot of the cache to a TSM file
CacheFlushWriteColdDuration time.Duration
// WALEnabled determines whether writes to the WAL are enabled. If this is false,
// writes will only exist in the cache and can be lost if a snapshot has not occurred.
WALEnabled bool
// Invoked when creating a backup file "as new".
formatFileName FormatFileNameFunc
// Controls whether to enable compactions when the engine is open
enableCompactionsOnOpen bool
stats *EngineStatistics
// Limiter for concurrent compactions.
compactionLimiter limiter.Fixed
scheduler *scheduler
// provides access to the total set of series IDs
seriesIDSets tsdb.SeriesIDSets
// seriesTypeMap maps a series key to field type
seriesTypeMap *radix.Tree
// muDigest ensures only one goroutine can generate a digest at a time.
muDigest sync.RWMutex
}
// NewEngine returns a new instance of Engine.
func NewEngine(id uint64, idx tsdb.Index, path string, walPath string, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Engine {
var wal *WAL
if opt.WALEnabled {
wal = NewWAL(walPath)
wal.syncDelay = time.Duration(opt.Config.WALFsyncDelay)
}
fs := NewFileStore(path)
fs.openLimiter = opt.OpenLimiter
if opt.FileStoreObserver != nil {
fs.WithObserver(opt.FileStoreObserver)
}
fs.tsmMMAPWillNeed = opt.Config.TSMWillNeed
cache := NewCache(uint64(opt.Config.CacheMaxMemorySize))
c := NewCompactor()
c.Dir = path
c.FileStore = fs
c.RateLimit = opt.CompactionThroughputLimiter
var planner CompactionPlanner = NewDefaultPlanner(fs, time.Duration(opt.Config.CompactFullWriteColdDuration))
if opt.CompactionPlannerCreator != nil {
planner = opt.CompactionPlannerCreator(opt.Config).(CompactionPlanner)
planner.SetFileStore(fs)
}
logger := zap.NewNop()
stats := &EngineStatistics{}
e := &Engine{
id: id,
path: path,
index: idx,
sfile: sfile,
logger: logger,
traceLogger: logger,
traceLogging: opt.Config.TraceLoggingEnabled,
WAL: wal,
Cache: cache,
FileStore: fs,
Compactor: c,
CompactionPlan: planner,
CacheFlushMemorySizeThreshold: uint64(opt.Config.CacheSnapshotMemorySize),
CacheFlushWriteColdDuration: time.Duration(opt.Config.CacheSnapshotWriteColdDuration),
enableCompactionsOnOpen: true,
WALEnabled: opt.WALEnabled,
formatFileName: DefaultFormatFileName,
stats: stats,
compactionLimiter: opt.CompactionLimiter,
scheduler: newScheduler(stats, opt.CompactionLimiter.Capacity()),
seriesIDSets: opt.SeriesIDSets,
}
// Feature flag to enable per-series type checking, by default this is off and
// e.seriesTypeMap will be nil.
if os.Getenv("CNOSDB_SERIES_TYPE_CHECK_ENABLED") != "" {
e.seriesTypeMap = radix.New()
}
if e.traceLogging {
fs.enableTraceLogging(true)
if e.WALEnabled {
e.WAL.enableTraceLogging(true)
}
}
return e
}
func (e *Engine) WithFormatFileNameFunc(formatFileNameFunc FormatFileNameFunc) {
e.Compactor.WithFormatFileNameFunc(formatFileNameFunc)
e.formatFileName = formatFileNameFunc
}
func (e *Engine) WithParseFileNameFunc(parseFileNameFunc ParseFileNameFunc) {
e.FileStore.WithParseFileNameFunc(parseFileNameFunc)
e.Compactor.WithParseFileNameFunc(parseFileNameFunc)
}
// Digest returns a reader for the shard's digest.
func (e *Engine) Digest() (io.ReadCloser, int64, error) {
e.muDigest.Lock()
defer e.muDigest.Unlock()
log, logEnd := logger.NewOperation(e.logger, "Engine digest", "tsm1_digest")
defer logEnd()
log.Info("Starting digest", zap.String("tsm1_path", e.path))
digestPath := filepath.Join(e.path, DigestFilename)
// Get a list of tsm file paths from the FileStore.
files := e.FileStore.Files()
tsmfiles := make([]string, 0, len(files))
for _, f := range files {
tsmfiles = append(tsmfiles, f.Path())
}
// See if there's a fresh digest cached on disk.
fresh, reason := DigestFresh(e.path, tsmfiles, e.LastModified())
if fresh {
f, err := os.Open(digestPath)
if err == nil {
fi, err := f.Stat()
if err != nil {
log.Info("Digest aborted, couldn't stat digest file", logger.Shard(e.id), zap.Error(err))
return nil, 0, err
}
log.Info("Digest is fresh", logger.Shard(e.id), zap.String("path", digestPath))
// Return the cached digest.
return f, fi.Size(), nil
}
}
log.Info("Digest stale", logger.Shard(e.id), zap.String("reason", reason))
// Either no digest existed or the existing one was stale
// so generate a new digest.
// Make sure the directory exists, in case it was deleted for some reason.
if err := os.MkdirAll(e.path, 0777); err != nil {
log.Info("Digest aborted, problem creating shard directory path", zap.Error(err))
return nil, 0, err
}
// Create a tmp file to write the digest to.
tf, err := os.Create(digestPath + ".tmp")
if err != nil {
log.Info("Digest aborted, problem creating tmp digest", zap.Error(err))
return nil, 0, err
}
// Write the new digest to the tmp file.
if err := Digest(e.path, tsmfiles, tf); err != nil {
log.Info("Digest aborted, problem writing tmp digest", zap.Error(err))
tf.Close()
os.Remove(tf.Name())
return nil, 0, err
}
// Rename the temporary digest file to the actual digest file.
if err := file.RenameFile(tf.Name(), digestPath); err != nil {
log.Info("Digest aborted, problem renaming tmp digest", zap.Error(err))
return nil, 0, err
}
// Create and return a reader for the new digest file.
f, err := os.Open(digestPath)
if err != nil {
log.Info("Digest aborted, opening new digest", zap.Error(err))
return nil, 0, err
}
fi, err := f.Stat()
if err != nil {
log.Info("Digest aborted, can't stat new digest", zap.Error(err))
f.Close()
return nil, 0, err
}
log.Info("Digest written", zap.String("tsm1_digest_path", digestPath), zap.Int64("size", fi.Size()))
return f, fi.Size(), nil
}
// SetEnabled sets whether the engine is enabled.
func (e *Engine) SetEnabled(enabled bool) {
e.enableCompactionsOnOpen = enabled
e.SetCompactionsEnabled(enabled)
}
// SetCompactionsEnabled enables compactions on the engine. When disabled
// all running compactions are aborted and new compactions stop running.
func (e *Engine) SetCompactionsEnabled(enabled bool) {
if enabled {
e.enableSnapshotCompactions()
e.enableLevelCompactions(false)
} else {
e.disableSnapshotCompactions()
e.disableLevelCompactions(false)
}
}
// enableLevelCompactions will request that level compactions start back up again
//
// 'wait' signifies that a corresponding call to disableLevelCompactions(true) was made at some
// point, and the associated task that required disabled compactions is now complete
func (e *Engine) enableLevelCompactions(wait bool) {
// If we don't need to wait, see if we're already enabled
if !wait {
e.mu.RLock()
if e.done != nil {
e.mu.RUnlock()
return
}
e.mu.RUnlock()
}
e.mu.Lock()
if wait {
e.levelWorkers -= 1
}
if e.levelWorkers != 0 || e.done != nil {
// still waiting on more workers or already enabled
e.mu.Unlock()
return
}
// last one to enable, start things back up
e.Compactor.EnableCompactions()
e.done = make(chan struct{})
wg := new(sync.WaitGroup)
wg.Add(1)
e.wg = wg
e.mu.Unlock()
go func() { defer wg.Done(); e.compact(wg) }()
}
// disableLevelCompactions will stop level compactions before returning.
//
// If 'wait' is set to true, then a corresponding call to enableLevelCompactions(true) will be
// required before level compactions will start back up again.
func (e *Engine) disableLevelCompactions(wait bool) {
e.mu.Lock()
old := e.levelWorkers
if wait {
e.levelWorkers += 1
}
// Hold onto the current done channel so we can wait on it if necessary
waitCh := e.done
wg := e.wg
if old == 0 && e.done != nil {
// It's possible we have closed the done channel and released the lock and another
// goroutine has attempted to disable compactions. We're currently in the process of
// disabling them, so check for this and wait until the original completes.
select {
case <-e.done:
e.mu.Unlock()
return
default:
}
// Prevent new compactions from starting
e.Compactor.DisableCompactions()
// Stop all background compaction goroutines
close(e.done)
e.mu.Unlock()
wg.Wait()
// Signal that all goroutines have exited.
e.mu.Lock()
e.done = nil
e.mu.Unlock()
return
}
e.mu.Unlock()
// Compactions were already disabled.
if waitCh == nil {
return
}
// We were not the first caller to disable compactions and they were in the process
// of being disabled. Wait for them to complete before returning.
<-waitCh
wg.Wait()
}
func (e *Engine) enableSnapshotCompactions() {
// Check if already enabled under read lock
e.mu.RLock()
if e.snapDone != nil {
e.mu.RUnlock()
return
}
e.mu.RUnlock()
// Check again under write lock
e.mu.Lock()
if e.snapDone != nil {
e.mu.Unlock()
return
}
e.Compactor.EnableSnapshots()
e.snapDone = make(chan struct{})
wg := new(sync.WaitGroup)
wg.Add(1)
e.snapWG = wg
e.mu.Unlock()
go func() { defer wg.Done(); e.compactCache() }()
}
func (e *Engine) disableSnapshotCompactions() {
e.mu.Lock()
if e.snapDone == nil {
e.mu.Unlock()
return
}
// We may be in the process of stopping snapshots. See if the channel
// was closed.
select {
case <-e.snapDone:
e.mu.Unlock()
return
default:
}
// first one here, disable and wait for completion
close(e.snapDone)
e.Compactor.DisableSnapshots()
wg := e.snapWG
e.mu.Unlock()
// Wait for the snapshot goroutine to exit.
wg.Wait()
// Signal that the goroutines have exited and everything is stopped by setting
// snapDone to nil.
e.mu.Lock()
e.snapDone = nil
e.mu.Unlock()
// If the cache is empty, free up its resources as well.
if e.Cache.Size() == 0 {
e.Cache.Free()
}
}
// ScheduleFullCompaction will force the engine to fully compact all data stored.
// This will cancel any running compactions and snapshot any data in the cache to
// TSM files. This is an expensive operation.
func (e *Engine) ScheduleFullCompaction() error {
// Snapshot any data in the cache
if err := e.WriteSnapshot(); err != nil {
return err
}
// Cancel running compactions
e.SetCompactionsEnabled(false)
// Ensure compactions are restarted
defer e.SetCompactionsEnabled(true)
// Force the planner to only create a full plan.
e.CompactionPlan.ForceFull()
return nil
}
// Path returns the path the engine was opened with.
func (e *Engine) Path() string { return e.path }
func (e *Engine) SetFieldName(measurement []byte, name string) {
e.index.SetFieldName(measurement, name)
}
func (e *Engine) MeasurementExists(name []byte) (bool, error) {
return e.index.MeasurementExists(name)
}
func (e *Engine) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
return e.index.MeasurementNamesByRegex(re)
}
// MeasurementFieldSet returns the measurement field set.
func (e *Engine) MeasurementFieldSet() *tsdb.MeasurementFieldSet {
return e.fieldset
}
// MeasurementFields returns the measurement fields for a measurement.
func (e *Engine) MeasurementFields(measurement []byte) *tsdb.MeasurementFields {
return e.fieldset.CreateFieldsIfNotExists(measurement)
}
func (e *Engine) HasTagKey(name, key []byte) (bool, error) {
return e.index.HasTagKey(name, key)
}
func (e *Engine) MeasurementTagKeysByExpr(name []byte, expr cnosql.Expr) (map[string]struct{}, error) {
return e.index.MeasurementTagKeysByExpr(name, expr)
}
func (e *Engine) TagKeyCardinality(name, key []byte) int {
return e.index.TagKeyCardinality(name, key)
}
// SeriesN returns the unique number of series in the index.
func (e *Engine) SeriesN() int64 {
return e.index.SeriesN()
}
// MeasurementsSketches returns sketches that describe the cardinality of the
// measurements in this shard and measurements that were in this shard, but have
// been tombstoned.
func (e *Engine) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
return e.index.MeasurementsSketches()
}
// SeriesSketches returns sketches that describe the cardinality of the
// series in this shard and series that were in this shard, but have
// been tombstoned.
func (e *Engine) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
return e.index.SeriesSketches()
}
// LastModified returns the time when this shard was last modified.
func (e *Engine) LastModified() time.Time {
fsTime := e.FileStore.LastModified()
if e.WALEnabled && e.WAL.LastWriteTime().After(fsTime) {
return e.WAL.LastWriteTime()
}
return fsTime
}
// EngineStatistics maintains statistics for the engine.
type EngineStatistics struct {
CacheCompactions int64 // Counter of cache compactions that have ever run.
CacheCompactionsActive int64 // Gauge of cache compactions currently running.
CacheCompactionErrors int64 // Counter of cache compactions that have failed due to error.
CacheCompactionDuration int64 // Counter of number of wall nanoseconds spent in cache compactions.
TSMCompactions [3]int64 // Counter of TSM compactions (by level) that have ever run.
TSMCompactionsActive [3]int64 // Gauge of TSM compactions (by level) currently running.
TSMCompactionErrors [3]int64 // Counter of TSM compactions (by level) that have failed due to error.
TSMCompactionDuration [3]int64 // Counter of number of wall nanoseconds spent in TSM compactions (by level).
TSMCompactionsQueue [3]int64 // Gauge of TSM compactions queues (by level).
TSMOptimizeCompactions int64 // Counter of optimize compactions that have ever run.
TSMOptimizeCompactionsActive int64 // Gauge of optimize compactions currently running.
TSMOptimizeCompactionErrors int64 // Counter of optimize compactions that have failed due to error.
TSMOptimizeCompactionDuration int64 // Counter of number of wall nanoseconds spent in optimize compactions.
TSMOptimizeCompactionsQueue int64 // Gauge of optimize compactions queue.
TSMFullCompactions int64 // Counter of full compactions that have ever run.
TSMFullCompactionsActive int64 // Gauge of full compactions currently running.
TSMFullCompactionErrors int64 // Counter of full compactions that have failed due to error.
TSMFullCompactionDuration int64 // Counter of number of wall nanoseconds spent in full compactions.
TSMFullCompactionsQueue int64 // Gauge of full compactions queue.
}
// Statistics returns statistics for periodic monitoring.
func (e *Engine) Statistics(tags map[string]string) []models.Statistic {
statistics := make([]models.Statistic, 0, 4)
statistics = append(statistics, models.Statistic{
Name: "tsm1_engine",
Tags: tags,
Values: map[string]interface{}{
statCacheCompactions: atomic.LoadInt64(&e.stats.CacheCompactions),
statCacheCompactionsActive: atomic.LoadInt64(&e.stats.CacheCompactionsActive),
statCacheCompactionError: atomic.LoadInt64(&e.stats.CacheCompactionErrors),
statCacheCompactionDuration: atomic.LoadInt64(&e.stats.CacheCompactionDuration),
statTSMLevel1Compactions: atomic.LoadInt64(&e.stats.TSMCompactions[0]),
statTSMLevel1CompactionsActive: atomic.LoadInt64(&e.stats.TSMCompactionsActive[0]),
statTSMLevel1CompactionError: atomic.LoadInt64(&e.stats.TSMCompactionErrors[0]),
statTSMLevel1CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[0]),
statTSMLevel1CompactionQueue: atomic.LoadInt64(&e.stats.TSMCompactionsQueue[0]),
statTSMLevel2Compactions: atomic.LoadInt64(&e.stats.TSMCompactions[1]),
statTSMLevel2CompactionsActive: atomic.LoadInt64(&e.stats.TSMCompactionsActive[1]),
statTSMLevel2CompactionError: atomic.LoadInt64(&e.stats.TSMCompactionErrors[1]),
statTSMLevel2CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[1]),
statTSMLevel2CompactionQueue: atomic.LoadInt64(&e.stats.TSMCompactionsQueue[1]),
statTSMLevel3Compactions: atomic.LoadInt64(&e.stats.TSMCompactions[2]),
statTSMLevel3CompactionsActive: atomic.LoadInt64(&e.stats.TSMCompactionsActive[2]),
statTSMLevel3CompactionError: atomic.LoadInt64(&e.stats.TSMCompactionErrors[2]),
statTSMLevel3CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[2]),
statTSMLevel3CompactionQueue: atomic.LoadInt64(&e.stats.TSMCompactionsQueue[2]),
statTSMOptimizeCompactions: atomic.LoadInt64(&e.stats.TSMOptimizeCompactions),
statTSMOptimizeCompactionsActive: atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsActive),
statTSMOptimizeCompactionError: atomic.LoadInt64(&e.stats.TSMOptimizeCompactionErrors),
statTSMOptimizeCompactionDuration: atomic.LoadInt64(&e.stats.TSMOptimizeCompactionDuration),
statTSMOptimizeCompactionQueue: atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsQueue),
statTSMFullCompactions: atomic.LoadInt64(&e.stats.TSMFullCompactions),
statTSMFullCompactionsActive: atomic.LoadInt64(&e.stats.TSMFullCompactionsActive),
statTSMFullCompactionError: atomic.LoadInt64(&e.stats.TSMFullCompactionErrors),
statTSMFullCompactionDuration: atomic.LoadInt64(&e.stats.TSMFullCompactionDuration),
statTSMFullCompactionQueue: atomic.LoadInt64(&e.stats.TSMFullCompactionsQueue),
},
})
statistics = append(statistics, e.Cache.Statistics(tags)...)
statistics = append(statistics, e.FileStore.Statistics(tags)...)
if e.WALEnabled {
statistics = append(statistics, e.WAL.Statistics(tags)...)
}
return statistics
}
// DiskSize returns the total size in bytes of all TSM and WAL segments on disk.
func (e *Engine) DiskSize() int64 {
var walDiskSizeBytes int64
if e.WALEnabled {
walDiskSizeBytes = e.WAL.DiskSizeBytes()
}
return e.FileStore.DiskSizeBytes() + walDiskSizeBytes
}
// Open opens and initializes the engine.
func (e *Engine) Open() error {
if err := os.MkdirAll(e.path, 0777); err != nil {
return err
}
if err := e.cleanup(); err != nil {
return err
}
fields, err := tsdb.NewMeasurementFieldSet(filepath.Join(e.path, "fields.idx"))
if err != nil {
e.logger.Warn(fmt.Sprintf("error opening fields.idx: %v. Rebuilding.", err))
}
e.mu.Lock()
e.fieldset = fields
e.mu.Unlock()
e.index.SetFieldSet(fields)
if e.WALEnabled {
if err := e.WAL.Open(); err != nil {
return err
}
}
if err := e.FileStore.Open(); err != nil {
return err
}
if e.WALEnabled {
if err := e.reloadCache(); err != nil {
return err
}
}
e.Compactor.Open()
if e.enableCompactionsOnOpen {
e.SetCompactionsEnabled(true)
}
return nil
}
// Close closes the engine. Subsequent calls to Close are a nop.
func (e *Engine) Close() error {
e.SetCompactionsEnabled(false)
// Lock now and close everything else down.
e.mu.Lock()
defer e.mu.Unlock()
e.done = nil // Ensures that the channel will not be closed again.
if err := e.FileStore.Close(); err != nil {
return err
}
if e.WALEnabled {
return e.WAL.Close()
}
return nil
}
// WithLogger sets the logger for the engine.
func (e *Engine) WithLogger(log *zap.Logger) {
e.logger = log.With(zap.String("engine", "tsm1"))
if e.traceLogging {
e.traceLogger = e.logger
}
if e.WALEnabled {
e.WAL.WithLogger(e.logger)
}
e.FileStore.WithLogger(e.logger)
}
// LoadMetadataIndex loads the shard metadata into memory.
//
// Note, it is not safe to call LoadMetadataIndex concurrently. LoadMetadataIndex
// should only be called when initialising a new Engine.
func (e *Engine) LoadMetadataIndex(shardID uint64, index tsdb.Index) error {
now := time.Now()
// Save reference to index for iterator creation.
e.index = index
// If we have the cached fields index on disk and we're using TSI, we
// can skip scanning all the TSM files.
if e.index.Type() != inmem.IndexName && !e.fieldset.IsEmpty() {
return nil
}
keys := make([][]byte, 0, 10000)
fieldTypes := make([]cnosql.DataType, 0, 10000)
if err := e.FileStore.WalkKeys(nil, func(key []byte, typ byte) error {
fieldType := BlockTypeToCnosQLDataType(typ)
if fieldType == cnosql.Unknown {
return fmt.Errorf("unknown block type: %v", typ)
}
keys = append(keys, key)
fieldTypes = append(fieldTypes, fieldType)
if len(keys) == cap(keys) {
// Send batch of keys to the index.
if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
return err
}
// Reset buffers.
keys, fieldTypes = keys[:0], fieldTypes[:0]
}
return nil
}); err != nil {
return err
}
if len(keys) > 0 {
// Add remaining partial batch from FileStore.
if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
return err
}
keys, fieldTypes = keys[:0], fieldTypes[:0]
}
// load metadata from the Cache
if err := e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error {
fieldType, err := entry.values.CnosQLType()
if err != nil {
e.logger.Info("Error getting the data type of values for key", zap.ByteString("key", key), zap.Error(err))
}
keys = append(keys, key)
fieldTypes = append(fieldTypes, fieldType)
if len(keys) == cap(keys) {
// Send batch of keys to the index.
if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
return err
}
// Reset buffers.
keys, fieldTypes = keys[:0], fieldTypes[:0]
}
return nil
}); err != nil {
return err
}
if len(keys) > 0 {
// Add remaining partial batch from FileStore.
if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
return err
}
}
// Save the field set index so we don't have to rebuild it next time
if err := e.fieldset.Save(); err != nil {
return err
}
e.traceLogger.Info("Meta data index for shard loaded", zap.Uint64("id", shardID), zap.Duration("duration", time.Since(now)))
return nil
}
// IsIdle returns true if the cache is empty, there are no running compactions and the
// shard is fully compacted.
func (e *Engine) IsIdle() bool {
cacheEmpty := e.Cache.Size() == 0
runningCompactions := atomic.LoadInt64(&e.stats.CacheCompactionsActive)
runningCompactions += atomic.LoadInt64(&e.stats.TSMCompactionsActive[0])
runningCompactions += atomic.LoadInt64(&e.stats.TSMCompactionsActive[1])
runningCompactions += atomic.LoadInt64(&e.stats.TSMCompactionsActive[2])
runningCompactions += atomic.LoadInt64(&e.stats.TSMFullCompactionsActive)
runningCompactions += atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsActive)
return cacheEmpty && runningCompactions == 0 && e.CompactionPlan.FullyCompacted()
}
// Free releases any resources held by the engine to free up memory or CPU.
func (e *Engine) Free() error {
e.Cache.Free()
return e.FileStore.Free()
}
// Backup writes a tar archive of any TSM files modified since the passed
// in time to the passed in writer. The basePath will be prepended to the names
// of the files in the archive. It will force a snapshot of the WAL first
// then perform the backup with a read lock against the file store. This means
// that new TSM files will not be able to be created in this shard while the
// backup is running. For shards that are still actively getting writes, this
// could cause the WAL to back up, increasing memory usage and eventually rejecting writes.
func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error {
path, err := e.CreateSnapshot()
if err != nil {
return err
}
// Remove the temporary snapshot dir
defer os.RemoveAll(path)
return intar.Stream(w, path, basePath, intar.SinceFilterTarFile(since))
}
func (e *Engine) timeStampFilterTarFile(start, end time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
return func(fi os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
if !strings.HasSuffix(fi.Name(), ".tsm") {
return intar.StreamFile(fi, shardRelativePath, fullPath, tw)
}
var tombstonePath string
f, err := os.Open(fullPath)
if err != nil {
return err
}
r, err := NewTSMReader(f)
if err != nil {
return err
}
// Grab the tombstone file if one exists.
if r.HasTombstones() {
tombstonePath = filepath.Base(r.TombstoneFiles()[0].Path)
return intar.StreamFile(fi, shardRelativePath, tombstonePath, tw)
}
min, max := r.TimeRange()
stun := start.UnixNano()
eun := end.UnixNano()
// We overlap time ranges, we need to filter the file
if min >= stun && min <= eun && max > eun || // overlap to the right
max >= stun && max <= eun && min < stun || // overlap to the left
min <= stun && max >= eun { // TSM file has a range LARGER than the boundary
err := e.filterFileToBackup(r, fi, shardRelativePath, fullPath, start.UnixNano(), end.UnixNano(), tw)
if err != nil {
if err := r.Close(); err != nil {
return err
}
return err
}
}
// above is the only case where we need to keep the reader open.
if err := r.Close(); err != nil {
return err
}
// the TSM file is 100% inside the range, so we can just write it without scanning each block
if min >= start.UnixNano() && max <= end.UnixNano() {
if err := intar.StreamFile(fi, shardRelativePath, fullPath, tw); err != nil {
return err
}
}
return nil
}
}
func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.Time) error {
path, err := e.CreateSnapshot()
if err != nil {
return err
}
// Remove the temporary snapshot dir
defer os.RemoveAll(path)
return intar.Stream(w, path, basePath, e.timeStampFilterTarFile(start, end))
}
func (e *Engine) filterFileToBackup(r *TSMReader, fi os.FileInfo, shardRelativePath, fullPath string, start, end int64, tw *tar.Writer) error {
path := fullPath + ".tmp"
out, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
return err
}
defer os.Remove(path)
w, err := NewTSMWriter(out)
if err != nil {
return err
}
defer w.Close()
// implicit else: here we iterate over the blocks and only keep the ones we really want.
bi := r.BlockIterator()
for bi.Next() {
// not concerned with typ or checksum since we are just blindly writing back, with no decoding
key, minTime, maxTime, _, _, buf, err := bi.Read()
if err != nil {
return err
}
if minTime >= start && minTime <= end ||
maxTime >= start && maxTime <= end ||
minTime <= start && maxTime >= end {
err := w.WriteBlock(key, minTime, maxTime, buf)
if err != nil {
return err
}
}
}
if err := bi.Err(); err != nil {
return err
}
err = w.WriteIndex()
if err != nil {
return err
}
// make sure the whole file is out to disk
if err := w.Flush(); err != nil {
return err
}
tmpFi, err := os.Stat(path)
if err != nil {
return err
}
return intar.StreamRenameFile(tmpFi, fi.Name(), shardRelativePath, path, tw)
}
// Restore reads a tar archive generated by Backup().
// Only files that match basePath will be copied into the directory. This obtains
// a write lock so no operations can be performed while restoring.
func (e *Engine) Restore(r io.Reader, basePath string) error {
return e.overlay(r, basePath, false)
}
// Import reads a tar archive generated by Backup() and adds each
// file matching basePath as a new TSM file. This obtains
// a write lock so no operations can be performed while Importing.
// If the import is successful, a full compaction is scheduled.
func (e *Engine) Import(r io.Reader, basePath string) error {
if err := e.overlay(r, basePath, true); err != nil {
return err
}
return e.ScheduleFullCompaction()
}
// overlay reads a tar archive generated by Backup() and adds each file
// from the archive matching basePath to the shard.
// If asNew is true, each file will be installed as a new TSM file even if an
// existing file with the same name in the backup exists.
func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error {
// Copy files from archive while under lock to prevent reopening.
newFiles, err := func() ([]string, error) {
e.mu.Lock()
defer e.mu.Unlock()
var newFiles []string
tr := tar.NewReader(r)
for {
if fileName, err := e.readFileFromBackup(tr, basePath, asNew); err == io.EOF {
break
} else if err != nil {
return nil, err
} else if fileName != "" {
newFiles = append(newFiles, fileName)
}
}
if err := file.SyncDir(e.path); err != nil {
return nil, err
}
// The filestore will only handle tsm files. Other file types will be ignored.
if err := e.FileStore.Replace(nil, newFiles); err != nil {
return nil, err
}
return newFiles, nil
}()
if err != nil {
return err
}
// Load any new series keys to the index
tsmFiles := make([]TSMFile, 0, len(newFiles))
defer func() {
for _, r := range tsmFiles {
r.Close()
}
}()
ext := fmt.Sprintf(".%s", TmpTSMFileExtension)
for _, f := range newFiles {
// If asNew is true, the files created from readFileFromBackup will be new ones
// having a temp extension.
f = strings.TrimSuffix(f, ext)
if !strings.HasSuffix(f, TSMFileExtension) {
// This isn't a .tsm file.
continue
}
fd, err := os.Open(f)
if err != nil {
return err
}
r, err := NewTSMReader(fd)
if err != nil {
return err
}
tsmFiles = append(tsmFiles, r)
}
// Merge and dedup all the series keys across each reader to reduce
// lock contention on the index.
keys := make([][]byte, 0, 10000)
fieldTypes := make([]cnosql.DataType, 0, 10000)
ki := newMergeKeyIterator(tsmFiles, nil)
for ki.Next() {
key, typ := ki.Read()
fieldType := BlockTypeToCnosQLDataType(typ)
if fieldType == cnosql.Unknown {
return fmt.Errorf("unknown block type: %v", typ)
}
keys = append(keys, key)
fieldTypes = append(fieldTypes, fieldType)
if len(keys) == cap(keys) {
// Send batch of keys to the index.
if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
return err
}
// Reset buffers.
keys, fieldTypes = keys[:0], fieldTypes[:0]
}
}
if len(keys) > 0 {
// Add remaining partial batch.
if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
return err
}
}
return nil
}
// readFileFromBackup copies the next file from the archive into the shard.
// The file is skipped if it does not have a matching shardRelativePath prefix.
// If asNew is true, each file will be installed as a new TSM file even if an
// existing file with the same name in the backup exists.
func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, asNew bool) (string, error) {
// Read next archive file.
hdr, err := tr.Next()
if err != nil {
return "", err
}
if !strings.HasSuffix(hdr.Name, TSMFileExtension) {
// This isn't a .tsm file.
return "", nil
}
nativeFileName := filepath.FromSlash(hdr.Name)
// Skip file if it does not have a matching prefix.
if !strings.HasPrefix(nativeFileName, shardRelativePath) {
return "", nil
}
filename, err := filepath.Rel(shardRelativePath, nativeFileName)
if err != nil {
return "", err
}
// If this is a directory entry (usually just `index` for tsi), create it and move on.
if hdr.Typeflag == tar.TypeDir {
if err := os.MkdirAll(filepath.Join(e.path, filename), os.FileMode(hdr.Mode).Perm()); err != nil {
return "", err
}
return "", nil
}
if asNew {
filename = e.formatFileName(e.FileStore.NextGeneration(), 1) + "." + TSMFileExtension
}
tmp := fmt.Sprintf("%s.%s", filepath.Join(e.path, filename), TmpTSMFileExtension)
// Create new file on disk.
f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
return "", err
}
defer f.Close()
// Copy from archive to the file.
if _, err := io.CopyN(f, tr, hdr.Size); err != nil {
return "", err
}
// Sync to disk & close.
if err := f.Sync(); err != nil {
return "", err
}
return tmp, nil
}
// addToIndexFromKey will pull the measurement names, series keys, and field
// names from composite keys, and add them to the database index and measurement
// fields.
func (e *Engine) addToIndexFromKey(keys [][]byte, fieldTypes []cnosql.DataType) error {
var field []byte
names := make([][]byte, 0, len(keys))
tags := make([]models.Tags, 0, len(keys))
for i := 0; i < len(keys); i++ {
// Replace tsm key format with index key format.
keys[i], field = SeriesAndFieldFromCompositeKey(keys[i])
name := models.ParseName(keys[i])
mf := e.fieldset.CreateFieldsIfNotExists(name)
if err := mf.CreateFieldIfNotExists(field, fieldTypes[i]); err != nil {
return err
}
names = append(names, name)
tags = append(tags, models.ParseTags(keys[i]))
}
// Build in-memory index, if necessary.
if e.index.Type() == inmem.IndexName {
if err := e.index.InitializeSeries(keys, names, tags); err != nil {
return err
}
} else {
if err := e.index.CreateSeriesListIfNotExists(keys, names, tags); err != nil {
return err
}
}
return nil
}
// WritePoints writes metadata and point data into the engine.
// It returns an error if new points are added to an existing key.
func (e *Engine) WritePoints(points []models.Point) error {
values := make(map[string][]Value, len(points))
var (
keyBuf []byte
baseLen int
seriesErr error
)
for _, p := range points {
keyBuf = append(keyBuf[:0], p.Key()...)
keyBuf = append(keyBuf, keyFieldSeparator...)
baseLen = len(keyBuf)
iter := p.FieldIterator()
t := p.Time().UnixNano()
for iter.Next() {
// Skip fields named "time", they are illegal
if bytes.Equal(iter.FieldKey(), timeBytes) {
continue
}
keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...)
if e.seriesTypeMap != nil {
// Fast-path check to see if the field for the series already exists.
if v, ok := e.seriesTypeMap.Get(keyBuf); !ok {
if typ, err := e.Type(keyBuf); err != nil {
// Field type is unknown, we can try to add it.
} else if typ != iter.Type() {
// Existing type is different from what was passed in, we need to drop
// this write and refresh the series type map.
seriesErr = tsdb.ErrFieldTypeConflict
e.seriesTypeMap.Insert(keyBuf, int(typ))
continue
}
// Doesn't exist, so try to insert
vv, ok := e.seriesTypeMap.Insert(keyBuf, int(iter.Type()))
// We didn't insert and the type that exists isn't what we tried to insert, so
// we have a conflict and must drop this field/series.
if !ok || vv != int(iter.Type()) {
seriesErr = tsdb.ErrFieldTypeConflict
continue
}
} else if v != int(iter.Type()) {
// The series already exists, but with a different type. This is also a type conflict
// and we need to drop this field/series.
seriesErr = tsdb.ErrFieldTypeConflict
continue
}
}
var v Value
switch iter.Type() {
case models.Float:
fv, err := iter.FloatValue()
if err != nil {
return err
}
v = NewFloatValue(t, fv)
case models.Integer:
iv, err := iter.IntegerValue()
if err != nil {
return err
}
v = NewIntegerValue(t, iv)
case models.Unsigned:
iv, err := iter.UnsignedValue()
if err != nil {
return err
}
v = NewUnsignedValue(t, iv)
case models.String:
v = NewStringValue(t, iter.StringValue())
case models.Boolean:
bv, err := iter.BooleanValue()
if err != nil {
return err
}
v = NewBooleanValue(t, bv)
default:
return fmt.Errorf("unknown field type for %s: %s", string(iter.FieldKey()), p.String())
}
values[string(keyBuf)] = append(values[string(keyBuf)], v)
}
}
e.mu.RLock()
defer e.mu.RUnlock()
// first try to write to the cache
if err := e.Cache.WriteMulti(values); err != nil {
return err
}
if e.WALEnabled {
if _, err := e.WAL.WriteMulti(values); err != nil {
return err
}
}
return seriesErr
}
// DeleteSeriesRange removes the values between min and max (inclusive) from all series
func (e *Engine) DeleteSeriesRange(itr tsdb.SeriesIterator, min, max int64) error {
return e.DeleteSeriesRangeWithPredicate(itr, func(name []byte, tags models.Tags) (int64, int64, bool) {
return min, max, true
})
}
// DeleteSeriesRangeWithPredicate removes the values between min and max (inclusive) from all series
// for which predicate() returns true. If predicate() is nil, then all values in range are removed.
func (e *Engine) DeleteSeriesRangeWithPredicate(itr tsdb.SeriesIterator, predicate func(name []byte, tags models.Tags) (int64, int64, bool)) error {
var disableOnce bool
// Ensure that the index does not compact away the measurement or series we're
// going to delete before we're done with them.
if tsiIndex, ok := e.index.(*tsi1.Index); ok {
tsiIndex.DisableCompactions()
defer tsiIndex.EnableCompactions()
tsiIndex.Wait()
fs, err := tsiIndex.RetainFileSet()
if err != nil {
return err
}
defer fs.Release()
}
var (
sz int
min, max int64 = math.MinInt64, math.MaxInt64
// Indicator that the min/max time for the current batch has changed and
// we need to flush the current batch before appending to it.
flushBatch bool
)
// These are reversed from min/max to ensure they are different the first time through.
newMin, newMax := int64(math.MaxInt64), int64(math.MinInt64)
// There is no predicate, so setup newMin/newMax to delete the full time range.
if predicate == nil {
newMin = min
newMax = max
}
batch := make([][]byte, 0, 10000)
for {
elem, err := itr.Next()
if err != nil {
return err
} else if elem == nil {
break
}
// See if the series should be deleted and if so, what range of time.
if predicate != nil {
var shouldDelete bool
newMin, newMax, shouldDelete = predicate(elem.Name(), elem.Tags())
if !shouldDelete {
continue
}
// If the min/max happens to change for the batch, we need to flush
// the current batch and start a new one.
flushBatch = (min != newMin || max != newMax) && len(batch) > 0
}
if elem.Expr() != nil {
if v, ok := elem.Expr().(*cnosql.BooleanLiteral); !ok || !v.Val {
return errors.New("fields not supported in WHERE clause during deletion")
}
}
if !disableOnce {
// Disable and abort running compactions so that tombstones added to existing tsm
// files don't get removed. This would cause deleted measurements/series to
// re-appear once the compaction completed. We only disable the level compactions
// so that snapshotting does not stop while writing out tombstones. If it is stopped,
// and writing tombstones takes a long time, writes can get rejected due to the cache
// filling up.
e.disableLevelCompactions(true)
defer e.enableLevelCompactions(true)
e.sfile.DisableCompactions()
defer e.sfile.EnableCompactions()
e.sfile.Wait()
disableOnce = true
}
if sz >= deleteFlushThreshold || flushBatch {
// Delete all matching batch.
if err := e.deleteSeriesRange(batch, min, max); err != nil {
return err
}
batch = batch[:0]
sz = 0
flushBatch = false
}
// Use the new min/max time for the next iteration
min = newMin
max = newMax
key := models.MakeKey(elem.Name(), elem.Tags())
sz += len(key)
batch = append(batch, key)
}
if len(batch) > 0 {
// Delete all matching batch.
if err := e.deleteSeriesRange(batch, min, max); err != nil {
return err
}
}
e.index.Rebuild()
return nil
}
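// Illustrative predicate sketch (not part of the original source; "start" and
// "end" are caller-defined nanosecond timestamps): restrict the delete to one
// host and a fixed window like this:
//
//	err := e.DeleteSeriesRangeWithPredicate(itr, func(name []byte, tags models.Tags) (int64, int64, bool) {
//		if string(tags.Get([]byte("host"))) != "server01" {
//			return 0, 0, false // keep series for other hosts untouched
//		}
//		return start, end, true // delete [start, end] for matching series
//	})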
// deleteSeriesRange removes the values between min and max (inclusive) from all series. This
// does not update the index or disable compactions. This should mainly be called by DeleteSeriesRange
// and not directly.
func (e *Engine) deleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
if len(seriesKeys) == 0 {
return nil
}
// Ensure keys are sorted since lower layers require them to be.
if !bytesutil.IsSorted(seriesKeys) {
bytesutil.Sort(seriesKeys)
}
// Min and max time in the engine are slightly different from the query language values.
if min == cnosql.MinTime {
min = math.MinInt64
}
if max == cnosql.MaxTime {
max = math.MaxInt64
}
// Run the delete on each TSM file in parallel
if err := e.FileStore.Apply(func(r TSMFile) error {
// See if this TSM file contains the keys and time range
minKey, maxKey := seriesKeys[0], seriesKeys[len(seriesKeys)-1]
tsmMin, tsmMax := r.KeyRange()
tsmMin, _ = SeriesAndFieldFromCompositeKey(tsmMin)
tsmMax, _ = SeriesAndFieldFromCompositeKey(tsmMax)
overlaps := bytes.Compare(tsmMin, maxKey) <= 0 && bytes.Compare(tsmMax, minKey) >= 0
if !overlaps || !r.OverlapsTimeRange(min, max) {
return nil
}
// Delete each key we find in the file. We seek to the min key and walk from there.
batch := r.BatchDelete()
n := r.KeyCount()
var j int
for i := r.Seek(minKey); i < n; i++ {
indexKey, _ := r.KeyAt(i)
seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey)
for j < len(seriesKeys) && bytes.Compare(seriesKeys[j], seriesKey) < 0 {
j++
}
if j >= len(seriesKeys) {
break
}
if bytes.Equal(seriesKeys[j], seriesKey) {
if err := batch.DeleteRange([][]byte{indexKey}, min, max); err != nil {
batch.Rollback()
return err
}
}
}
return batch.Commit()
}); err != nil {
return err
}
// find the keys in the cache and remove them
deleteKeys := make([][]byte, 0, len(seriesKeys))
// ApplySerialEntryFn cannot return an error in this invocation.
_ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error {
seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))
// Cache does not walk keys in sorted order, so search the sorted
// series we need to delete to see if any of the cache keys match.
i := bytesutil.SearchBytes(seriesKeys, seriesKey)
if i < len(seriesKeys) && bytes.Equal(seriesKey, seriesKeys[i]) {
// k is the measurement + tags + sep + field
deleteKeys = append(deleteKeys, k)
}
return nil
})
// Sort the series keys because ApplyEntryFn iterates over the keys randomly.
bytesutil.Sort(deleteKeys)
e.Cache.DeleteRange(deleteKeys, min, max)
// delete from the WAL
if e.WALEnabled {
if _, err := e.WAL.DeleteRange(deleteKeys, min, max); err != nil {
return err
}
}
// The series are deleted on disk, but the index may still say they exist.
// Depending on the min,max time passed in, the series may or may not actually
// exist now. To reconcile the index, we walk the series keys that still exist
// on disk and cross out any keys that match the passed in series. Any series
// left in the slice at the end do not exist and can be deleted from the index.
// Note: this is inherently racy if writes are occurring to the same measurement/series that are
// being removed. A write could occur and exist in the cache at this point, but we
// would delete it from the index.
minKey := seriesKeys[0]
// Apply runs this func concurrently. The seriesKeys slice is mutated concurrently
// by different goroutines setting positions to nil.
if err := e.FileStore.Apply(func(r TSMFile) error {
n := r.KeyCount()
var j int
// Start from the min deleted key that exists in this file.
for i := r.Seek(minKey); i < n; i++ {
if j >= len(seriesKeys) {
return nil
}
indexKey, _ := r.KeyAt(i)
seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey)
// Skip over any deleted keys that are less than our tsm key
cmp := bytes.Compare(seriesKeys[j], seriesKey)
for j < len(seriesKeys) && cmp < 0 {
j++
if j >= len(seriesKeys) {
return nil
}
cmp = bytes.Compare(seriesKeys[j], seriesKey)
}
// We've found a matching key, cross it out so we do not remove it from the index.
if j < len(seriesKeys) && cmp == 0 {
seriesKeys[j] = emptyBytes
j++
}
}
return nil
}); err != nil {
return err
}
// Have we deleted all values for the series? If so, we need to remove
// the series from the index.
hasDeleted := false
for _, k := range seriesKeys {
if len(k) > 0 {
hasDeleted = true
break
}
}
if hasDeleted {
buf := make([]byte, 1024) // For use when accessing series file.
ids := tsdb.NewSeriesIDSet()
measurements := make(map[string]struct{}, 1)
for _, k := range seriesKeys {
if len(k) == 0 {
continue // This key was wiped because it shouldn't be removed from index.
}
name, tags := models.ParseKeyBytes(k)
sid := e.sfile.SeriesID(name, tags, buf)
if sid == 0 {
continue
}
// See if this series was found in the cache earlier
i := bytesutil.SearchBytes(deleteKeys, k)
var hasCacheValues bool
// If there are multiple fields, they will have the same prefix. If any field
// has values, then we can't delete it from the index.
for i < len(deleteKeys) && bytes.HasPrefix(deleteKeys[i], k) {
if e.Cache.Values(deleteKeys[i]).Len() > 0 {
hasCacheValues = true
break
}
i++
}
if hasCacheValues {
continue
}
measurements[string(name)] = struct{}{}
// Remove the series from the local index.
if err := e.index.DropSeries(sid, k, false); err != nil {
return err
}
// Add the id to the set of delete ids.
ids.Add(sid)
}
filesetChanged := false
for k := range measurements {
if dropped, err := e.index.DropMeasurementIfSeriesNotExist([]byte(k)); err != nil {
return err
} else if dropped {
if err := e.cleanupMeasurement([]byte(k)); err != nil {
return err
}
filesetChanged = true
}
}
if filesetChanged {
if err := e.fieldset.Save(); err != nil {
return err
}
}
// Remove any series IDs for our set that still exist in other shards.
// We cannot remove these from the series file yet.
if err := e.seriesIDSets.ForEach(func(s *tsdb.SeriesIDSet) {
ids = ids.AndNot(s)
}); err != nil {
return err
}
// Remove the remaining ids from the series file as they no longer exist
// in any shard.
var err error
ids.ForEach(func(id uint64) {
name, tags := e.sfile.Series(id)
if err1 := e.sfile.DeleteSeriesID(id); err1 != nil {
err = err1
return
}
// In the case of the inmem index the series can be removed across
// the global index (all shards).
if index, ok := e.index.(*inmem.ShardIndex); ok {
key := models.MakeKey(name, tags)
if e := index.Index.DropSeriesGlobal(key); e != nil {
err = e
}
}
})
if err != nil {
return err
}
}
return nil
}
func (e *Engine) cleanupMeasurement(name []byte) error {
// A sentinel error message to cause DeleteWithLock to not delete the measurement
abortErr := fmt.Errorf("measurements still exist")
// Under write lock, delete the measurement if we no longer have any data stored for
// the measurement. If data exists, we can't delete the field set yet as there
// were writes to the measurement while we are deleting it.
if err := e.fieldset.DeleteWithLock(string(name), func() error {
encodedName := models.EscapeMeasurement(name)
sep := len(encodedName)
// First scan the cache to see if any series exists for this measurement.
if err := e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error {
if bytes.HasPrefix(k, encodedName) && (k[sep] == ',' || k[sep] == keyFieldSeparator[0]) {
return abortErr
}
return nil
}); err != nil {
return err
}
// Check the filestore.
return e.FileStore.WalkKeys(name, func(k []byte, _ byte) error {
if bytes.HasPrefix(k, encodedName) && (k[sep] == ',' || k[sep] == keyFieldSeparator[0]) {
return abortErr
}
return nil
})
}); err != nil && err != abortErr {
// Something else failed, return it
return err
}
return nil
}
// DeleteMeasurement deletes a measurement and all related series.
func (e *Engine) DeleteMeasurement(name []byte) error {
// Attempt to find the series keys.
indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
itr, err := indexSet.MeasurementSeriesByExprIterator(name, nil)
if err != nil {
return err
} else if itr == nil {
return nil
}
defer itr.Close()
return e.DeleteSeriesRange(tsdb.NewSeriesIteratorAdapter(e.sfile, itr), math.MinInt64, math.MaxInt64)
}
// ForEachMeasurementName iterates over each measurement name in the engine.
func (e *Engine) ForEachMeasurementName(fn func(name []byte) error) error {
return e.index.ForEachMeasurementName(fn)
}
func (e *Engine) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error {
return e.index.CreateSeriesListIfNotExists(keys, names, tagsSlice)
}
func (e *Engine) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {
return e.index.CreateSeriesIfNotExists(key, name, tags)
}
// WriteTo is not implemented.
func (e *Engine) WriteTo(w io.Writer) (n int64, err error) { panic("not implemented") }
// WriteSnapshot will snapshot the cache and write a new TSM file with its contents, releasing the snapshot when done.
func (e *Engine) WriteSnapshot() (err error) {
// Lock and grab the cache snapshot along with all the closed WAL
// filenames associated with the snapshot
started := time.Now()
log, logEnd := logger.NewOperation(e.logger, "Cache snapshot", "tsm1_cache_snapshot")
defer func() {
elapsed := time.Since(started)
e.Cache.UpdateCompactTime(elapsed)
if err == nil {
log.Info("Snapshot for path written", zap.String("path", e.path), zap.Duration("duration", elapsed))
}
logEnd()
}()
closedFiles, snapshot, err := func() (segments []string, snapshot *Cache, err error) {
e.mu.Lock()
defer e.mu.Unlock()
if e.WALEnabled {
if err = e.WAL.CloseSegment(); err != nil {
return
}
segments, err = e.WAL.ClosedSegments()
if err != nil {
return
}
}
snapshot, err = e.Cache.Snapshot()
if err != nil {
return
}
return
}()
if err != nil {
return err
}
if snapshot.Size() == 0 {
e.Cache.ClearSnapshot(true)
return nil
}
// The snapshotted cache may have duplicate points and unsorted data. We need to deduplicate
// it before writing the snapshot. This can be very expensive so it's done while we are not
// holding the engine write lock.
dedup := time.Now()
snapshot.Deduplicate()
e.traceLogger.Info("Snapshot for path deduplicated",
zap.String("path", e.path),
zap.Duration("duration", time.Since(dedup)))
return e.writeSnapshotAndCommit(log, closedFiles, snapshot)
}
// CreateSnapshot will create a temp directory that holds
// temporary hardlinks to the underlying shard files.
func (e *Engine) CreateSnapshot() (string, error) {
if err := e.WriteSnapshot(); err != nil {
return "", err
}
e.mu.RLock()
defer e.mu.RUnlock()
path, err := e.FileStore.CreateSnapshot()
if err != nil {
return "", err
}
// Generate a snapshot of the index.
return path, nil
}
// writeSnapshotAndCommit will write the passed cache to a new TSM file and remove the closed WAL segments.
func (e *Engine) writeSnapshotAndCommit(log *zap.Logger, closedFiles []string, snapshot *Cache) (err error) {
defer func() {
if err != nil {
e.Cache.ClearSnapshot(false)
}
}()
// write the new snapshot files
newFiles, err := e.Compactor.WriteSnapshot(snapshot)
if err != nil {
log.Info("Error writing snapshot from compactor", zap.Error(err))
return err
}
e.mu.RLock()
defer e.mu.RUnlock()
// update the file store with these new files
if err := e.FileStore.Replace(nil, newFiles); err != nil {
log.Info("Error adding new TSM files from snapshot. Removing temp files.", zap.Error(err))
// Remove the new snapshot files. We will try again.
for _, file := range newFiles {
if err := os.Remove(file); err != nil {
log.Info("Unable to remove file", zap.String("path", file), zap.Error(err))
}
}
return err
}
// clear the snapshot from the in-memory cache, then the old WAL files
e.Cache.ClearSnapshot(true)
if e.WALEnabled {
if err := e.WAL.Remove(closedFiles); err != nil {
log.Info("Error removing closed WAL segments", zap.Error(err))
}
}
return nil
}
// compactCache continually checks if the WAL cache should be written to disk.
func (e *Engine) compactCache() {
t := time.NewTicker(time.Second)
defer t.Stop()
for {
e.mu.RLock()
quit := e.snapDone
e.mu.RUnlock()
select {
case <-quit:
return
case <-t.C:
e.Cache.UpdateAge()
if e.ShouldCompactCache(time.Now()) {
start := time.Now()
e.traceLogger.Info("Compacting cache", zap.String("path", e.path))
err := e.WriteSnapshot()
if err != nil && err != errCompactionsDisabled {
e.logger.Info("Error writing snapshot", zap.Error(err))
atomic.AddInt64(&e.stats.CacheCompactionErrors, 1)
} else {
atomic.AddInt64(&e.stats.CacheCompactions, 1)
}
atomic.AddInt64(&e.stats.CacheCompactionDuration, time.Since(start).Nanoseconds())
}
}
}
}
// ShouldCompactCache returns true if the Cache is over its flush threshold
// or if the cache's last write time is older than the write cold threshold relative to the passed in time.
func (e *Engine) ShouldCompactCache(t time.Time) bool {
sz := e.Cache.Size()
if sz == 0 {
return false
}
if sz > e.CacheFlushMemorySizeThreshold {
return true
}
return t.Sub(e.Cache.LastWriteTime()) > e.CacheFlushWriteColdDuration
}
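// compact is the main background compaction loop: every second it plans level and full compactions,
// updates the queue statistics, asks the scheduler for the next runnable compaction, and releases any
// plans that were not started.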
func (e *Engine) compact(wg *sync.WaitGroup) {
t := time.NewTicker(time.Second)
defer t.Stop()
for {
e.mu.RLock()
quit := e.done
e.mu.RUnlock()
select {
case <-quit:
return
case <-t.C:
// Find our compaction plans
level1Groups := e.CompactionPlan.PlanLevel(1)
level2Groups := e.CompactionPlan.PlanLevel(2)
level3Groups := e.CompactionPlan.PlanLevel(3)
level4Groups := e.CompactionPlan.Plan(e.LastModified())
atomic.StoreInt64(&e.stats.TSMOptimizeCompactionsQueue, int64(len(level4Groups)))
// If no full compactions are needed, see if an optimize compaction is needed
if len(level4Groups) == 0 {
level4Groups = e.CompactionPlan.PlanOptimize()
atomic.StoreInt64(&e.stats.TSMOptimizeCompactionsQueue, int64(len(level4Groups)))
}
// Update the level plan queue stats
atomic.StoreInt64(&e.stats.TSMCompactionsQueue[0], int64(len(level1Groups)))
atomic.StoreInt64(&e.stats.TSMCompactionsQueue[1], int64(len(level2Groups)))
atomic.StoreInt64(&e.stats.TSMCompactionsQueue[2], int64(len(level3Groups)))
// Set the queue depths on the scheduler
e.scheduler.setDepth(1, len(level1Groups))
e.scheduler.setDepth(2, len(level2Groups))
e.scheduler.setDepth(3, len(level3Groups))
e.scheduler.setDepth(4, len(level4Groups))
// Find the next compaction that can run and try to kick it off
if level, runnable := e.scheduler.next(); runnable {
switch level {
case 1:
if e.compactHiPriorityLevel(level1Groups[0], 1, false, wg) {
level1Groups = level1Groups[1:]
}
case 2:
if e.compactHiPriorityLevel(level2Groups[0], 2, false, wg) {
level2Groups = level2Groups[1:]
}
case 3:
if e.compactLoPriorityLevel(level3Groups[0], 3, true, wg) {
level3Groups = level3Groups[1:]
}
case 4:
if e.compactFull(level4Groups[0], wg) {
level4Groups = level4Groups[1:]
}
}
}
// Release all the plans we didn't start.
e.CompactionPlan.Release(level1Groups)
e.CompactionPlan.Release(level2Groups)
e.CompactionPlan.Release(level3Groups)
e.CompactionPlan.Release(level4Groups)
}
}
}
// compactHiPriorityLevel kicks off compactions using the high priority policy. It returns
// true if the compaction was started
func (e *Engine) compactHiPriorityLevel(grp CompactionGroup, level int, fast bool, wg *sync.WaitGroup) bool {
s := e.levelCompactionStrategy(grp, fast, level)
if s == nil {
return false
}
// Try hi priority limiter, otherwise steal a little from the low priority if we can.
if e.compactionLimiter.TryTake() {
atomic.AddInt64(&e.stats.TSMCompactionsActive[level-1], 1)
wg.Add(1)
go func() {
defer wg.Done()
defer atomic.AddInt64(&e.stats.TSMCompactionsActive[level-1], -1)
defer e.compactionLimiter.Release()
s.Apply()
// Release the files in the compaction plan
e.CompactionPlan.Release([]CompactionGroup{s.group})
}()
return true
}
// Limiter slot not available; the compaction was not started.
return false
}
// compactLoPriorityLevel kicks off compactions using the lo priority policy. It returns
// true if the compaction was started.
func (e *Engine) compactLoPriorityLevel(grp CompactionGroup, level int, fast bool, wg *sync.WaitGroup) bool {
s := e.levelCompactionStrategy(grp, fast, level)
if s == nil {
return false
}
// Try the lo priority limiter, otherwise steal a little from the high priority if we can.
if e.compactionLimiter.TryTake() {
atomic.AddInt64(&e.stats.TSMCompactionsActive[level-1], 1)
wg.Add(1)
go func() {
defer wg.Done()
defer atomic.AddInt64(&e.stats.TSMCompactionsActive[level-1], -1)
defer e.compactionLimiter.Release()
s.Apply()
// Release the files in the compaction plan
e.CompactionPlan.Release([]CompactionGroup{s.group})
}()
return true
}
return false
}
// compactFull kicks off full and optimize compactions using the lo priority policy. It returns
// true if the compaction was started.
func (e *Engine) compactFull(grp CompactionGroup, wg *sync.WaitGroup) bool {
s := e.fullCompactionStrategy(grp, false)
if s == nil {
return false
}
// Try the lo priority limiter, otherwise steal a little from the high priority if we can.
if e.compactionLimiter.TryTake() {
atomic.AddInt64(&e.stats.TSMFullCompactionsActive, 1)
wg.Add(1)
go func() {
defer wg.Done()
defer atomic.AddInt64(&e.stats.TSMFullCompactionsActive, -1)
defer e.compactionLimiter.Release()
s.Apply()
// Release the files in the compaction plan
e.CompactionPlan.Release([]CompactionGroup{s.group})
}()
return true
}
return false
}
// compactionStrategy holds the details of what to do in a compaction.
type compactionStrategy struct {
group CompactionGroup
fast bool
level int
durationStat *int64
activeStat *int64
successStat *int64
errorStat *int64
logger *zap.Logger
compactor *Compactor
fileStore *FileStore
engine *Engine
}
// Apply executes the compaction strategy against its group and records the duration.
func (s *compactionStrategy) Apply() {
start := time.Now()
s.compactGroup()
atomic.AddInt64(s.durationStat, time.Since(start).Nanoseconds())
}
// compactGroup executes the compaction strategy against a single CompactionGroup.
func (s *compactionStrategy) compactGroup() {
group := s.group
log, logEnd := logger.NewOperation(s.logger, "TSM compaction", "tsm1_compact_group")
defer logEnd()
log.Info("Beginning compaction", zap.Int("tsm1_files_n", len(group)))
for i, f := range group {
log.Info("Compacting file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
}
var (
err error
files []string
)
if s.fast {
files, err = s.compactor.CompactFast(group)
} else {
files, err = s.compactor.CompactFull(group)
}
if err != nil {
_, inProgress := err.(errCompactionInProgress)
if err == errCompactionsDisabled || inProgress {
log.Info("Aborted compaction", zap.Error(err))
if _, ok := err.(errCompactionInProgress); ok {
time.Sleep(time.Second)
}
return
}
log.Info("Error compacting TSM files", zap.Error(err))
// We hit a bad TSM file - rename so the next compaction can proceed.
if _, ok := err.(errBlockRead); ok {
path := err.(errBlockRead).file
log.Info("Renaming a corrupt TSM file due to compaction error", zap.Error(err))
if err := s.fileStore.ReplaceWithCallback([]string{path}, nil, nil); err != nil {
log.Info("Error removing bad TSM file", zap.Error(err))
} else if e := os.Rename(path, path+"."+BadTSMFileExtension); e != nil {
log.Info("Error renaming corrupt TSM file", zap.Error((err)))
}
}
atomic.AddInt64(s.errorStat, 1)
time.Sleep(time.Second)
return
}
if err := s.fileStore.ReplaceWithCallback(group, files, nil); err != nil {
log.Info("Error replacing new TSM files", zap.Error(err))
atomic.AddInt64(s.errorStat, 1)
time.Sleep(time.Second)
return
}
for i, f := range files {
log.Info("Compacted file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
}
log.Info("Finished compacting files",
zap.Int("tsm1_files_n", len(files)))
atomic.AddInt64(s.successStat, 1)
}
// levelCompactionStrategy returns a compactionStrategy for the given level.
// It returns nil if there are no TSM files to compact.
func (e *Engine) levelCompactionStrategy(group CompactionGroup, fast bool, level int) *compactionStrategy {
return &compactionStrategy{
group: group,
logger: e.logger.With(zap.Int("tsm1_level", level), zap.String("tsm1_strategy", "level")),
fileStore: e.FileStore,
compactor: e.Compactor,
fast: fast,
engine: e,
level: level,
activeStat: &e.stats.TSMCompactionsActive[level-1],
successStat: &e.stats.TSMCompactions[level-1],
errorStat: &e.stats.TSMCompactionErrors[level-1],
durationStat: &e.stats.TSMCompactionDuration[level-1],
}
}
// fullCompactionStrategy returns a compactionStrategy for higher level generations of TSM files.
// It returns nil if there are no TSM files to compact.
func (e *Engine) fullCompactionStrategy(group CompactionGroup, optimize bool) *compactionStrategy {
s := &compactionStrategy{
group: group,
logger: e.logger.With(zap.String("tsm1_strategy", "full"), zap.Bool("tsm1_optimize", optimize)),
fileStore: e.FileStore,
compactor: e.Compactor,
fast: optimize,
engine: e,
level: 4,
}
if optimize {
s.activeStat = &e.stats.TSMOptimizeCompactionsActive
s.successStat = &e.stats.TSMOptimizeCompactions
s.errorStat = &e.stats.TSMOptimizeCompactionErrors
s.durationStat = &e.stats.TSMOptimizeCompactionDuration
} else {
s.activeStat = &e.stats.TSMFullCompactionsActive
s.successStat = &e.stats.TSMFullCompactions
s.errorStat = &e.stats.TSMFullCompactionErrors
s.durationStat = &e.stats.TSMFullCompactionDuration
}
return s
}
// reloadCache reads the WAL segment files and loads them into the cache.
func (e *Engine) reloadCache() error {
now := time.Now()
files, err := segmentFileNames(e.WAL.Path())
if err != nil {
return err
}
limit := e.Cache.MaxSize()
defer func() {
e.Cache.SetMaxSize(limit)
}()
// Disable the max size during loading
e.Cache.SetMaxSize(0)
loader := NewCacheLoader(files)
loader.WithLogger(e.logger)
if err := loader.Load(e.Cache); err != nil {
return err
}
e.traceLogger.Info("Reloaded WAL cache",
zap.String("path", e.WAL.Path()), zap.Duration("duration", time.Since(now)))
return nil
}
// cleanup removes all temp files and dirs that exist on disk. This should only be run at startup to avoid
// removing tmp files that are still in use.
func (e *Engine) cleanup() error {
allfiles, err := ioutil.ReadDir(e.path)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return err
}
ext := fmt.Sprintf(".%s", TmpTSMFileExtension)
for _, f := range allfiles {
// Check to see if there are any `.tmp` directories that were left over from failed shard snapshots
if f.IsDir() && strings.HasSuffix(f.Name(), ext) {
if err := os.RemoveAll(filepath.Join(e.path, f.Name())); err != nil {
return fmt.Errorf("error removing tmp snapshot directory %q: %s", f.Name(), err)
}
}
}
return e.cleanupTempTSMFiles()
}
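// cleanupTempTSMFiles removes any leftover temporary compaction files from the shard directory.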
func (e *Engine) cleanupTempTSMFiles() error {
files, err := filepath.Glob(filepath.Join(e.path, fmt.Sprintf("*.%s", CompactionTempExtension)))
if err != nil {
return fmt.Errorf("error getting compaction temp files: %s", err.Error())
}
for _, f := range files {
if err := os.Remove(f); err != nil {
return fmt.Errorf("error removing temp compaction files: %v", err)
}
}
return nil
}
// KeyCursor returns a KeyCursor for the given key starting at time t.
func (e *Engine) KeyCursor(ctx context.Context, key []byte, t int64, ascending bool) *KeyCursor {
return e.FileStore.KeyCursor(ctx, key, t, ascending)
}
// CreateIterator returns an iterator for the measurement based on opt.
func (e *Engine) CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error) {
if span := tracing.SpanFromContext(ctx); span != nil {
labels := []string{"shard_id", strconv.Itoa(int(e.id)), "measurement", measurement}
if opt.Condition != nil {
labels = append(labels, "cond", opt.Condition.String())
}
span = span.StartSpan("create_iterator")
span.SetLabels(labels...)
ctx = tracing.NewContextWithSpan(ctx, span)
group := metrics.NewGroup(tsmGroup)
ctx = metrics.NewContextWithGroup(ctx, group)
start := time.Now()
defer group.GetTimer(planningTimer).UpdateSince(start)
}
if call, ok := opt.Expr.(*cnosql.Call); ok {
if opt.Interval.IsZero() {
if call.Name == "first" || call.Name == "last" {
refOpt := opt
refOpt.Limit = 1
refOpt.Ascending = call.Name == "first"
refOpt.Ordered = true
refOpt.Expr = call.Args[0]
itrs, err := e.createVarRefIterator(ctx, measurement, refOpt)
if err != nil {
return nil, err
}
return newMergeFinalizerIterator(ctx, itrs, opt, e.logger)
}
}
inputs, err := e.createCallIterator(ctx, measurement, call, opt)
if err != nil {
return nil, err
} else if len(inputs) == 0 {
return nil, nil
}
return newMergeFinalizerIterator(ctx, inputs, opt, e.logger)
}
itrs, err := e.createVarRefIterator(ctx, measurement, opt)
if err != nil {
return nil, err
}
return newMergeFinalizerIterator(ctx, itrs, opt, e.logger)
}
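// indexTagSets is implemented by indexes that can compute the tag sets for a measurement directly.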
type indexTagSets interface {
TagSets(name []byte, options query.IteratorOptions) ([]*query.TagSet, error)
}
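// createCallIterator creates the iterators for a function call expression: one merged iterator per
// tag set, with each series wrapped in a call iterator.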
func (e *Engine) createCallIterator(ctx context.Context, measurement string, call *cnosql.Call, opt query.IteratorOptions) ([]query.Iterator, error) {
ref, _ := call.Args[0].(*cnosql.VarRef)
if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
return nil, err
} else if !exists {
return nil, nil
}
// Determine tagsets for this measurement based on dimensions and filters.
var (
tagSets []*query.TagSet
err error
)
if e.index.Type() == tsdb.InmemIndexName {
ts := e.index.(indexTagSets)
tagSets, err = ts.TagSets([]byte(measurement), opt)
} else {
indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
tagSets, err = indexSet.TagSets(e.sfile, []byte(measurement), opt)
}
if err != nil {
return nil, err
}
// Reverse the tag sets if we are ordering by descending.
if !opt.Ascending {
for _, t := range tagSets {
t.Reverse()
}
}
// Calculate tag sets and apply SLIMIT/SOFFSET.
tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)
itrs := make([]query.Iterator, 0, len(tagSets))
if err := func() error {
for _, t := range tagSets {
// Abort if the query was killed
select {
case <-opt.InterruptCh:
query.Iterators(itrs).Close()
return query.ErrQueryInterrupted
default:
}
inputs, err := e.createTagSetIterators(ctx, ref, measurement, t, opt)
if err != nil {
return err
} else if len(inputs) == 0 {
continue
}
// Wrap each series in a call iterator.
for i, input := range inputs {
if opt.InterruptCh != nil {
input = query.NewInterruptIterator(input, opt.InterruptCh)
}
itr, err := query.NewCallIterator(input, opt)
if err != nil {
query.Iterators(inputs).Close()
return err
}
inputs[i] = itr
}
itr := query.NewParallelMergeIterator(inputs, opt, runtime.GOMAXPROCS(0))
itrs = append(itrs, itr)
}
return nil
}(); err != nil {
query.Iterators(itrs).Close()
return nil, err
}
return itrs, nil
}
// createVarRefIterator creates an iterator for a variable reference.
func (e *Engine) createVarRefIterator(ctx context.Context, measurement string, opt query.IteratorOptions) ([]query.Iterator, error) {
ref, _ := opt.Expr.(*cnosql.VarRef)
if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
return nil, err
} else if !exists {
return nil, nil
}
var (
tagSets []*query.TagSet
err error
)
if e.index.Type() == tsdb.InmemIndexName {
ts := e.index.(indexTagSets)
tagSets, err = ts.TagSets([]byte(measurement), opt)
} else {
indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
tagSets, err = indexSet.TagSets(e.sfile, []byte(measurement), opt)
}
if err != nil {
return nil, err
}
// Reverse the tag sets if we are ordering by descending.
if !opt.Ascending {
for _, t := range tagSets {
t.Reverse()
}
}
// Calculate tag sets and apply SLIMIT/SOFFSET.
tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)
itrs := make([]query.Iterator, 0, len(tagSets))
if err := func() error {
for _, t := range tagSets {
inputs, err := e.createTagSetIterators(ctx, ref, measurement, t, opt)
if err != nil {
return err
} else if len(inputs) == 0 {
continue
}
// If we have a LIMIT or OFFSET and the grouping of the outer query
// is different than the current grouping, we need to perform the
// limit on each of the individual series keys instead to improve
// performance.
if (opt.Limit > 0 || opt.Offset > 0) && len(opt.Dimensions) != len(opt.GroupBy) {
for i, input := range inputs {
inputs[i] = newLimitIterator(input, opt)
}
}
itr, err := query.Iterators(inputs).Merge(opt)
if err != nil {
query.Iterators(inputs).Close()
return err
}
// Apply a limit on the merged iterator.
if opt.Limit > 0 || opt.Offset > 0 {
if len(opt.Dimensions) == len(opt.GroupBy) {
// When the final dimensions and the current grouping are
// the same, we will only produce one series so we can use
// the faster limit iterator.
itr = newLimitIterator(itr, opt)
} else {
// When the dimensions are different than the current
// grouping, we need to account for the possibility there
// will be multiple series. The limit iterator in the
// cnosql package handles that scenario.
itr = query.NewLimitIterator(itr, opt)
}
}
itrs = append(itrs, itr)
}
return nil
}(); err != nil {
query.Iterators(itrs).Close()
return nil, err
}
return itrs, nil
}
// createTagSetIterators creates a set of iterators for a tagset.
func (e *Engine) createTagSetIterators(ctx context.Context, ref *cnosql.VarRef, name string, t *query.TagSet, opt query.IteratorOptions) ([]query.Iterator, error) {
// Set parallelism by number of logical cpus.
parallelism := runtime.GOMAXPROCS(0)
if parallelism > len(t.SeriesKeys) {
parallelism = len(t.SeriesKeys)
}
// Create series key groupings, each carrying its own error slot.
groups := make([]struct {
keys []string
filters []cnosql.Expr
itrs []query.Iterator
err error
}, parallelism)
// Group series keys.
n := len(t.SeriesKeys) / parallelism
for i := 0; i < parallelism; i++ {
group := &groups[i]
if i < parallelism-1 {
group.keys = t.SeriesKeys[i*n : (i+1)*n]
group.filters = t.Filters[i*n : (i+1)*n]
} else {
group.keys = t.SeriesKeys[i*n:]
group.filters = t.Filters[i*n:]
}
}
// Read series groups in parallel.
var wg sync.WaitGroup
for i := range groups {
wg.Add(1)
go func(i int) {
defer wg.Done()
groups[i].itrs, groups[i].err = e.createTagSetGroupIterators(ctx, ref, name, groups[i].keys, t, groups[i].filters, opt)
}(i)
}
wg.Wait()
// Determine total number of iterators so we can allocate only once.
var itrN int
for _, group := range groups {
itrN += len(group.itrs)
}
// Combine all iterators together and check for errors.
var err error
itrs := make([]query.Iterator, 0, itrN)
for _, group := range groups {
if group.err != nil {
err = group.err
}
itrs = append(itrs, group.itrs...)
}
// If an error occurred, make sure we close all created iterators.
if err != nil {
query.Iterators(itrs).Close()
return nil, err
}
return itrs, nil
}
// createTagSetGroupIterators creates a set of iterators for a subset of a tagset's series.
func (e *Engine) createTagSetGroupIterators(ctx context.Context, ref *cnosql.VarRef, name string, seriesKeys []string, t *query.TagSet, filters []cnosql.Expr, opt query.IteratorOptions) ([]query.Iterator, error) {
itrs := make([]query.Iterator, 0, len(seriesKeys))
for i, seriesKey := range seriesKeys {
var conditionFields []cnosql.VarRef
if filters[i] != nil {
// Retrieve non-time fields from this series filter and filter out tags.
conditionFields = cnosql.ExprNames(filters[i])
}
itr, err := e.createVarRefSeriesIterator(ctx, ref, name, seriesKey, t, filters[i], conditionFields, opt)
if err != nil {
return itrs, err
} else if itr == nil {
continue
}
itrs = append(itrs, itr)
// Abort if the query was killed
select {
case <-opt.InterruptCh:
query.Iterators(itrs).Close()
return nil, query.ErrQueryInterrupted
default:
}
// Enforce series limit at creation time.
if opt.MaxSeriesN > 0 && len(itrs) > opt.MaxSeriesN {
query.Iterators(itrs).Close()
return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", len(itrs), opt.MaxSeriesN)
}
}
return itrs, nil
}
// createVarRefSeriesIterator creates an iterator for a variable reference for a series.
func (e *Engine) createVarRefSeriesIterator(ctx context.Context, ref *cnosql.VarRef, name string, seriesKey string, t *query.TagSet, filter cnosql.Expr, conditionFields []cnosql.VarRef, opt query.IteratorOptions) (query.Iterator, error) {
_, tfs := models.ParseKey([]byte(seriesKey))
tags := query.NewTags(tfs.Map())
// Create options specific for this series.
itrOpt := opt
itrOpt.Condition = filter
var curCounter, auxCounter, condCounter *metrics.Counter
if col := metrics.GroupFromContext(ctx); col != nil {
curCounter = col.GetCounter(numberOfRefCursorsCounter)
auxCounter = col.GetCounter(numberOfAuxCursorsCounter)
condCounter = col.GetCounter(numberOfCondCursorsCounter)
}
// Build main cursor.
var cur cursor
if ref != nil {
cur = e.buildCursor(ctx, name, seriesKey, tfs, ref, opt)
// If the field doesn't exist then don't build an iterator.
if cur == nil {
return nil, nil
}
if curCounter != nil {
curCounter.Add(1)
}
}
// Build auxiliary cursors.
// Tag values should be returned if the field doesn't exist.
var aux []cursorAt
if len(opt.Aux) > 0 {
aux = make([]cursorAt, len(opt.Aux))
for i, ref := range opt.Aux {
// Create cursor from field if a tag wasn't requested.
if ref.Type != cnosql.Tag {
cur := e.buildCursor(ctx, name, seriesKey, tfs, &ref, opt)
if cur != nil {
if auxCounter != nil {
auxCounter.Add(1)
}
aux[i] = newBufCursor(cur, opt.Ascending)
continue
}
// If a field was requested, use a nil cursor of the requested type.
switch ref.Type {
case cnosql.Float, cnosql.AnyField:
aux[i] = nilFloatLiteralValueCursor
continue
case cnosql.Integer:
aux[i] = nilIntegerLiteralValueCursor
continue
case cnosql.Unsigned:
aux[i] = nilUnsignedLiteralValueCursor
continue
case cnosql.String:
aux[i] = nilStringLiteralValueCursor
continue
case cnosql.Boolean:
aux[i] = nilBooleanLiteralValueCursor
continue
}
}
// If field doesn't exist, use the tag value.
if v := tags.Value(ref.Val); v == "" {
// However, if the tag value is blank then return a null.
aux[i] = nilStringLiteralValueCursor
} else {
aux[i] = &literalValueCursor{value: v}
}
}
}
// Remove _tagKey condition field.
// We can't search on it because we can't join it to _tagValue based on time.
if varRefSliceContains(conditionFields, "_tagKey") {
conditionFields = varRefSliceRemove(conditionFields, "_tagKey")
// Remove _tagKey conditional references from iterator.
itrOpt.Condition = cnosql.RewriteExpr(cnosql.CloneExpr(itrOpt.Condition), func(expr cnosql.Expr) cnosql.Expr {
switch expr := expr.(type) {
case *cnosql.BinaryExpr:
if ref, ok := expr.LHS.(*cnosql.VarRef); ok && ref.Val == "_tagKey" {
return &cnosql.BooleanLiteral{Val: true}
}
if ref, ok := expr.RHS.(*cnosql.VarRef); ok && ref.Val == "_tagKey" {
return &cnosql.BooleanLiteral{Val: true}
}
}
return expr
})
}
// Build conditional field cursors.
// If a conditional field doesn't exist then ignore the series.
var conds []cursorAt
if len(conditionFields) > 0 {
conds = make([]cursorAt, len(conditionFields))
for i, ref := range conditionFields {
// Create cursor from field if a tag wasn't requested.
if ref.Type != cnosql.Tag {
cur := e.buildCursor(ctx, name, seriesKey, tfs, &ref, opt)
if cur != nil {
if condCounter != nil {
condCounter.Add(1)
}
conds[i] = newBufCursor(cur, opt.Ascending)
continue
}
// If a field was requested, use a nil cursor of the requested type.
switch ref.Type {
case cnosql.Float, cnosql.AnyField:
conds[i] = nilFloatLiteralValueCursor
continue
case cnosql.Integer:
conds[i] = nilIntegerLiteralValueCursor
continue
case cnosql.Unsigned:
conds[i] = nilUnsignedLiteralValueCursor
continue
case cnosql.String:
conds[i] = nilStringLiteralValueCursor
continue
case cnosql.Boolean:
conds[i] = nilBooleanLiteralValueCursor
continue
}
}
// If field doesn't exist, use the tag value.
if v := tags.Value(ref.Val); v == "" {
// However, if the tag value is blank then return a null.
conds[i] = nilStringLiteralValueCursor
} else {
conds[i] = &literalValueCursor{value: v}
}
}
}
condNames := cnosql.VarRefs(conditionFields).Strings()
// Limit tags to only the dimensions selected.
dimensions := opt.GetDimensions()
tags = tags.Subset(dimensions)
// If it's only auxiliary fields then it doesn't matter what type of iterator we use.
if ref == nil {
if opt.StripName {
name = ""
}
return newFloatIterator(name, tags, itrOpt, nil, aux, conds, condNames), nil
}
// Remove name if requested.
if opt.StripName {
name = ""
}
switch cur := cur.(type) {
case floatCursor:
return newFloatIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
case integerCursor:
return newIntegerIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
case unsignedCursor:
return newUnsignedIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
case stringCursor:
return newStringIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
case booleanCursor:
return newBooleanIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil
default:
panic("unreachable")
}
}
// buildCursor creates an untyped cursor for a field.
func (e *Engine) buildCursor(ctx context.Context, measurement, seriesKey string, tags models.Tags, ref *cnosql.VarRef, opt query.IteratorOptions) cursor {
// Check if this is a system field cursor.
switch ref.Val {
case "_name":
return &stringSliceCursor{values: []string{measurement}}
case "_tagKey":
return &stringSliceCursor{values: tags.Keys()}
case "_tagValue":
return &stringSliceCursor{values: matchTagValues(tags, opt.Condition)}
case "_seriesKey":
return &stringSliceCursor{values: []string{seriesKey}}
}
// Look up fields for measurement.
mf := e.fieldset.FieldsByString(measurement)
if mf == nil {
return nil
}
// Check for system field for field keys.
if ref.Val == "_fieldKey" {
return &stringSliceCursor{values: mf.FieldKeys()}
}
// Find individual field.
f := mf.Field(ref.Val)
if f == nil {
return nil
}
// Check if we need to perform a cast. Performing a cast in the
// engine (if it is possible) is much more efficient than an automatic cast.
if ref.Type != cnosql.Unknown && ref.Type != cnosql.AnyField && ref.Type != f.Type {
switch ref.Type {
case cnosql.Float:
switch f.Type {
case cnosql.Integer:
cur := e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &floatCastIntegerCursor{cursor: cur}
case cnosql.Unsigned:
cur := e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &floatCastUnsignedCursor{cursor: cur}
}
case cnosql.Integer:
switch f.Type {
case cnosql.Float:
cur := e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &integerCastFloatCursor{cursor: cur}
case cnosql.Unsigned:
cur := e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &integerCastUnsignedCursor{cursor: cur}
}
case cnosql.Unsigned:
switch f.Type {
case cnosql.Float:
cur := e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &unsignedCastFloatCursor{cursor: cur}
case cnosql.Integer:
cur := e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt)
return &unsignedCastIntegerCursor{cursor: cur}
}
}
return nil
}
// Return appropriate cursor based on type.
switch f.Type {
case cnosql.Float:
return e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt)
case cnosql.Integer:
return e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt)
case cnosql.Unsigned:
return e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt)
case cnosql.String:
return e.buildStringCursor(ctx, measurement, seriesKey, ref.Val, opt)
case cnosql.Boolean:
return e.buildBooleanCursor(ctx, measurement, seriesKey, ref.Val, opt)
default:
panic("unreachable")
}
}
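// matchTagValues returns the values of the tags whose keys satisfy the given condition; if there is
// no condition, all tag values are returned.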
func matchTagValues(tags models.Tags, condition cnosql.Expr) []string {
if condition == nil {
return tags.Values()
}
// Populate map with tag values.
data := map[string]interface{}{}
for _, tag := range tags {
data[string(tag.Key)] = string(tag.Value)
}
// Match against each specific tag.
var values []string
for _, tag := range tags {
data["_tagKey"] = string(tag.Key)
if cnosql.EvalBool(condition, data) {
values = append(values, string(tag.Value))
}
}
return values
}
// IteratorCost produces the cost of an iterator.
func (e *Engine) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) {
// Determine if this measurement exists. If it does not, then no shards are
// accessed to begin with.
if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {
return query.IteratorCost{}, err
} else if !exists {
return query.IteratorCost{}, nil
}
// Determine all of the tag sets for this query.
indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
tagSets, err := indexSet.TagSets(e.sfile, []byte(measurement), opt)
if err != nil {
return query.IteratorCost{}, err
}
// Attempt to retrieve the ref from the main expression (if it exists).
var ref *cnosql.VarRef
if opt.Expr != nil {
if v, ok := opt.Expr.(*cnosql.VarRef); ok {
ref = v
} else if call, ok := opt.Expr.(*cnosql.Call); ok {
if len(call.Args) > 0 {
ref, _ = call.Args[0].(*cnosql.VarRef)
}
}
}
// Count the number of series concatenated from the tag set.
cost := query.IteratorCost{NumShards: 1}
for _, t := range tagSets {
cost.NumSeries += int64(len(t.SeriesKeys))
for i, key := range t.SeriesKeys {
// Retrieve the cost for the main expression (if it exists).
if ref != nil {
c := e.seriesCost(key, ref.Val, opt.StartTime, opt.EndTime)
cost = cost.Combine(c)
}
// Retrieve the cost for every auxiliary field since these are also
// iterators that we may have to look through.
// We may want to separate these though as we are unlikely to incur
// anywhere close to the full costs of the auxiliary iterators because
// many of the selected values are usually skipped.
for _, ref := range opt.Aux {
c := e.seriesCost(key, ref.Val, opt.StartTime, opt.EndTime)
cost = cost.Combine(c)
}
// Retrieve the expression names in the condition (if there is a condition).
// We will also create cursors for these too.
if t.Filters[i] != nil {
refs := cnosql.ExprNames(t.Filters[i])
for _, ref := range refs {
c := e.seriesCost(key, ref.Val, opt.StartTime, opt.EndTime)
cost = cost.Combine(c)
}
}
}
}
return cost, nil
}
// Type returns FieldType for a series. If the series does not
// exist, ErrUnknownFieldType is returned.
func (e *Engine) Type(series []byte) (models.FieldType, error) {
if typ, err := e.Cache.Type(series); err == nil {
return typ, nil
}
typ, err := e.FileStore.Type(series)
if err != nil {
return 0, err
}
switch typ {
case BlockFloat64:
return models.Float, nil
case BlockInteger:
return models.Integer, nil
case BlockUnsigned:
return models.Unsigned, nil
case BlockString:
return models.String, nil
case BlockBoolean:
return models.Boolean, nil
}
return 0, tsdb.ErrUnknownFieldType
}
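// seriesCost estimates the cost of reading a single series field over the given time range by
// combining the file store cost with the number of matching values in the cache.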
func (e *Engine) seriesCost(seriesKey, field string, tmin, tmax int64) query.IteratorCost {
key := SeriesFieldKeyBytes(seriesKey, field)
c := e.FileStore.Cost(key, tmin, tmax)
// Retrieve the range of values within the cache.
cacheValues := e.Cache.Values(key)
c.CachedValues = int64(len(cacheValues.Include(tmin, tmax)))
return c
}
// SeriesFieldKey combines a series key and field name into a unique string to be hashed to a numeric ID.
func SeriesFieldKey(seriesKey, field string) string {
return seriesKey + keyFieldSeparator + field
}
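// SeriesFieldKeyBytes is the []byte equivalent of SeriesFieldKey.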
func SeriesFieldKeyBytes(seriesKey, field string) []byte {
b := make([]byte, len(seriesKey)+len(keyFieldSeparator)+len(field))
i := copy(b[:], seriesKey)
i += copy(b[i:], keyFieldSeparatorBytes)
copy(b[i:], field)
return b
}
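// blockToFieldType maps TSM block types to CnosQL data types; unused block type slots map to Unknown.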
var (
blockToFieldType = [8]cnosql.DataType{
BlockFloat64: cnosql.Float,
BlockInteger: cnosql.Integer,
BlockBoolean: cnosql.Boolean,
BlockString: cnosql.String,
BlockUnsigned: cnosql.Unsigned,
5: cnosql.Unknown,
6: cnosql.Unknown,
7: cnosql.Unknown,
}
)
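// BlockTypeToCnosQLDataType converts a TSM block type to its CnosQL data type.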
func BlockTypeToCnosQLDataType(typ byte) cnosql.DataType { return blockToFieldType[typ&7] }
// SeriesAndFieldFromCompositeKey returns the series key and the field key extracted from the composite key.
func SeriesAndFieldFromCompositeKey(key []byte) ([]byte, []byte) {
sep := bytes.Index(key, keyFieldSeparatorBytes)
if sep == -1 {
// No field separator found; return the whole key with no field.
return key, nil
}
return key[:sep], key[sep+len(keyFieldSeparator):]
}
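// varRefSliceContains reports whether the slice contains a variable reference with the value v.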
func varRefSliceContains(a []cnosql.VarRef, v string) bool {
for _, ref := range a {
if ref.Val == v {
return true
}
}
return false
}
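// varRefSliceRemove returns a copy of the slice with all references to v removed; the original slice
// is returned unchanged if v is not present.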
func varRefSliceRemove(a []cnosql.VarRef, v string) []cnosql.VarRef {
if !varRefSliceContains(a, v) {
return a
}
other := make([]cnosql.VarRef, 0, len(a))
for _, ref := range a {
if ref.Val != v {
other = append(other, ref)
}
}
return other
}
| [
"\"CNOSDB_SERIES_TYPE_CHECK_ENABLED\""
]
| []
| [
"CNOSDB_SERIES_TYPE_CHECK_ENABLED"
]
| [] | ["CNOSDB_SERIES_TYPE_CHECK_ENABLED"] | go | 1 | 0 | |
qa/rpc-tests/bip9-softforks.py | #!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.blockstore import BlockStore
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
from io import BytesIO
import time
import itertools
'''
This test is meant to exercise BIP forks
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 blocks and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
'''
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 4
# 143 more blocks (waiting period-1)
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert(not (tmpl['version'] & (1 << bitno)))
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
# Restart all
self.test.block_store.close()
stop_nodes(self.nodes)
wait_bsmcoinds()
shutil.rmtree(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.test.block_store = BlockStore(self.options.tmpdir)
self.test.clear_all_connections()
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
'''Modify the signature in vin 0 of the tx to fail CSV
Prepends -1 CSV DROP in the scriptSig itself.
'''
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
'''Modify the nSequence to make it fails once sequence lock rule is activated (high timespan)
'''
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
'''Modify the nLockTime to make it fails once MTP rule is activated
'''
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
| []
| []
| []
| [] | [] | python | null | null | null |
deploy/inventory/site/rax.py | #!/usr/bin/env python
# (c) 2013, Jesse Keating <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
inventory: rax
short_description: Rackspace Public Cloud external inventory script
description:
- Generates inventory that Ansible can understand by making API request to
Rackspace Public Cloud API
- |
When run against a specific host, this script returns the following
variables:
rax_os-ext-sts_task_state
rax_addresses
rax_links
rax_image
rax_os-ext-sts_vm_state
rax_flavor
rax_id
rax_rax-bandwidth_bandwidth
rax_user_id
rax_os-dcf_diskconfig
rax_accessipv4
rax_accessipv6
rax_progress
rax_os-ext-sts_power_state
rax_metadata
rax_status
rax_updated
rax_hostid
rax_name
rax_created
rax_tenant_id
rax_loaded
where some items can have nested structure.
- credentials are set in a credentials file
version_added: None
options:
creds_file:
description:
- File to find the Rackspace Public Cloud credentials in
required: true
default: null
region:
description:
- An optional value to narrow inventory scope, i.e. DFW, ORD, IAD, LON
required: false
default: null
authors:
- Jesse Keating <[email protected]>
- Paul Durivage <[email protected]>
- Matt Martz <[email protected]>
notes:
- RAX_CREDS_FILE is an optional environment variable that points to a
pyrax-compatible credentials file.
- If RAX_CREDS_FILE is not supplied, rax.py will look for a credentials file
at ~/.rackspace_cloud_credentials.
- See https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating
- RAX_REGION is an optional environment variable to narrow inventory search
scope
- RAX_REGION, if used, needs a value like ORD, DFW, SYD (a Rackspace
datacenter) and optionally accepts a comma-separated list
- RAX_ENV is an environment variable that will use an environment as
configured in ~/.pyrax.cfg, see
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration
- RAX_META_PREFIX is an environment variable that changes the prefix used
for meta key/value groups. For compatibility with ec2.py set to
RAX_META_PREFIX=tag
requirements: [ "pyrax" ]
examples:
- description: List server instances
code: RAX_CREDS_FILE=~/.raxpub rax.py --list
- description: List servers in ORD datacenter only
code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list
- description: List servers in ORD and DFW datacenters
code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list
- description: Get server details for server named "server.example.com"
code: RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com
'''
import os
import re
import sys
import argparse
import collections
from types import NoneType
try:
import json
except:
import simplejson as json
try:
import pyrax
except ImportError:
print('pyrax is required for this module')
sys.exit(1)
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
def rax_slugify(value):
return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))
def to_dict(obj):
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
key = rax_slugify(key)
instance[key] = value
return instance
def host(regions, hostname):
hostvars = {}
for region in regions:
# Connect to the region
cs = pyrax.connect_to_cloudservers(region=region)
for server in cs.servers.list():
if server.name == hostname:
for key, value in to_dict(server).items():
hostvars[key] = value
# And finally, add an IP address
hostvars['ansible_ssh_host'] = server.accessIPv4
print(json.dumps(hostvars, sort_keys=True, indent=4))
def _list(regions):
groups = collections.defaultdict(list)
hostvars = collections.defaultdict(dict)
images = {}
# Go through all the regions looking for servers
for region in regions:
# Connect to the region
cs = pyrax.connect_to_cloudservers(region=region)
for server in cs.servers.list():
# Create a group on region
groups[region].append(server.name)
# Check if group metadata key in servers' metadata
group = server.metadata.get('group')
if group:
groups[group].append(server.name)
for extra_group in server.metadata.get('groups', '').split(','):
groups[extra_group].append(server.name)
# Add host metadata
for key, value in to_dict(server).items():
hostvars[server.name][key] = value
hostvars[server.name]['rax_region'] = region
for key, value in server.metadata.iteritems():
prefix = os.getenv('RAX_META_PREFIX', 'meta')
groups['%s_%s_%s' % (prefix, key, value)].append(server.name)
groups['instance-%s' % server.id].append(server.name)
groups['flavor-%s' % server.flavor['id']].append(server.name)
try:
imagegroup = 'image-%s' % images[server.image['id']]
groups[imagegroup].append(server.name)
groups['image-%s' % server.image['id']].append(server.name)
except KeyError:
try:
image = cs.images.get(server.image['id'])
except cs.exceptions.NotFound:
groups['image-%s' % server.image['id']].append(server.name)
else:
images[image.id] = image.human_id
groups['image-%s' % image.human_id].append(server.name)
groups['image-%s' % server.image['id']].append(server.name)
# And finally, add an IP address
hostvars[server.name]['ansible_ssh_host'] = server.accessIPv4
if hostvars:
groups['_meta'] = {'hostvars': hostvars}
print(json.dumps(groups, sort_keys=True, indent=4))
def parse_args():
parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
'inventory module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def setup():
default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')
env = os.getenv('RAX_ENV', None)
if env:
pyrax.set_environment(env)
keyring_username = pyrax.get_setting('keyring_username')
# Attempt to grab credentials from environment first
try:
creds_file = os.path.expanduser(os.environ['RAX_CREDS_FILE'])
except KeyError, e:
# But if that fails, use the default location of
# ~/.rackspace_cloud_credentials
if os.path.isfile(default_creds_file):
creds_file = default_creds_file
elif not keyring_username:
sys.stderr.write('No value in environment variable %s and/or no '
'credentials file at %s\n'
% (e.message, default_creds_file))
sys.exit(1)
identity_type = pyrax.get_setting('identity_type')
pyrax.set_setting('identity_type', identity_type or 'rackspace')
region = pyrax.get_setting('region')
try:
if keyring_username:
pyrax.keyring_auth(keyring_username, region=region)
else:
pyrax.set_credential_file(creds_file, region=region)
except Exception, e:
sys.stderr.write("%s: %s\n" % (e, e.message))
sys.exit(1)
regions = []
if region:
regions.append(region)
else:
for region in os.getenv('RAX_REGION', 'all').split(','):
region = region.strip().upper()
if region == 'ALL':
regions = pyrax.regions
break
elif region not in pyrax.regions:
sys.stderr.write('Unsupported region %s' % region)
sys.exit(1)
elif region not in regions:
regions.append(region)
return regions
def main():
args = parse_args()
regions = setup()
if args.list:
_list(regions)
elif args.host:
host(regions, args.host)
sys.exit(0)
if __name__ == '__main__':
main()
| []
| []
| [
"RAX_META_PREFIX",
"RAX_CREDS_FILE",
"RAX_ENV",
"RAX_REGION"
]
| [] | ["RAX_META_PREFIX", "RAX_CREDS_FILE", "RAX_ENV", "RAX_REGION"] | python | 4 | 0 | |
model/model.py | import argparse
import pandas as pd
import os
import pickle as pkl
import numpy as np
from sklearn.svm import SVC
from sklearn.externals import joblib
import sklearn
#from six import BytesIO
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Hyperparameters are described here. In this simple example we include a few SVC hyperparameters.
parser.add_argument('--C', type=float, default= 1)
parser.add_argument('--kernel', type=str, default='linear')
parser.add_argument('--gamma', type= str, default='scale')
parser.add_argument('--probability', type= bool, default= True)
# Sagemaker specific arguments. Defaults are set in the environment variables.
parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
args, unknown = parser.parse_known_args()
# Load the pickled training data
with open(os.path.join(args.train, "data.pickle"), 'rb') as handle:
data = pkl.load(handle)
# labels and features are stored under the 'label' and 'data' keys
train_y = data['label']
train_X = data['data']
# Now use scikit-learn's NN to train the model.
model = SVC(C = args.C,
kernel= args.kernel,
gamma = args.gamma,
probability= args.probability
)
model = model.fit(train_X, train_y)
# Save the trained classifier so it can be loaded by model_fn at inference time
joblib.dump(model, os.path.join(args.model_dir, "model.joblib"))
def model_fn(model_dir):
"""Deserialized and return fitted model
Note that this should have the same name as the serialized model in the main method
"""
model = joblib.load(os.path.join(model_dir, "model.joblib"))
return model
| []
| []
| [
"SM_CHANNEL_TRAIN",
"SM_MODEL_DIR",
"SM_OUTPUT_DATA_DIR"
]
| [] | ["SM_CHANNEL_TRAIN", "SM_MODEL_DIR", "SM_OUTPUT_DATA_DIR"] | python | 3 | 0 | |
services/github/test/test_service.py | import unittest
import os
from datetime import datetime
from util import Singleton
from .. import GithubService, GithubRemote, GithubRealRemote
@Singleton
class GithubMockRemote(GithubRemote):
def get_notifications(self):
return [{"type": "Issue", "title": "This is an issue"}]
def connect(self, key):
pass
class TestGithubService(unittest.TestCase):
# Don't want to exceed the GitHub API rate limit; only use the real remote when DONOTMOCK_GITHUB is set
if "DONOTMOCK_GITHUB" in os.environ:
github_service = GithubService.instance(GithubRealRemote.instance())
else:
print("Mocking remotes...")
github_service = GithubService.instance(GithubMockRemote.instance())
def test_get_notifications_returns_valid_notification(self):
self.github_service.connect()
self.assertTrue(
type(self.github_service.get_notifications()[0]["title"]) is str
)
self.assertTrue(type(self.github_service.get_notifications()[0]["type"]) is str)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/common.py | from subprocess import call, Popen
from tempfile import mkstemp, mkdtemp
import json
import sys
import time
import socket
import ssl
import os
import urllib.request
FNULL = open(os.devnull, 'w')
LOCALHOST = '127.0.0.1'
STATUS_PORT = 13100
TIMEOUT = 5
def run_ghostunnel(args, stdout=None, stderr=None):
"""Helper to run ghostunnel in integration test mode"""
# Default the shutdown timeout to 5s to speed up tests (otherwise it defaults to 5m)
if not any('shutdown-timeout' in f for f in args):
args.append('--shutdown-timeout=5s')
# Pass args through env var into integration test hook
os.environ["GHOSTUNNEL_INTEGRATION_TEST"] = "true"
os.environ["GHOSTUNNEL_INTEGRATION_ARGS"] = json.dumps(args)
# Print args for debugging
print_ok("running with args:\n {0}".format(' \ \n '.join(args)))
# Run it, hook up stdout/stderr if desired
test = os.path.basename(sys.argv[0]).replace('.py', '.out')
return Popen([
'../ghostunnel.test',
'-test.run=TestIntegrationMain',
'-test.coverprofile=coverage-{0}'.format(test)],
stdout=stdout, stderr=stderr)
def terminate(ghostunnel):
"""Gracefully terminate ghostunnel (with timeout)"""
print_ok("terminating ghostunnel instance")
try:
if ghostunnel:
ghostunnel.terminate()
for _ in range(0, 10):
try:
ghostunnel.wait(timeout=1)
except BaseException:
pass
if ghostunnel.returncode is not None:
print_ok("ghostunnel stopped with exit code {0}".format(
ghostunnel.returncode))
return
time.sleep(1)
print_ok("timeout, killing ghostunnel")
ghostunnel.kill()
except BaseException:
pass
def dump_goroutines():
"""Attempt to dump goroutines via status port/pprof"""
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
try:
sys.stderr.buffer.write(urllib.request.urlopen(
"https://{0}:{1}/debug/pprof/goroutine?debug=1".format(
LOCALHOST, STATUS_PORT),
context=ctx).read())
except Exception as e:
print('unable to dump goroutines:', e)
class RootCert:
"""Helper class to create root + signed certs"""
def __init__(self, name):
self.name = name
self.leaf_certs = []
print_ok("generating {0}.key, {0}.crt".format(name))
call(
'openssl genrsa -out {0}.key 1024'.format(name),
shell=True,
stderr=FNULL)
call(
'openssl req -x509 -new -key {0}.key -days 5 -out {0}_temp.crt -subj /C=US/ST=CA/O=ghostunnel/OU={0}'.format(name),
shell=True)
os.rename("{0}_temp.crt".format(name), "{0}.crt".format(name))
call('chmod 600 {0}.key'.format(name), shell=True)
def create_signed_cert(self, ou, san="IP:127.0.0.1,IP:::1,DNS:localhost"):
print_ok("generating {0}.key, {0}.crt, {0}.p12".format(ou))
fd, openssl_config = mkstemp(dir='.')
os.write(fd, "extendedKeyUsage=clientAuth,serverAuth\n".encode('utf-8'))
os.write(fd, "subjectAltName = {0}".format(san).encode('utf-8'))
call("openssl genrsa -out {0}.key 1024".format(ou),
shell=True, stderr=FNULL)
call(
"openssl req -new -key {0}.key -out {0}.csr -subj /C=US/ST=CA/O=ghostunnel/OU={0}".format(ou),
shell=True,
stderr=FNULL)
call("chmod 600 {0}.key".format(ou), shell=True)
call(
"openssl x509 -req -in {0}.csr -CA {1}.crt -CAkey {1}.key -CAcreateserial -out {0}_temp.crt -days 5 -extfile {2}".format(
ou,
self.name,
openssl_config),
shell=True,
stderr=FNULL)
call(
"openssl pkcs12 -export -out {0}_temp.p12 -in {0}_temp.crt -inkey {0}.key -password pass:".format(ou),
shell=True)
os.rename("{0}_temp.crt".format(ou), "{0}.crt".format(ou))
os.rename("{0}_temp.p12".format(ou), "{0}.p12".format(ou))
os.close(fd)
os.remove(openssl_config)
self.leaf_certs.append(ou)
def __del__(self):
RootCert.cleanup_certs([self.name])
RootCert.cleanup_certs(self.leaf_certs)
@staticmethod
def cleanup_certs(names):
for name in names:
for ext in ["crt", "key", "csr", "srl", "p12"]:
try:
os.remove('{0}.{1}'.format(name, ext))
except OSError:
pass
def print_ok(msg):
print(("\033[92m{0}\033[0m".format(msg)))
######################### Abstract #########################
class MySocket():
def __init__(self):
self.socket = None
def get_socket(self):
return self.socket
def cleanup(self):
self.socket = None # automatically calls close()
######################### TCP #########################
class TcpClient(MySocket):
def __init__(self, port):
super().__init__()
self.port = port
def connect(self, attempts=1, msg=''):
for _ in range(0, attempts):
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(TIMEOUT)
self.socket.connect((LOCALHOST, self.port))
print_ok(msg)
return
except Exception as e:
print(e)
print(
"failed to connect to {0}. Trying again...".format(self.port))
time.sleep(1)
raise Exception("Failed to connect to {0}".format(self.port))
class TcpServer(MySocket):
def __init__(self, port):
super().__init__()
self.port = port
self.listener = None
def listen(self):
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listener.settimeout(TIMEOUT)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.bind((LOCALHOST, self.port))
self.listener.listen(1)
def accept(self):
self.socket, _ = self.listener.accept()
self.socket.settimeout(TIMEOUT)
self.listener.close()
def cleanup(self):
super().cleanup()
self.listener = None
######################### TLS #########################
class TlsClient(MySocket):
def __init__(self, cert, ca, port, ssl_version=ssl.PROTOCOL_SSLv23):
super().__init__()
self.cert = cert
self.ca = ca
self.port = port
self.ssl_version = ssl_version
self.tls_listener = None
def connect(self, attempts=1, peer=None):
for _ in range(0, attempts):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(TIMEOUT)
if self.cert is not None:
self.socket = ssl.wrap_socket(sock,
keyfile='{0}.key'.format(
self.cert),
certfile='{0}.crt'.format(
self.cert),
ca_certs='{0}.crt'.format(
self.ca),
cert_reqs=ssl.CERT_REQUIRED,
ssl_version=self.ssl_version)
else:
self.socket = ssl.wrap_socket(sock,
ca_certs='{0}.crt'.format(
self.ca),
cert_reqs=ssl.CERT_REQUIRED,
ssl_version=self.ssl_version)
self.socket.connect((LOCALHOST, self.port))
if peer is not None:
if self.socket.getpeercert()['subject'][3][0][1] == peer:
return self
else:
print("Did not connect to expected peer: {0}".format(
self.socket.getpeercert()))
else:
return self
except Exception as e:
print(e)
if attempts == 1:
raise e
print("Trying to connect to {0}...".format(self.port))
time.sleep(1)
raise Exception("did not connect to peer")
class TlsServer(MySocket):
def __init__(
self,
cert,
ca,
port,
cert_reqs=ssl.CERT_REQUIRED,
ssl_version=ssl.PROTOCOL_SSLv23):
super().__init__()
self.cert = cert
self.ca = ca
self.port = port
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.tls_listener = None
def listen(self):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.settimeout(TIMEOUT)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind((LOCALHOST, self.port))
listener.listen(1)
self.tls_listener = ssl.wrap_socket(listener,
server_side=True,
keyfile='{0}.key'.format(
self.cert),
certfile='{0}.crt'.format(
self.cert),
ca_certs='{0}.crt'.format(self.ca),
cert_reqs=self.cert_reqs,
ssl_version=self.ssl_version)
def accept(self):
self.socket, _ = self.tls_listener.accept()
self.socket.settimeout(TIMEOUT)
self.tls_listener.close()
def validate_client_cert(self, ou):
if self.socket.getpeercert()['subject'][3][0][1] == ou:
return
raise Exception("did not connect to expected peer: ",
self.socket.getpeercert())
def cleanup(self):
super().cleanup()
self.tls_listener = None
######################### UNIX SOCKET #########################
class UnixClient(MySocket):
def __init__(self):
super().__init__()
self.socket_path = os.path.join(mkdtemp(), 'ghostunnel-test-socket')
def get_socket_path(self):
return self.socket_path
def connect(self, attempts=1, msg=''):
for _ in range(0, attempts):
try:
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.settimeout(TIMEOUT)
self.socket.connect(self.socket_path)
print_ok(msg)
return
except Exception as e:
print(e)
print("failed to connect to {0}. Trying again...".format(
self.socket_path))
time.sleep(1)
raise Exception("Failed to connect to {0}".format(self.socket_path))
def cleanup(self):
super().cleanup()
self.socket = None
os.remove(self.socket_path)
os.rmdir(os.path.dirname(self.socket_path))
class UnixServer(MySocket):
def __init__(self):
super().__init__()
self.socket_path = os.path.join(mkdtemp(), 'ghostunnel-test-socket')
self.listener = None
def get_socket_path(self):
return self.socket_path
def listen(self):
self.listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.listener.settimeout(TIMEOUT)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.bind(self.socket_path)
self.listener.listen(1)
def accept(self):
self.socket, _ = self.listener.accept()
self.socket.settimeout(TIMEOUT)
def cleanup(self):
super().cleanup()
self.listener = None
os.remove(self.socket_path)
######################### SocketPair #########################
# This is whacky but works. This class represents a pair of sockets which
# correspond to each end of the tunnel. The class lets you verify that sending
# data in one socket shows up on the other. It also allows testing that closing
# one socket closes the other.
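# A minimal usage sketch (illustrative port numbers; assumes a plain TCP client
# pointed at the tunnel and a plain TCP server acting as the backend):
#   pair = SocketPair(TcpClient(10001), TcpServer(10002))
#   pair.validate_can_send_from_client("hello", "client -> server works")
#   pair.validate_closing_client_closes_server("closing client closes server")
#   pair.cleanup()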
class SocketPair():
def __init__(self, client, server):
self.client = client
self.server = server
self.client_sock = None
self.server_sock = None
self.connect()
def cleanup(self):
self.client.cleanup()
self.server.cleanup()
def connect(self):
# calling accept() on a socket blocks until a connection arrives. Ghostunnel
# doesn't create the backend connection until a connection arrives. This
# implies we either need to create threads or we create the server/client
# sockets in a specific order.
self.server.listen()
# note: there might be a bug in the way we handle unix sockets. Ideally,
# the check below should be the first thing we do in SocketPair().
TcpClient(STATUS_PORT).connect(20)
self.client.connect()
self.server.accept()
def validate_can_send_from_client(self, string, msg):
encoded = bytes(string, 'utf-8')
self.client.get_socket().send(encoded)
data = self.server.get_socket().recv(len(encoded))
if data != encoded:
raise Exception("did not received expected string")
print_ok(msg)
def validate_can_send_from_server(self, string, msg):
encoded = bytes(string, 'utf-8')
self.server.get_socket().send(encoded)
data = self.client.get_socket().recv(len(encoded))
if data != encoded:
raise Exception("did not received expected string")
print_ok(msg)
def validate_closing_client_closes_server(self, msg):
print_ok(msg)
self.client.get_socket().shutdown(socket.SHUT_RDWR)
self.client.get_socket().close()
# if the tunnel doesn't close the connection, recv(1) will raise a
# Timeout
self.server.get_socket().recv(1)
def validate_closing_server_closes_client(self, msg):
print_ok(msg)
self.server.get_socket().shutdown(socket.SHUT_RDWR)
self.server.get_socket().close()
# if the tunnel doesn't close the connection, recv(1) will raise a
# Timeout
self.client.get_socket().recv(1)
def validate_client_cert(self, ou, msg):
for _ in range(1, 20):
try:
self.server.validate_client_cert(ou)
print_ok(msg)
return
except Exception as e:
print(e)
print("validate client cert failed, trying again...")
time.sleep(1)
self.cleanup()
self.connect()
raise Exception("did not connect to expected peer.")
def validate_tunnel_ou(self, ou, msg):
peercert = self.client.get_socket().getpeercert()
if peercert['subject'][3][0][1] != ou:
raise Exception("did not connect to expected peer: ", peercert)
print_ok(msg)
| []
| []
| [
"GHOSTUNNEL_INTEGRATION_TEST",
"GHOSTUNNEL_INTEGRATION_ARGS"
]
| [] | ["GHOSTUNNEL_INTEGRATION_TEST", "GHOSTUNNEL_INTEGRATION_ARGS"] | python | 2 | 0 | |
DNBC4tools/tools/utils.py | import os,sys
import time
import logging
import sys
from datetime import timedelta
from subprocess import check_call
from DNBC4tools.__init__ import _root_dir
def str_mkdir(arg):
if not os.path.exists(arg):
os.system('mkdir -p %s'%arg)
def change_path():
os.environ['PATH'] += ':'+'/'.join(str(_root_dir).split('/')[0:-4])+ '/bin'
os.environ['LD_LIBRARY_PATH'] = '/'.join(str(_root_dir).split('/')[0:-4]) + '/lib'
def python_path():
python = '/'.join(str(_root_dir).split('/')[0:-4])+ '/bin/python'
return python
def rm_temp(*args):
for i in args:
os.remove(i)
def start_print_cmd(arg):
print(arg)
check_call(arg,shell=True)
def logging_call(popenargs,name,dir):
today = time.strftime('%Y%m%d', time.localtime(time.time()))
logfile = '%s/log/%s.%s.txt'%(dir,name,today)
logger = logging.getLogger(name)
if not logger.handlers:
logger.setLevel(level = logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler(sys.stdout)
console_handler.formatter = formatter
file_handler = logging.FileHandler(logfile,encoding="utf8")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(console_handler)
    logger.info('Program start...')
logger.info(popenargs)
start = time.time()
check_call(popenargs,shell=True)
    logger.info('Program end...')
end = time.time()
used = timedelta(seconds=end - start)
logger.info('Program time used: %s', used)
logger.info('\n')
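# A minimal usage sketch of logging_call (hypothetical command and paths):
#   logging_call('STAR --runThreadN 4 ...', 'align', '/path/to/outdir')
# writes the command and its timing to /path/to/outdir/log/align.<YYYYMMDD>.txt,
# echoes them to stdout, and runs the command via check_call.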
def judgeFilexits(*args):
for input_files in args:
for input_file in input_files.split(','):
if not os.path.exists(input_file):
print(" ------------------------------------------------")
print("Error: Cannot find input file or dir %s."%(str(input_file)))
print(" ------------------------------------------------")
sys.exit()
else:
pass
| []
| []
| [
"LD_LIBRARY_PATH",
"PATH"
]
| [] | ["LD_LIBRARY_PATH", "PATH"] | python | 2 | 0 | |
python2.7/site-packages/twisted/internet/_dumbwin32proc.py | # -*- test-case-name: twisted.test.test_process -*-
"""
http://isometric.sixsided.org/_/gates_in_the_head/
"""
import os
# Win32 imports
import win32api
import win32con
import win32event
import win32file
import win32pipe
import win32process
import win32security
import pywintypes
# security attributes for pipes
PIPE_ATTRS_INHERITABLE = win32security.SECURITY_ATTRIBUTES()
PIPE_ATTRS_INHERITABLE.bInheritHandle = 1
from zope.interface import implements
from twisted.internet.interfaces import IProcessTransport, IConsumer, IProducer
from twisted.python.win32 import quoteArguments
from twisted.internet import error
from twisted.python import failure
from twisted.internet import _pollingfile
def debug(msg):
import sys
print msg
sys.stdout.flush()
class _Reaper(_pollingfile._PollableResource):
def __init__(self, proc):
self.proc = proc
def checkWork(self):
if win32event.WaitForSingleObject(self.proc.hProcess, 0) != win32event.WAIT_OBJECT_0:
return 0
exitCode = win32process.GetExitCodeProcess(self.proc.hProcess)
if exitCode == 0:
err = error.ProcessDone(exitCode)
else:
err = error.ProcessTerminated(exitCode)
self.deactivate()
self.proc.protocol.processEnded(failure.Failure(err))
return 0
def _findShebang(filename):
"""
Look for a #! line, and return the value following the #! if one exists, or
None if this file is not a script.
I don't know if there are any conventions for quoting in Windows shebang
lines, so this doesn't support any; therefore, you may not pass any
arguments to scripts invoked as filters. That's probably wrong, so if
somebody knows more about the cultural expectations on Windows, please feel
free to fix.
This shebang line support was added in support of the CGI tests;
appropriately enough, I determined that shebang lines are culturally
accepted in the Windows world through this page:
http://www.cgi101.com/learn/connect/winxp.html
@param filename: str representing a filename
@return: a str representing another filename.
"""
f = file(filename, 'ru')
if f.read(2) == '#!':
exe = f.readline(1024).strip('\n')
return exe
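# Illustrative behavior (hypothetical interpreter path): if a script's first
# line is "#!C:\Python27\python.exe", _findShebang returns
# "C:\Python27\python.exe"; for a PE executable (no leading "#!") it falls
# through and returns None.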
def _invalidWin32App(pywinerr):
"""
Determine if a pywintypes.error is telling us that the given process is
'not a valid win32 application', i.e. not a PE format executable.
@param pywinerr: a pywintypes.error instance raised by CreateProcess
@return: a boolean
"""
# Let's do this better in the future, but I have no idea what this error
# is; MSDN doesn't mention it, and there is no symbolic constant in
# win32process module that represents 193.
return pywinerr.args[0] == 193
class Process(_pollingfile._PollingTimer):
"""A process that integrates with the Twisted event loop.
If your subprocess is a python program, you need to:
- Run python.exe with the '-u' command line option - this turns on
unbuffered I/O. Buffering stdout/err/in can cause problems, see e.g.
http://support.microsoft.com/default.aspx?scid=kb;EN-US;q1903
- If you don't want Windows messing with data passed over
stdin/out/err, set the pipes to be in binary mode::
            import os, sys, msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
"""
implements(IProcessTransport, IConsumer, IProducer)
buffer = ''
def __init__(self, reactor, protocol, command, args, environment, path):
_pollingfile._PollingTimer.__init__(self, reactor)
self.protocol = protocol
# security attributes for pipes
sAttrs = win32security.SECURITY_ATTRIBUTES()
sAttrs.bInheritHandle = 1
# create the pipes which will connect to the secondary process
self.hStdoutR, hStdoutW = win32pipe.CreatePipe(sAttrs, 0)
self.hStderrR, hStderrW = win32pipe.CreatePipe(sAttrs, 0)
hStdinR, self.hStdinW = win32pipe.CreatePipe(sAttrs, 0)
win32pipe.SetNamedPipeHandleState(self.hStdinW,
win32pipe.PIPE_NOWAIT,
None,
None)
# set the info structure for the new process.
StartupInfo = win32process.STARTUPINFO()
StartupInfo.hStdOutput = hStdoutW
StartupInfo.hStdError = hStderrW
StartupInfo.hStdInput = hStdinR
StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES
# Create new handles whose inheritance property is false
pid = win32api.GetCurrentProcess()
tmp = win32api.DuplicateHandle(pid, self.hStdoutR, pid, 0, 0,
win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(self.hStdoutR)
self.hStdoutR = tmp
tmp = win32api.DuplicateHandle(pid, self.hStderrR, pid, 0, 0,
win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(self.hStderrR)
self.hStderrR = tmp
tmp = win32api.DuplicateHandle(pid, self.hStdinW, pid, 0, 0,
win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(self.hStdinW)
self.hStdinW = tmp
# Add the specified environment to the current environment - this is
# necessary because certain operations are only supported on Windows
# if certain environment variables are present.
env = os.environ.copy()
env.update(environment or {})
cmdline = quoteArguments(args)
# TODO: error detection here.
def doCreate():
self.hProcess, self.hThread, dwPid, dwTid = win32process.CreateProcess(
command, cmdline, None, None, 1, 0, env, path, StartupInfo)
try:
doCreate()
except pywintypes.error, pwte:
if not _invalidWin32App(pwte):
# This behavior isn't _really_ documented, but let's make it
# consistent with the behavior that is documented.
raise OSError(pwte)
else:
# look for a shebang line. Insert the original 'command'
# (actually a script) into the new arguments list.
sheb = _findShebang(command)
if sheb is None:
raise OSError(
"%r is neither a Windows executable, "
"nor a script with a shebang line" % command)
else:
args = list(args)
args.insert(0, command)
cmdline = quoteArguments(args)
origcmd = command
command = sheb
try:
# Let's try again.
doCreate()
except pywintypes.error, pwte2:
# d'oh, failed again!
if _invalidWin32App(pwte2):
raise OSError(
"%r has an invalid shebang line: "
"%r is not a valid executable" % (
origcmd, sheb))
raise OSError(pwte2)
win32file.CloseHandle(self.hThread)
# close handles which only the child will use
win32file.CloseHandle(hStderrW)
win32file.CloseHandle(hStdoutW)
win32file.CloseHandle(hStdinR)
self.closed = 0
self.closedNotifies = 0
# set up everything
self.stdout = _pollingfile._PollableReadPipe(
self.hStdoutR,
lambda data: self.protocol.childDataReceived(1, data),
self.outConnectionLost)
self.stderr = _pollingfile._PollableReadPipe(
self.hStderrR,
lambda data: self.protocol.childDataReceived(2, data),
self.errConnectionLost)
self.stdin = _pollingfile._PollableWritePipe(
self.hStdinW, self.inConnectionLost)
for pipewatcher in self.stdout, self.stderr, self.stdin:
self._addPollableResource(pipewatcher)
# notify protocol
self.protocol.makeConnection(self)
        # (maybe?) a good idea in win32eventreactor, otherwise not
# self.reactor.addEvent(self.hProcess, self, 'inConnectionLost')
def signalProcess(self, signalID):
if signalID in ("INT", "TERM", "KILL"):
win32process.TerminateProcess(self.hProcess, 1)
def write(self, data):
"""Write data to the process' stdin."""
self.stdin.write(data)
def writeSequence(self, seq):
"""Write data to the process' stdin."""
self.stdin.writeSequence(seq)
def closeChildFD(self, fd):
if fd == 0:
self.closeStdin()
elif fd == 1:
self.closeStdout()
elif fd == 2:
self.closeStderr()
else:
raise NotImplementedError("Only standard-IO file descriptors available on win32")
def closeStdin(self):
"""Close the process' stdin.
"""
self.stdin.close()
def closeStderr(self):
self.stderr.close()
def closeStdout(self):
self.stdout.close()
def loseConnection(self):
"""Close the process' stdout, in and err."""
self.closeStdin()
self.closeStdout()
self.closeStderr()
def outConnectionLost(self):
self.protocol.childConnectionLost(1)
self.connectionLostNotify()
def errConnectionLost(self):
self.protocol.childConnectionLost(2)
self.connectionLostNotify()
def inConnectionLost(self):
self.protocol.childConnectionLost(0)
self.connectionLostNotify()
def connectionLostNotify(self):
"""Will be called 3 times, by stdout/err threads and process handle."""
self.closedNotifies = self.closedNotifies + 1
if self.closedNotifies == 3:
self.closed = 1
self._addPollableResource(_Reaper(self))
# IConsumer
def registerProducer(self, producer, streaming):
self.stdin.registerProducer(producer, streaming)
def unregisterProducer(self):
self.stdin.unregisterProducer()
# IProducer
def pauseProducing(self):
self._pause()
def resumeProducing(self):
self._unpause()
def stopProducing(self):
self.loseConnection()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
messaging/message.go | package messaging
import (
"encoding/json"
"fmt"
result "github.com/oms-services/messagebird/result"
"github.com/messagebird/go-rest-api"
"github.com/messagebird/go-rest-api/sms"
"net/http"
"os"
)
type SMS struct {
From string `json:"from"`
To string `json:"to"`
Message string `json:"message"`
}
// Send sends an SMS via the MessageBird REST API using the API_KEY environment variable.
func Send(responseWriter http.ResponseWriter, request *http.Request) {
var apiKey = os.Getenv("API_KEY")
decoder := json.NewDecoder(request.Body)
var param SMS
decodeErr := decoder.Decode(¶m)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
client := messagebird.New(apiKey)
msg, sendErr := sms.Create(client, param.From, []string{param.To}, param.Message, nil)
if sendErr != nil {
fmt.Println("sendErr ::", sendErr)
result.WriteErrorResponse(responseWriter, sendErr)
return
}
bytes, _ := json.Marshal(msg)
result.WriteJsonResponse(responseWriter, bytes, http.StatusOK)
}
| [
"\"API_KEY\""
]
| []
| [
"API_KEY"
]
| [] | ["API_KEY"] | go | 1 | 0 | |
todo_api/wsgi.py | """
WSGI config for todo_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todo_api.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
app/main.py | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import sys
import os
import json
import logging
import base64
from adal import AuthenticationContext
from azure.keyvault.key_vault_client import KeyVaultClient
from msrestazure.azure_active_directory import AdalAuthentication
logging.basicConfig(level=logging.INFO,
format='|%(asctime)s|%(levelname)-5s|%(process)d|%(thread)d|%(name)s|%(message)s')
_logger = logging.getLogger('keyvault-agent')
AZURE_AUTHORITY_SERVER = os.getenv('AZURE_AUTHORITY_SERVER', 'https://login.microsoftonline.com/')
VAULT_RESOURCE_NAME = os.getenv('VAULT_RESOURCE_NAME', 'https://vault.azure.net')
class KeyVaultAgent(object):
"""
A Key Vault agent that reads secrets from Key Vault and stores them in a folder
"""
def __init__(self):
self._parse_sp_file()
self._secrets_output_folder = None
self._certs_output_folder = None
self._keys_output_folder = None
self._cert_keys_output_folder = None
def _parse_sp_file(self):
file_path = os.getenv('SERVICE_PRINCIPLE_FILE_PATH')
        _logger.info('Parsing Service Principal file from: %s', file_path)
if not os.path.isfile(file_path):
raise Exception("Service Principle file doesn't exist: %s" % file_path)
with open(file_path, 'r') as sp_file:
sp_data = json.load(sp_file)
# retrieve the relevant values used to authenticate with Key Vault
self.tenant_id = sp_data['tenantId']
self.client_id = sp_data['aadClientId']
self.client_secret = sp_data['aadClientSecret']
        _logger.info('Parsing Service Principal file completed')
def _get_client(self):
authority = '/'.join([AZURE_AUTHORITY_SERVER.rstrip('/'), self.tenant_id])
_logger.info('Using authority: %s', authority)
context = AuthenticationContext(authority)
_logger.info('Using vault resource name: %s and client id: %s', VAULT_RESOURCE_NAME, self.client_id)
credentials = AdalAuthentication(context.acquire_token_with_client_credentials, VAULT_RESOURCE_NAME,
self.client_id, self.client_secret)
return KeyVaultClient(credentials)
def grab_secrets(self):
"""
Gets secrets from KeyVault and stores them in a folder
"""
vault_base_url = os.getenv('VAULT_BASE_URL')
secrets_keys = os.getenv('SECRETS_KEYS')
certs_keys = os.getenv('CERTS_KEYS')
output_folder = os.getenv('SECRETS_FOLDER')
self._secrets_output_folder = os.path.join(output_folder, "secrets")
self._certs_output_folder = os.path.join(output_folder, "certs")
self._keys_output_folder = os.path.join(output_folder, "keys")
self._cert_keys_output_folder = os.path.join(output_folder, "certs_keys")
for folder in (self._secrets_output_folder, self._certs_output_folder, self._keys_output_folder, self._cert_keys_output_folder):
if not os.path.exists(folder):
os.makedirs(folder)
client = self._get_client()
_logger.info('Using vault: %s', vault_base_url)
if secrets_keys is not None:
for key_info in filter(None, secrets_keys.split(';')):
                # Secrets are not renamed. They will have the same name
# Certs and keys can be renamed
key_name, key_version, cert_filename, key_filename = self._split_keyinfo(key_info)
_logger.info('Retrieving secret name:%s with version: %s output certFileName: %s keyFileName: %s', key_name, key_version, cert_filename, key_filename)
secret = client.get_secret(vault_base_url, key_name, key_version)
if secret.kid is not None:
_logger.info('Secret is backing certificate. Dumping private key and certificate.')
if secret.content_type == 'application/x-pkcs12':
self._dump_pfx(secret.value, cert_filename, key_filename)
else:
_logger.error('Secret is not in pkcs12 format')
sys.exit(1)
elif (key_name != cert_filename):
_logger.error('Cert filename provided for secret %s not backing a certificate.', key_name)
sys.exit(('Error: Cert filename provided for secret {0} not backing a certificate.').format(key_name))
# secret has same name as key_name
output_path = os.path.join(self._secrets_output_folder, key_name)
_logger.info('Dumping secret value to: %s', output_path)
with open(output_path, 'w') as secret_file:
secret_file.write(self._dump_secret(secret))
if certs_keys is not None:
for key_info in filter(None, certs_keys.split(';')):
# only cert_filename is needed, key_filename is ignored with _
key_name, key_version, cert_filename, _ = self._split_keyinfo(key_info)
_logger.info('Retrieving cert name:%s with version: %s output certFileName: %s', key_name, key_version, cert_filename)
cert = client.get_certificate(vault_base_url, key_name, key_version)
output_path = os.path.join(self._certs_output_folder, cert_filename)
_logger.info('Dumping cert value to: %s', output_path)
with open(output_path, 'w') as cert_file:
cert_file.write(self._cert_to_pem(cert.cer))
def _dump_pfx(self, pfx, cert_filename, key_filename):
from OpenSSL import crypto
p12 = crypto.load_pkcs12(base64.decodestring(pfx))
pk = crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())
certs = (p12.get_certificate(),) + (p12.get_ca_certificates() or ())
if (cert_filename == key_filename):
key_path = os.path.join(self._keys_output_folder, key_filename)
cert_path = os.path.join(self._certs_output_folder, cert_filename)
else:
# write to certs_keys folder when cert_filename and key_filename specified
key_path = os.path.join(self._cert_keys_output_folder, key_filename)
cert_path = os.path.join(self._cert_keys_output_folder, cert_filename)
_logger.info('Dumping key value to: %s', key_path)
with open(key_path, 'w') as key_file:
key_file.write(pk)
_logger.info('Dumping certs to: %s', cert_path)
with open(cert_path, 'w') as cert_file:
for cert in certs:
cert_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
@staticmethod
def _dump_secret(secret):
value = secret.value
if secret.tags is not None and 'file-encoding' in secret.tags:
encoding = secret.tags['file-encoding']
if encoding == 'base64':
value = base64.decodestring(value)
return value
@staticmethod
def _split_keyinfo(key_info):
key_parts = key_info.strip().split(':')
key_name = key_parts[0]
key_version = '' if len(key_parts) < 2 else key_parts[1]
cert_filename = key_name if len(key_parts) < 3 else key_parts[2]
# key_filename set to cert_filename when only cert_filename is given
# key_filename default to key_name when cert and key filenames are not given
key_filename = cert_filename if len(key_parts) < 4 else key_parts[3]
return key_name, key_version, cert_filename, key_filename
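        # Illustrative parses (hypothetical key info strings):
        #   "mycert:ver1:tls.crt:tls.key" -> ("mycert", "ver1", "tls.crt", "tls.key")
        #   "mycert"                      -> ("mycert", "", "mycert", "mycert")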
@staticmethod
def _cert_to_pem(cert):
encoded = base64.encodestring(cert)
if isinstance(encoded, bytes):
encoded = encoded.decode("utf-8")
encoded = '-----BEGIN CERTIFICATE-----\n' + encoded + '-----END CERTIFICATE-----\n'
return encoded
if __name__ == '__main__':
_logger.info('Grabbing secrets from Key Vault')
KeyVaultAgent().grab_secrets()
_logger.info('Done!')
| []
| []
| [
"AZURE_AUTHORITY_SERVER",
"SECRETS_KEYS",
"SERVICE_PRINCIPLE_FILE_PATH",
"SECRETS_FOLDER",
"CERTS_KEYS",
"VAULT_BASE_URL",
"VAULT_RESOURCE_NAME"
]
| [] | ["AZURE_AUTHORITY_SERVER", "SECRETS_KEYS", "SERVICE_PRINCIPLE_FILE_PATH", "SECRETS_FOLDER", "CERTS_KEYS", "VAULT_BASE_URL", "VAULT_RESOURCE_NAME"] | python | 7 | 0 | |
serverless/regulations-core/rebuild_index.py | #!/usr/bin/env python
import os
import sys
def handler(event, context):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "regcore.settings.pgsql")
import django
django.setup()
from django.db import connection
connection.ensure_connection()
if not connection.is_usable():
raise Exception("database is unreachable")
from django.core.management import call_command
call_command('rebuild_pgsql_index')
| []
| []
| []
| [] | [] | python | 0 | 0 | |
recipe/gen_patch_json.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import copy
import json
import os
from os.path import join, isdir
import sys
import tqdm
import re
import requests
import pkg_resources
from get_license_family import get_license_family
CHANNEL_NAME = "conda-forge"
CHANNEL_ALIAS = "https://conda.anaconda.org"
SUBDIRS = (
"noarch",
"linux-64",
"linux-armv7l",
"linux-aarch64",
"linux-ppc64le",
"osx-64",
"osx-arm64",
"win-32",
"win-64",
)
REMOVALS = {
"noarch": (
"sendgrid-5.3.0-py_0.tar.bz2",
),
"linux-64": (
"airflow-with-gcp_api-1.9.0-1.tar.bz2",
"airflow-with-gcp_api-1.9.0-2.tar.bz2",
"airflow-with-gcp_api-1.9.0-3.tar.bz2",
"adios-1.13.1-py36hbecc8f4_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"gdk-pixbuf-2.36.9-0.tar.bz2",
"itk-4.12.0-py27_0.tar.bz2",
"itk-4.12.0-py35_0.tar.bz2",
"itk-4.12.0-py36_0.tar.bz2",
"itk-4.13.0-py27_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"itk-4.13.0-py36_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_1.tar.bz2",
"libtasn1-4.13-py36_0.tar.bz2",
"libgsasl-1.8.0-py36_1.tar.bz2",
"nipype-0.12.0-0.tar.bz2",
"nipype-0.12.0-py35_0.tar.bz2",
"postgis-2.4.3+9.6.8-0.tar.bz2",
"pyarrow-0.1.post-0.tar.bz2",
"pyarrow-0.1.post-1.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
"rapidpy-2.5.2-py36_0.tar.bz2",
"smesh-8.3.0b0-1.tar.bz2",
"statuspage-0.3.3-0.tar.bz2",
"statuspage-0.4.0-0.tar.bz2",
"statuspage-0.4.1-0.tar.bz2",
"statuspage-0.5.0-0.tar.bz2",
"statuspage-0.5.1-0.tar.bz2",
"tokenize-rt-2.0.1-py27_0.tar.bz2",
"vaex-core-0.4.0-py27_0.tar.bz2",
),
"osx-64": (
"adios-1.13.1-py36hbecc8f4_0.tar.bz2",
"airflow-with-gcp_api-1.9.0-1.tar.bz2",
"airflow-with-gcp_api-1.9.0-2.tar.bz2",
"arpack-3.6.1-blas_openblash1f444ea_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_1.tar.bz2",
"flask-rest-orm-0.5.0-py35_0.tar.bz2",
"flask-rest-orm-0.5.0-py36_0.tar.bz2",
"itk-4.12.0-py27_0.tar.bz2",
"itk-4.12.0-py35_0.tar.bz2",
"itk-4.12.0-py36_0.tar.bz2",
"itk-4.13.0-py27_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"itk-4.13.0-py36_0.tar.bz2",
"lammps-2018.03.16-.tar.bz2",
"libtasn1-4.13-py36_0.tar.bz2",
"mpb-1.6.2-1.tar.bz2",
"nipype-0.12.0-0.tar.bz2",
"nipype-0.12.0-py35_0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
"reentry-1.1.0-py27_0.tar.bz2",
"resampy-0.2.0-py27_0.tar.bz2",
"statuspage-0.3.3-0.tar.bz2",
"statuspage-0.4.0-0.tar.bz2",
"statuspage-0.4.1-0.tar.bz2",
"statuspage-0.5.0-0.tar.bz2",
"statuspage-0.5.1-0.tar.bz2",
"sundials-3.1.0-blas_openblash0edd121_202.tar.bz2",
"vlfeat-0.9.20-h470a237_2.tar.bz2",
"xtensor-python-0.19.1-h3e44d54_0.tar.bz2",
),
"osx-arm64": (
),
"win-32": (
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"glpk-4.59-py27_vc9_0.tar.bz2",
"glpk-4.59-py34_vc10_0.tar.bz2",
"glpk-4.59-py35_vc14_0.tar.bz2",
"glpk-4.60-py27_vc9_0.tar.bz2",
"glpk-4.60-py34_vc10_0.tar.bz2",
"glpk-4.60-py35_vc14_0.tar.bz2",
"glpk-4.61-py27_vc9_0.tar.bz2",
"glpk-4.61-py35_vc14_0.tar.bz2",
"glpk-4.61-py36_0.tar.bz2",
"libspatialindex-1.8.5-py27_0.tar.bz2",
"liknorm-1.3.7-py27_1.tar.bz2",
"liknorm-1.3.7-py35_1.tar.bz2",
"liknorm-1.3.7-py36_1.tar.bz2",
"nlopt-2.4.2-0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
),
"win-64": (
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"glpk-4.59-py27_vc9_0.tar.bz2",
"glpk-4.59-py34_vc10_0.tar.bz2",
"glpk-4.59-py35_vc14_0.tar.bz2",
"glpk-4.60-py27_vc9_0.tar.bz2",
"glpk-4.60-py34_vc10_0.tar.bz2",
"glpk-4.60-py35_vc14_0.tar.bz2",
"glpk-4.61-py27_vc9_0.tar.bz2",
"glpk-4.61-py35_vc14_0.tar.bz2",
"glpk-4.61-py36_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"libspatialindex-1.8.5-py27_0.tar.bz2",
"liknorm-1.3.7-py27_1.tar.bz2",
"liknorm-1.3.7-py35_1.tar.bz2",
"liknorm-1.3.7-py36_1.tar.bz2",
"nlopt-2.4.2-0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
),
}
OPERATORS = ["==", ">=", "<=", ">", "<", "!="]
OSX_SDK_FIXES = {
'nodejs-12.8.0-hec2bf70_1': '10.10',
'nodejs-12.1.0-h6de7cb9_1': '10.10',
'nodejs-12.3.1-h6de7cb9_0': '10.10',
'nodejs-12.9.0-hec2bf70_0': '10.10',
'nodejs-12.9.1-hec2bf70_0': '10.10',
'nodejs-12.7.0-hec2bf70_1': '10.10',
'nodejs-12.10.0-hec2bf70_0': '10.10',
'nodejs-12.4.0-h6de7cb9_0': '10.10',
'nodejs-12.11.1-hec2bf70_0': '10.10',
'nodejs-12.7.0-h6de7cb9_0': '10.10',
'nodejs-12.3.0-h6de7cb9_0': '10.10',
'nodejs-10.16.3-hec2bf70_0': '10.10',
'nodejs-12.12.0-hfddbe92_0': '10.10',
'nodejs-12.8.1-hec2bf70_0': '10.10',
'javafx-sdk-11.0.4-h6dcaf97_1': '10.11',
'javafx-sdk-12.0.2-h6dcaf97_1': '10.11',
'javafx-sdk-12.0.2-h6dcaf97_0': '10.11',
'javafx-sdk-11.0.4-h6dcaf97_0': '10.11',
'qt-5.12.1-h1b46049_0': '10.12',
'qt-5.9.7-h8cf7e54_3': '10.12',
'qt-5.9.7-h93ee506_0': '10.12',
'qt-5.9.7-h93ee506_1': '10.12',
'qt-5.12.5-h1b46049_0': '10.12',
'qt-5.9.7-h93ee506_2': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_2': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_1': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_0': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_2': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_0': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_1': '10.12',
'freecad-0.18.3-py37h4764a83_2': '10.12',
'freecad-0.18.3-py37hc453731_1': '10.12',
'freecad-0.18.4-py37hab2b3aa_1': '10.12',
'freecad-0.18.4-py37hab2b3aa_0': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_1': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_2': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_0': '10.12',
'openmpi-mpicc-4.0.1-h516909a_0': '10.12',
'openmpi-mpicc-4.0.1-h516909a_1': '10.12',
'openmpi-mpicc-4.0.1-h516909a_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_0': '10.12',
'openmpi-mpifort-4.0.1-h6ad152f_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_1': '10.12',
'openmpi-mpifort-4.0.1-he991be0_0': '10.12',
'openmpi-mpifort-4.0.1-he991be0_1': '10.12',
'openmpi-mpifort-4.0.1-he991be0_2': '10.12',
'reaktoro-1.0.7-py37h99eb986_0': '10.12',
'reaktoro-1.0.7-py37h99eb986_1': '10.12',
'reaktoro-1.0.7-py36h99eb986_0': '10.12',
'reaktoro-1.0.7-py36h99eb986_1': '10.12',
'pyqt-5.12.3-py38he22c54c_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_0': '10.12',
'pyqt-5.12.3-py36he22c54c_1': '10.12',
'pyqt-5.9.2-py27h2a560b1_4': '10.12',
'pyqt-5.9.2-py27h2a560b1_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_4': '10.12',
'pyqt-5.9.2-py36h2a560b1_3': '10.12',
'pyqt-5.9.2-py27h2a560b1_2': '10.12',
'pyqt-5.9.2-py36h2a560b1_1': '10.12',
'pyqt-5.12.3-py27h2a560b1_0': '10.12',
'pyqt-5.12.3-py37h2a560b1_0': '10.12',
'pyqt-5.12.3-py27he22c54c_0': '10.12',
'pyqt-5.12.3-py27he22c54c_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_2': '10.12',
'pyqt-5.9.2-py37h2a560b1_1': '10.12',
'pyqt-5.9.2-py36h2a560b1_0': '10.12',
'pyqt-5.9.2-py36h2a560b1_4': '10.12',
'pyqt-5.9.2-py27h2a560b1_0': '10.12',
'pyqt-5.9.2-py37h2a560b1_3': '10.12',
'pyqt-5.12.3-py38he22c54c_0': '10.12',
'pyqt-5.9.2-py27h2a560b1_3': '10.12',
'pyqt-5.9.2-py36h2a560b1_2': '10.12',
'pyqt-5.12.3-py37he22c54c_0': '10.12',
'pyqt-5.12.3-py36he22c54c_0': '10.12',
'pyqt-5.12.3-py37he22c54c_1': '10.12',
'pyqt-5.12.3-py36h2a560b1_0': '10.12',
'ldas-tools-al-2.6.3-hf543496_0': '10.12',
'ldas-tools-al-2.6.3-hf543496_1': '10.12',
'ldas-tools-al-2.6.4-h4f290e7_1': '10.12',
'ldas-tools-al-2.6.4-h4f290e7_0': '10.12',
'openmpi-4.0.1-ha90c164_2': '10.12',
'openmpi-4.0.1-ha90c164_0': '10.12',
'openmpi-4.0.1-hfcebdee_2': '10.12',
'openmpi-4.0.1-ha90c164_1': '10.12',
'openmpi-4.0.1-hc99cbb1_1': '10.12',
'openmpi-4.0.1-hc99cbb1_0': '10.12',
'openmpi-4.0.1-hc99cbb1_2': '10.12',
}
def _add_removals(instructions, subdir):
r = requests.get(
"https://conda.anaconda.org/conda-forge/"
"label/broken/%s/repodata.json" % subdir
)
if r.status_code != 200:
r.raise_for_status()
data = r.json()
currvals = list(REMOVALS.get(subdir, []))
for pkg_name in data["packages"]:
currvals.append(pkg_name)
instructions["remove"].extend(tuple(set(currvals)))
def _gen_patch_instructions(index, new_index, subdir):
instructions = {
"patch_instructions_version": 1,
"packages": defaultdict(dict),
"revoke": [],
"remove": [],
}
_add_removals(instructions, subdir)
# diff all items in the index and put any differences in the instructions
for fn in index:
assert fn in new_index
# replace any old keys
for key in index[fn]:
assert key in new_index[fn], (key, index[fn], new_index[fn])
if index[fn][key] != new_index[fn][key]:
instructions['packages'][fn][key] = new_index[fn][key]
# add any new keys
for key in new_index[fn]:
if key not in index[fn]:
instructions['packages'][fn][key] = new_index[fn][key]
return instructions
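# The generated instructions look roughly like the sketch below (illustrative
# package filenames and patched fields only):
#   {"patch_instructions_version": 1,
#    "packages": {"foo-1.0-py38_0.tar.bz2": {"depends": ["bar >=2,<3"]}},
#    "revoke": [],
#    "remove": ["baz-0.1-0.tar.bz2"]}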
def has_dep(record, name):
return any(dep.split(' ')[0] == name for dep in record.get('depends', ()))
def get_python_abi(version, subdir, build=None):
if build is not None:
m = re.match(".*py\d\d", build)
if m:
version = f"{m.group()[-2]}.{m.group()[-1]}"
if version.startswith("2.7"):
if subdir.startswith("linux"):
return "cp27mu"
return "cp27m"
elif version.startswith("2.6"):
if subdir.startswith("linux"):
return "cp26mu"
return "cp26m"
elif version.startswith("3.4"):
return "cp34m"
elif version.startswith("3.5"):
return "cp35m"
elif version.startswith("3.6"):
return "cp36m"
elif version.startswith("3.7"):
return "cp37m"
elif version.startswith("3.8"):
return "cp38"
elif version.startswith("3.9"):
return "cp39"
return None
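# Illustrative mappings (sketch of expected return values):
#   get_python_abi("3.7", "linux-64")                   -> "cp37m"
#   get_python_abi("2.7", "linux-64")                   -> "cp27mu"
#   get_python_abi("2.7", "osx-64")                     -> "cp27m"
#   get_python_abi("", "linux-64", build="py38h1234_0") -> "cp38"  (version taken from the build string)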
# Workaround for https://github.com/conda/conda-build/pull/3868
def remove_python_abi(record):
if record['name'] in ['python', 'python_abi', 'pypy']:
return
if not has_dep(record, 'python_abi'):
return
depends = record.get('depends', [])
record['depends'] = [dep for dep in depends if dep.split(" ")[0] != "python_abi"]
changes = set([])
def add_python_abi(record, subdir):
record_name = record['name']
# Make existing python and python-dependent packages conflict with pypy
if record_name == "python" and not record['build'].endswith("pypy"):
version = record['version']
new_constrains = record.get('constrains', [])
python_abi = get_python_abi(version, subdir)
new_constrains.append(f"python_abi * *_{python_abi}")
record['constrains'] = new_constrains
return
if has_dep(record, 'python') and not has_dep(record, 'pypy') and not has_dep(record, 'python_abi'):
python_abi = None
new_constrains = record.get('constrains', [])
build = record["build"]
ver_strict_found = False
ver_relax_found = False
for dep in record.get('depends', []):
dep_split = dep.split(' ')
if dep_split[0] == 'python':
if len(dep_split) == 3:
continue
if len(dep_split) == 1:
continue
elif dep_split[1] == "<3":
python_abi = get_python_abi("2.7", subdir, build)
elif dep_split[1].startswith(">="):
m = cb_pin_regex.match(dep_split[1])
if m == None:
python_abi = get_python_abi("", subdir, build)
else:
lower = pad_list(m.group("lower").split("."), 2)[:2]
upper = pad_list(m.group("upper").split("."), 2)[:2]
if lower[0] == upper[0] and int(lower[1]) + 1 == int(upper[1]):
python_abi = get_python_abi(m.group("lower"), subdir, build)
else:
python_abi = get_python_abi("", subdir, build)
else:
python_abi = get_python_abi(dep_split[1], subdir, build)
if python_abi:
new_constrains.append(f"python_abi * *_{python_abi}")
changes.add((dep, f"python_abi * *_{python_abi}"))
ver_strict_found = True
else:
ver_relax_found = True
if not ver_strict_found and ver_relax_found:
new_constrains.append("pypy <0a0")
record['constrains'] = new_constrains
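# Behavior sketch (illustrative records): a package depending on
# "python >=3.6,<3.7.0a0" gains the constraint "python_abi * *_cp36m", while a
# package that only declares "python >=3.6" (no usable upper bound) gains
# "pypy <0a0" instead, marking it as incompatible with PyPy builds.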
def _gen_new_index(repodata, subdir):
"""Make any changes to the index by adjusting the values directly.
This function returns the new index with the adjustments.
Finally, the new and old indices are then diff'ed to produce the repo
data patches.
"""
index = copy.deepcopy(repodata["packages"])
# deal with windows vc features
if subdir.startswith("win-"):
python_vc_deps = {
'2.6': 'vc 9.*',
'2.7': 'vc 9.*',
'3.3': 'vc 10.*',
'3.4': 'vc 10.*',
'3.5': 'vc 14.*',
'3.6': 'vc 14.*',
'3.7': 'vc 14.*',
}
for fn, record in index.items():
record_name = record['name']
if record_name == 'python':
# remove the track_features key
if 'track_features' in record:
record['track_features'] = None
# add a vc dependency
if not any(d.startswith('vc') for d in record['depends']):
depends = record['depends']
depends.append(python_vc_deps[record['version'][:3]])
record['depends'] = depends
elif 'vc' in record.get('features', ''):
# remove vc from the features key
vc_version = _extract_and_remove_vc_feature(record)
if vc_version:
# add a vc dependency
if not any(d.startswith('vc') for d in record['depends']):
depends = record['depends']
depends.append('vc %d.*' % vc_version)
record['depends'] = depends
proj4_fixes = {"cartopy", "cdo", "gdal", "libspatialite", "pynio", "qgis"}
for fn, record in index.items():
record_name = record["name"]
if record.get('timestamp', 0) < 1604417730000:
if subdir == 'noarch':
remove_python_abi(record)
else:
add_python_abi(record, subdir)
if "license" in record and "license_family" not in record and record["license"]:
family = get_license_family(record["license"])
if family:
record['license_family'] = family
# remove dependency from constrains for twisted
if record_name == "twisted":
new_constrains = [dep for dep in record.get('constrains', ())
if not dep.startswith("pyobjc-framework-cococa")]
if new_constrains != record.get('constrains', ()):
record['constrains'] = new_constrains
if record_name == "starlette-base":
if not any(dep.split(' ')[0] == "starlette" for dep in record.get('constrains', ())):
if 'constrains' in record:
record['constrains'].append(f"starlette {record['version']}")
else:
record['constrains'] = [f"starlette {record['version']}"]
if record_name == "pytorch" and record.get('timestamp', 0) < 1610297816658:
# https://github.com/conda-forge/pytorch-cpu-feedstock/issues/29
if not any(dep.split(' ')[0] == 'typing_extensions'
for dep in record.get('depends', ())):
if 'depends' in record:
record['depends'].append("typing_extensions")
else:
record['depends'] = ["typing_extensions"]
if record_name == "ipython" and record.get('timestamp', 0) < 1609621539000:
# https://github.com/conda-forge/ipython-feedstock/issues/127
if any(dep.split(' ')[0] == "jedi" for dep in record.get('depends', ())):
record['depends'].append('jedi <0.18')
if record_name == "kartothek" and record.get('timestamp', 0) < 1611565264000:
# https://github.com/conda-forge/kartothek-feedstock/issues/36
if "zstandard" in record['depends']:
i = record['depends'].index('zstandard')
record['depends'][i] = 'zstandard <0.15'
if record_name == "gitdb" and record['version'].startswith('4.0.') and 'smmap >=3.0.1' in record['depends']:
i = record['depends'].index('smmap >=3.0.1')
record['depends'][i] = 'smmap >=3.0.1,<4'
if record_name == "arrow-cpp":
if not any(dep.split(' ')[0] == "arrow-cpp-proc" for dep in record.get('constrains', ())):
if 'constrains' in record:
record['constrains'].append("arrow-cpp-proc * cpu")
else:
record['constrains'] = ["arrow-cpp-proc * cpu"]
if "aws-sdk-cpp" in record['depends']:
i = record['depends'].index('aws-sdk-cpp')
record['depends'][i] = 'aws-sdk-cpp 1.7.164'
if record_name == "pyarrow":
if not any(dep.split(' ')[0] == "arrow-cpp-proc" for dep in record.get('constrains', ())):
if 'constrains' in record:
record['constrains'].append("arrow-cpp-proc * cpu")
else:
record['constrains'] = ["arrow-cpp-proc * cpu"]
if record_name == "kartothek":
if record["version"] in ["3.15.0", "3.15.1", "3.16.0"] \
and "pyarrow >=0.13.0,!=0.14.0,<2" in record["depends"]:
i = record["depends"].index("pyarrow >=0.13.0,!=0.14.0,<2")
record["depends"][i] = "pyarrow >=0.17.1,<2"
# distributed <2.11.0 does not work with msgpack-python >=1.0
# newer versions of distributed require at least msgpack-python >=0.6.0
# so we can fix cases where msgpack-python is unbounded
# https://github.com/conda-forge/distributed-feedstock/pull/114
if record_name == 'distributed':
if 'msgpack-python' in record['depends']:
i = record['depends'].index('msgpack-python')
record['depends'][i] = 'msgpack-python <1.0.0'
# python-language-server <=0.31.9 requires pyflakes <2.2.2
# included explicitly in 0.31.10+
# https://github.com/conda-forge/python-language-server-feedstock/pull/50
version = record['version']
if record_name == 'python-language-server':
pversion = pkg_resources.parse_version(version)
v0_31_9 = pkg_resources.parse_version('0.31.9')
if pversion <= v0_31_9 and 'pyflakes >=1.6.0' in record['depends']:
i = record['depends'].index('pyflakes >=1.6.0')
record['depends'][i] = 'pyflakes >=1.6.0,<2.2.0'
# aioftp >=0.17.0 requires python >=3.7
# aioftp 0.17.x was incorrectly built with 3.6 support
# https://github.com/conda-forge/aioftp-feedstock/pull/12
version = record['version']
if record_name == 'aioftp':
pversion = pkg_resources.parse_version(version)
base_version = pkg_resources.parse_version('0.17.0')
max_version = pkg_resources.parse_version('0.17.2')
if base_version <= pversion <= max_version and 'python >=3.6' in record['depends']:
i = record['depends'].index('python >=3.6')
record['depends'][i] = 'python >=3.7'
# numpydoc >=1.0.0 requires python >=3.5
# https://github.com/conda-forge/numpydoc-feedstock/pull/14
version = record['version']
if record_name == 'numpydoc':
pversion = pkg_resources.parse_version(version)
v1_0_0 = pkg_resources.parse_version('1.0.0')
v1_1_0 = pkg_resources.parse_version('1.1.0')
if v1_0_0 <= pversion <= v1_1_0 and 'python' in record['depends']:
i = record['depends'].index('python')
record['depends'][i] = 'python >=3.5'
# pip >=21 requires python >=3.6 but the first build has >=3
# https://github.com/conda-forge/pip-feedstock/pull/68
if record_name == 'pip':
if record['version'] == "21.0" and record['build'] == "pyhd8ed1ab_0":
i = record['depends'].index('python >=3')
record['depends'][i] = 'python >=3.6'
# fix deps with wrong names
if record_name in proj4_fixes:
_rename_dependency(fn, record, "proj.4", "proj4")
if record_name == "airflow-with-async":
_rename_dependency(fn, record, "evenlet", "eventlet")
if record_name == "iris":
_rename_dependency(fn, record, "nc_time_axis", "nc-time-axis")
if (record_name == "r-base" and
not any(dep.startswith("_r-mutex ")
for dep in record["depends"])):
depends = record["depends"]
depends.append("_r-mutex 1.* anacondar_1")
record["depends"] = depends
if record_name == "gcc_impl_{}".format(subdir):
_relax_exact(fn, record, "binutils_impl_{}".format(subdir))
deps = record.get("depends", ())
if "ntl" in deps and record_name != "sage":
_rename_dependency(fn, record, "ntl", "ntl 10.3.0")
if "libiconv >=1.15,<1.16.0a0" in deps:
_pin_looser(fn, record, "libiconv", upper_bound="1.17.0")
if 're2' in deps and record.get('timestamp', 0) < 1588349339243:
_rename_dependency(fn, record, "re2", "re2 <2020.05.01")
if 'libffi' in deps and record.get('timestamp', 0) < 1605980936031:
_rename_dependency(fn, record, "libffi", "libffi <3.3.0.a0")
if 'libffi >=3.2.1,<4.0a0' in deps and record.get('timestamp', 0) < 1605980936031:
_pin_stricter(fn, record, "libffi", "x.x")
_relax_libssh2_1_x_pinning(fn, record)
if any(dep.startswith("gf2x") for dep in deps):
_pin_stricter(fn, record, "gf2x", "x.x")
if any(dep.startswith("libnetcdf >=4.7.3") for dep in deps):
_pin_stricter(fn, record, "libnetcdf", "x.x.x.x")
if any(dep.startswith("libarchive >=3.3") for dep in deps):
_pin_looser(fn, record, "libarchive", upper_bound="3.6.0")
# fix only packages built before the run_exports was corrected.
if any(dep == "libflang" or dep.startswith("libflang >=5.0.0") for dep in deps) and record.get('timestamp', 0) < 1611789153000:
record["depends"].append("libflang <6.0.0.a0")
if any(dep.startswith("libignition-") or dep == 'libsdformat' for dep in deps):
for dep_idx, _ in enumerate(deps):
dep = record['depends'][dep_idx]
if dep.startswith('libignition-'):
_pin_looser(fn, record, dep.split(" ")[0], max_pin="x")
if dep.startswith('libsdformat '):
_pin_looser(fn, record, dep.split(" ")[0], max_pin="x")
# this doesn't seem to match the _pin_looser or _pin_stricter patterns
# nor _replace_pin
if record_name == "jedi" and record.get("timestamp", 0) < 1592619891258:
for i, dep in enumerate(record["depends"]):
if dep.startswith("parso") and "<" not in dep:
_dep_parts = dep.split(" ")
_dep_parts[1] = _dep_parts[1] + ",<0.8.0"
record["depends"][i] = " ".join(_dep_parts)
# FIXME: disable patching-out blas_openblas feature
# because hotfixes are not applied to gcc7 label
# causing inconsistent behavior
# if (record_name == "blas" and
# record["track_features"] == "blas_openblas"):
# instructions["packages"][fn]["track_features"] = None
# if "features" in record:
# if "blas_openblas" in record["features"]:
# # remove blas_openblas feature
# instructions["packages"][fn]["features"] = _extract_feature(
# record, "blas_openblas")
# if not any(d.startswith("blas ") for d in record["depends"]):
# depends = record['depends']
# depends.append("blas 1.* openblas")
# instructions["packages"][fn]["depends"] = depends
if any(dep.startswith("zstd >=1.4") for dep in deps):
_pin_looser(fn, record, "zstd", max_pin="x.x")
# We pin MPI packages loosely so as to rely on their ABI compatibility
if any(dep.startswith("openmpi >=4.0") for dep in deps):
_pin_looser(fn, record, "openmpi", upper_bound="5.0")
if any(dep.startswith("mpich >=3.3") for dep in deps):
_pin_looser(fn, record, "mpich", upper_bound="4.0")
_replace_pin('libunwind >=1.2.1,<1.3.0a0', 'libunwind >=1.2.1,<2.0.0a0', deps, record)
_replace_pin('snappy >=1.1.7,<1.1.8.0a0', 'snappy >=1.1.7,<2.0.0.0a0', deps, record)
_replace_pin('ncurses >=6.1,<6.2.0a0', 'ncurses >=6.1,<6.3.0a0', deps, record)
_replace_pin('abseil-cpp', 'abseil-cpp =20190808', deps, record)
if record_name not in ["blas", "libblas", "libcblas", "liblapack",
"liblapacke", "lapack", "blas-devel"]:
_replace_pin('liblapack >=3.8.0,<3.9.0a0', 'liblapack >=3.8.0,<4.0.0a0', deps, record)
_replace_pin('liblapacke >=3.8.0,<3.9.0a0', 'liblapacke >=3.8.0,<4.0.0a0', deps, record)
# Filter by timestamp as pythia8 also contains python bindings that shouldn't be pinned
if 'pythia8' in deps and record.get('timestamp', 0) < 1584264455759:
i = record['depends'].index('pythia8')
record['depends'][i] = 'pythia8 >=8.240,<8.300.0a0'
# remove features for openjdk and rb2
if ("track_features" in record and
record['track_features'] is not None):
for feat in record["track_features"].split():
if feat.startswith(("rb2", "openjdk")):
record["track_features"] = _extract_track_feature(
record, feat)
llvm_pkgs = ["libclang", "clang", "clang-tools", "llvm", "llvm-tools", "llvmdev"]
for llvm in ["libllvm8", "libllvm9"]:
if any(dep.startswith(llvm) for dep in deps):
if record_name not in llvm_pkgs:
_relax_exact(fn, record, llvm, max_pin="x.x")
else:
_relax_exact(fn, record, llvm, max_pin="x.x.x")
if record_name in llvm_pkgs:
new_constrains = record.get('constrains', [])
version = record["version"]
for pkg in llvm_pkgs:
if record_name == pkg:
continue
if pkg in new_constrains:
                    new_constrains.remove(pkg)
if any(constraint.startswith(f"{pkg} ") for constraint in new_constrains):
continue
new_constrains.append(f'{pkg} {version}.*')
record['constrains'] = new_constrains
# make sure the libgfortran version is bound from 3 to 4 for osx
if subdir == "osx-64":
_fix_libgfortran(fn, record)
_fix_libcxx(fn, record)
full_pkg_name = fn.replace('.tar.bz2', '')
if full_pkg_name in OSX_SDK_FIXES:
_set_osx_virt_min(fn, record, OSX_SDK_FIXES[full_pkg_name])
# make old binutils packages conflict with the new sysroot packages
# that have renamed the sysroot from conda_cos6 or conda_cos7 to just
# conda
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"binutils", "binutils_impl_" + subdir, "ld_impl_" + subdir]
and record.get('timestamp', 0) < 1589953178153 # 2020-05-20
):
new_constrains = record.get('constrains', [])
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# make sure the old compilers conflict with the new sysroot packages
# and they only use libraries from the old compilers
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"gcc_impl_" + subdir, "gxx_impl_" + subdir, "gfortran_impl_" + subdir]
and record['version'] in ['5.4.0', '7.2.0', '7.3.0', '8.2.0']
):
new_constrains = record.get('constrains', [])
for pkg in ["libgcc-ng", "libstdcxx-ng", "libgfortran", "libgomp"]:
new_constrains.append("{} 5.4.*|7.2.*|7.3.*|8.2.*|9.1.*|9.2.*".format(pkg))
new_constrains.append("binutils_impl_" + subdir + " <2.34")
new_constrains.append("ld_impl_" + subdir + " <2.34")
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# we pushed a few builds of the compilers past the list of versions
# above which do not use the sysroot packages - this block catches those
# it will also break some test builds of the new compilers but we should
# not be using those anyways and they are marked as broken.
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"gcc_impl_" + subdir, "gxx_impl_" + subdir, "gfortran_impl_" + subdir]
and record['version'] not in ['5.4.0', '7.2.0', '7.3.0', '8.2.0']
and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
):
new_constrains = record.get('constrains', [])
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# all ctng activation packages that don't depend on the sysroot_*
# packages are not compatible with the new sysroot_*-based compilers
# root and cling must also be included as they have a builtin C++ interpreter
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"gcc_" + subdir, "gxx_" + subdir, "gfortran_" + subdir,
"binutils_" + subdir, "gcc_bootstrap_" + subdir, "root_base", "cling"]
and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
):
new_constrains = record.get('constrains', [])
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# old CDTs with the conda_cos6 or conda_cos7 name in the sysroot need to
# conflict with the new CDT and compiler packages
# all of the new CDTs and compilers depend on the sysroot_{subdir} packages
# so we use a constraint on those
if (
subdir == "noarch"
and (
record_name.endswith("-cos6-x86_64") or
record_name.endswith("-cos7-x86_64") or
record_name.endswith("-cos7-aarch64") or
record_name.endswith("-cos7-ppc64le")
)
and not record_name.startswith("sysroot-")
and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
):
if record_name.endswith("x86_64"):
sys_subdir = "linux-64"
elif record_name.endswith("aarch64"):
sys_subdir = "linux-aarch64"
elif record_name.endswith("ppc64le"):
sys_subdir = "linux-ppc64le"
new_constrains = record.get('constrains', [])
if not any(__r.startswith("sysroot_") for __r in new_constrains):
new_constrains.append("sysroot_" + sys_subdir + " ==99999999999")
record["constrains"] = new_constrains
# make sure pybind11 and pybind11-global have run constraints on
# the abi metapackage
# see https://github.com/conda-forge/conda-forge-repodata-patches-feedstock/issues/104 # noqa
if (
record_name in ["pybind11", "pybind11-global"]
# this version has a constraint sometimes
and (
pkg_resources.parse_version(record["version"])
<= pkg_resources.parse_version("2.6.1")
)
and not any(
c.startswith("pybind11-abi ")
for c in record.get("constrains", [])
)
):
_add_pybind11_abi_constraint(fn, record)
# add *lal>=7.1.1 as run_constrained for liblal-7.1.1
if (
record_name == "liblal"
and record['version'] == "7.1.1"
and record['build_number'] in (0, 1, 2, 100, 101, 102)
):
record.setdefault('constrains', []).extend((
"lal >=7.1.1",
"python-lal >=7.1.1",
))
return index
def _add_pybind11_abi_constraint(fn, record):
"""the pybind11-abi package uses the internals version
here are the ranges
v2.2.0 1
v2.2.1 1
v2.2.2 1
v2.2.3 1
v2.2.4 2
v2.3.0 3
v2.4.0 3
v2.4.1 3
v2.4.2 3
v2.4.3 3
v2.5.0 4
v2.6.0 4
v2.6.0b1 4
v2.6.0rc1 4
v2.6.0rc2 4
v2.6.0rc3 4
v2.6.1 4
prior to 2.2.0 we set it to 0
"""
ver = pkg_resources.parse_version(record["version"])
if ver < pkg_resources.parse_version("2.2.0"):
abi_ver = "0"
elif ver < pkg_resources.parse_version("2.2.4"):
abi_ver = "1"
elif ver < pkg_resources.parse_version("2.3.0"):
abi_ver = "2"
elif ver < pkg_resources.parse_version("2.5.0"):
abi_ver = "3"
elif ver <= pkg_resources.parse_version("2.6.1"):
abi_ver = "4"
else:
        # versions past this point are expected to already carry a pybind11-abi constraint
raise RuntimeError(
"pybind11 version %s out of range for abi" % record["version"]
)
constrains = record.get("constrains", [])
found_idx = None
for idx in range(len(constrains)):
if constrains[idx].startswith("pybind11-abi "):
found_idx = idx
if found_idx is None:
constrains.append("pybind11-abi ==" + abi_ver)
else:
constrains[found_idx] = "pybind11-abi ==" + abi_ver
record["constrains"] = constrains
def _replace_pin(old_pin, new_pin, deps, record):
"""Replace an exact pin with a new one."""
if old_pin in deps:
i = record['depends'].index(old_pin)
record['depends'][i] = new_pin
def _rename_dependency(fn, record, old_name, new_name):
depends = record["depends"]
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == old_name),
None
)
if dep_idx is not None:
parts = depends[dep_idx].split(" ")
remainder = (" " + " ".join(parts[1:])) if len(parts) > 1 else ""
depends[dep_idx] = new_name + remainder
record['depends'] = depends
def _fix_libgfortran(fn, record):
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == "libgfortran"),
None
)
if dep_idx is not None:
        # make sure any existing minimum versions are still respected
# 'libgfortran' -> >=3.0.1,<4.0.0.a0
# 'libgfortran ==3.0.1' -> ==3.0.1
# 'libgfortran >=3.0' -> >=3.0,<4.0.0.a0
# 'libgfortran >=3.0.1' -> >=3.0.1,<4.0.0.a0
if ("==" in depends[dep_idx]) or ("<" in depends[dep_idx]):
pass
elif depends[dep_idx] == "libgfortran":
depends[dep_idx] = "libgfortran >=3.0.1,<4.0.0.a0"
record['depends'] = depends
elif ">=3.0.1" in depends[dep_idx]:
depends[dep_idx] = "libgfortran >=3.0.1,<4.0.0.a0"
record['depends'] = depends
elif ">=3.0" in depends[dep_idx]:
depends[dep_idx] = "libgfortran >=3.0,<4.0.0.a0"
record['depends'] = depends
elif ">=4" in depends[dep_idx]:
# catches all of 4.*
depends[dep_idx] = "libgfortran >=4.0.0,<5.0.0.a0"
record['depends'] = depends
def _set_osx_virt_min(fn, record, min_vers):
rconst = record.get("constrains", ())
dep_idx = next(
(q for q, dep in enumerate(rconst)
if dep.split(' ')[0] == "__osx"),
None
)
run_constrained = list(rconst)
if dep_idx is None:
run_constrained.append("__osx >=%s" % min_vers)
if run_constrained:
record['constrains'] = run_constrained
def _fix_libcxx(fn, record):
record_name = record["name"]
if not record_name in ["cctools", "ld64", "llvm-lto-tapi"]:
return
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == "libcxx"),
None
)
if dep_idx is not None:
dep_parts = depends[dep_idx].split(" ")
if len(dep_parts) >= 2 and dep_parts[1] == "4.0.1":
            # relax the exact 4.0.1 pin to a lower bound
depends[dep_idx] = "libcxx >=4.0.1"
record['depends'] = depends
def pad_list(l, num):
if len(l) >= num:
return l
return l + ["0"]*(num - len(l))
def get_upper_bound(version, max_pin):
num_x = max_pin.count("x")
ver = pad_list(version.split("."), num_x)
ver[num_x:] = ["0"]*(len(ver)-num_x)
ver[num_x-1] = str(int(ver[num_x-1])+1)
return ".".join(ver)
def _relax_exact(fn, record, fix_dep, max_pin=None):
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == fix_dep),
None
)
if dep_idx is not None:
dep_parts = depends[dep_idx].split(" ")
        if (len(dep_parts) == 3 and
                not any(dep_parts[1].startswith(op) for op in OPERATORS)):
if max_pin is not None:
upper_bound = get_upper_bound(dep_parts[1], max_pin) + "a0"
depends[dep_idx] = "{} >={},<{}".format(*dep_parts[:2], upper_bound)
else:
depends[dep_idx] = "{} >={}".format(*dep_parts[:2])
record['depends'] = depends
def _match_strict_libssh2_1_x_pin(dep):
if dep.startswith("libssh2 >=1.8.0,<1.9.0a0"):
return True
if dep.startswith("libssh2 >=1.8.1,<1.9.0a0"):
return True
if dep.startswith("libssh2 >=1.8.2,<1.9.0a0"):
return True
if dep.startswith("libssh2 1.8.*"):
return True
return False
def _relax_libssh2_1_x_pinning(fn, record):
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if _match_strict_libssh2_1_x_pin(dep)),
None
)
if dep_idx is not None:
depends[dep_idx] = "libssh2 >=1.8.0,<2.0.0a0"
cb_pin_regex = re.compile(r"^>=(?P<lower>\d(\.\d+)*a?),<(?P<upper>\d(\.\d+)*)a0$")
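# cb_pin_regex matches the usual conda-build pin expression, e.g. ">=1.8.0,<1.9.0a0"
# yields lower="1.8.0" and upper="1.9.0"; _pin_stricter below uses it to tighten an
# existing upper bound, e.g. (illustrative) "libxyz >=1.2.0,<2.0a0" with max_pin="x.x"
# becomes "libxyz >=1.2.0,<1.3.0a0".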
def _pin_stricter(fn, record, fix_dep, max_pin):
depends = record.get("depends", ())
dep_indices = [q for q, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep]
for dep_idx in dep_indices:
dep_parts = depends[dep_idx].split(" ")
if len(dep_parts) not in [2, 3]:
continue
m = cb_pin_regex.match(dep_parts[1])
if m is None:
continue
lower = m.group("lower")
upper = m.group("upper").split(".")
new_upper = get_upper_bound(lower, max_pin).split(".")
upper = pad_list(upper, len(new_upper))
new_upper = pad_list(new_upper, len(upper))
if tuple(upper) > tuple(new_upper):
if str(new_upper[-1]) != "0":
new_upper += ["0"]
depends[dep_idx] = "{} >={},<{}a0".format(dep_parts[0], lower, ".".join(new_upper))
if len(dep_parts) == 3:
depends[dep_idx] = "{} {}".format(depends[dep_idx], dep_parts[2])
record['depends'] = depends
def _pin_looser(fn, record, fix_dep, max_pin=None, upper_bound=None):
depends = record.get("depends", ())
dep_indices = [q for q, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep]
for dep_idx in dep_indices:
dep_parts = depends[dep_idx].split(" ")
if len(dep_parts) not in [2, 3]:
continue
m = cb_pin_regex.match(dep_parts[1])
if m is None:
continue
lower = m.group("lower")
upper = m.group("upper").split(".")
if upper_bound is None:
new_upper = get_upper_bound(lower, max_pin).split(".")
else:
new_upper = upper_bound.split(".")
upper = pad_list(upper, len(new_upper))
new_upper = pad_list(new_upper, len(upper))
if tuple(upper) < tuple(new_upper):
if str(new_upper[-1]) != "0":
new_upper += ["0"]
depends[dep_idx] = "{} >={},<{}a0".format(dep_parts[0], lower, ".".join(new_upper))
if len(dep_parts) == 3:
depends[dep_idx] = "{} {}".format(depends[dep_idx], dep_parts[2])
record['depends'] = depends
def _extract_and_remove_vc_feature(record):
features = record.get('features', '').split()
vc_features = tuple(f for f in features if f.startswith('vc'))
if not vc_features:
return None
non_vc_features = tuple(f for f in features if f not in vc_features)
vc_version = int(vc_features[0][2:]) # throw away all but the first
if non_vc_features:
record['features'] = ' '.join(non_vc_features)
else:
record['features'] = None
return vc_version
def _extract_feature(record, feature_name):
features = record.get('features', '').split()
features.remove(feature_name)
return " ".join(features) or None
def _extract_track_feature(record, feature_name):
features = record.get('track_features', '').split()
features.remove(feature_name)
return " ".join(features) or None
def main():
# Step 1. Collect initial repodata for all subdirs.
repodatas = {}
if "CF_SUBDIR" in os.environ:
# For local debugging
subdirs = os.environ["CF_SUBDIR"].split(";")
else:
subdirs = SUBDIRS
for subdir in tqdm.tqdm(subdirs, desc="Downloading repodata"):
repodata_url = "/".join(
(CHANNEL_ALIAS, CHANNEL_NAME, subdir, "repodata_from_packages.json"))
response = requests.get(repodata_url)
response.raise_for_status()
repodatas[subdir] = response.json()
# Step 2. Create all patch instructions.
prefix_dir = os.getenv("PREFIX", "tmp")
for subdir in subdirs:
prefix_subdir = join(prefix_dir, subdir)
if not isdir(prefix_subdir):
os.makedirs(prefix_subdir)
# Step 2a. Generate a new index.
new_index = _gen_new_index(repodatas[subdir], subdir)
# Step 2b. Generate the instructions by diff'ing the indices.
instructions = _gen_patch_instructions(
repodatas[subdir]['packages'], new_index, subdir)
# Step 2c. Output this to $PREFIX so that we bundle the JSON files.
patch_instructions_path = join(
prefix_subdir, "patch_instructions.json")
with open(patch_instructions_path, 'w') as fh:
json.dump(
instructions, fh, indent=2,
sort_keys=True, separators=(',', ': '))
if __name__ == "__main__":
sys.exit(main())
| []
| []
| [
"CF_SUBDIR",
"PREFIX"
]
| [] | ["CF_SUBDIR", "PREFIX"] | python | 2 | 0 | |
examples/create_jobs_from_template/create_jobs_from_template.py | """
Creating jobs from templates.
"""
# pylint: disable=R0801
import os
import sys
import json
from jinja2 import Environment, FileSystemLoader
from dotenv import load_dotenv
from mitto_sdk import Mitto
load_dotenv()
BASE_URL = os.getenv("MITTO_BASE_URL")
API_KEY = os.getenv("MITTO_API_KEY")
INPUT_DIRECTORY = "templates"
TEMPLATE_FILE = "sql.json"
MITTO_JOBS = [
{
"name": "sql_select_1_from_api",
"title": "[SQL] Select 1 from API",
"sql": "select 1;"
},
{
"name": "sql_select_2_from_api",
"title": "[SQL] Select 2 from API",
"sql": "select 2;"
},
{
"name": "sql_select_3_from_api",
"title": "[SQL] Select 3 from API",
"sql": "select 3;"
}
]
def main():
"""creating jobs from template"""
# set up Jinja
template_env = Environment(loader=FileSystemLoader(INPUT_DIRECTORY))
template = template_env.get_template(TEMPLATE_FILE)
# set up Mitto
mitto = Mitto(
base_url=BASE_URL,
api_key=API_KEY
)
# create a Mitto job for each Jinja job template
for job in MITTO_JOBS:
jinja_template = template.render(
job_name=job["name"],
job_title=job["title"],
sql=job["sql"]
)
job_template = json.loads(jinja_template)
job = mitto.create_job(job=job_template)
print(job)
if __name__ == "__main__":
sys.exit(main())
| []
| []
| [
"MITTO_API_KEY",
"MITTO_BASE_URL"
]
| [] | ["MITTO_API_KEY", "MITTO_BASE_URL"] | python | 2 | 0 | |
main.go | // Copyright 2019 Layer5.io
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"os"
"path"
"strings"
"time"
"github.com/layer5io/meshery-adapter-library/adapter"
configprovider "github.com/layer5io/meshery-adapter-library/config/provider"
"github.com/layer5io/meshery-consul/consul/oam"
"github.com/layer5io/meshery-consul/internal/config"
"github.com/layer5io/meshery-consul/internal/operations"
"github.com/layer5io/meshery-adapter-library/api/grpc"
"github.com/layer5io/meshery-consul/consul"
"github.com/layer5io/meshkit/logger"
"github.com/layer5io/meshkit/utils/manifests"
smp "github.com/layer5io/service-mesh-performance/spec"
)
var (
serviceName = "consul-adapter"
version = "none"
gitsha = "none"
)
func main() {
log, err := logger.New(serviceName, logger.Options{Format: logger.JsonLogFormat, DebugLevel: false})
if err != nil {
fmt.Println("Logger Init Failed", err.Error())
os.Exit(1)
}
cfg, err := config.New(configprovider.Options{
ServerConfig: config.ServerDefaults,
MeshSpec: config.MeshSpecDefaults,
ProviderConfig: config.ViperDefaults,
Operations: operations.Operations,
},
)
if err != nil {
log.Error(err)
os.Exit(1)
}
service := &grpc.Service{}
_ = cfg.GetObject(adapter.ServerKey, &service)
kubeCfg, err := config.New(configprovider.Options{
ProviderConfig: config.KubeConfigDefaults,
})
if err != nil {
log.Error(err)
os.Exit(1)
}
// KUBECONFIG is required by the Helm library, and other tools that might be used by the adapter in the future.
kubeconfig := path.Join(
config.KubeConfigDefaults[configprovider.FilePath],
fmt.Sprintf("%s.%s", config.KubeConfigDefaults[configprovider.FileName], config.KubeConfigDefaults[configprovider.FileType]))
err = os.Setenv("KUBECONFIG", kubeconfig)
if err != nil {
log.Error(err)
os.Exit(1)
}
log.Info(fmt.Sprintf("KUBECONFIG: %s", kubeconfig))
service.Handler = consul.New(cfg, log, kubeCfg)
service.Channel = make(chan interface{}, 100)
service.StartedAt = time.Now()
service.Version = version
service.GitSHA = gitsha
go registerCapabilities(service.Port, log) //Registering static capabilities
go registerDynamicCapabilities(service.Port, log) //Registering latest capabilities periodically
// Server Initialization
log.Info("Adaptor Listening at port: ", service.Port)
err = grpc.Start(service, nil)
if err != nil {
log.Error(grpc.ErrGrpcServer(err))
os.Exit(1)
}
}
func registerCapabilities(port string, log logger.Handler) {
log.Info("Registering static workloads...")
// Register workloads
if err := oam.RegisterWorkloads(mesheryServerAddress(), serviceAddress()+":"+port); err != nil {
log.Info(err.Error())
}
// Register traits
if err := oam.RegisterTraits(mesheryServerAddress(), serviceAddress()+":"+port); err != nil {
log.Info(err.Error())
}
}
func registerDynamicCapabilities(port string, log logger.Handler) {
registerWorkloads(port, log)
//Start the ticker
const reRegisterAfter = 24
ticker := time.NewTicker(reRegisterAfter * time.Hour)
for {
<-ticker.C
registerWorkloads(port, log)
}
}
func registerWorkloads(port string, log logger.Handler) {
crds, err := config.GetFileNames("https://api.github.com/repos/hashicorp/consul-k8s", "control-plane/config/crd/bases")
if err != nil {
log.Info("Could not get manifest names ", err.Error())
return
}
rel, err := config.GetLatestReleases(1)
if err != nil {
log.Info("Could not get latest version ", err.Error())
return
}
appVersion := rel[0].TagName
log.Info("Registering latest workload components for version ", appVersion)
// Register workloads
for _, manifest := range crds {
if err := adapter.RegisterWorkLoadsDynamically(mesheryServerAddress(), serviceAddress()+":"+port, &adapter.DynamicComponentsConfig{
TimeoutInMinutes: 30,
URL: "https://raw.githubusercontent.com/hashicorp/consul-k8s/main/control-plane/config/crd/bases/" + manifest,
GenerationMethod: adapter.Manifests,
Config: manifests.Config{
Name: smp.ServiceMesh_Type_name[int32(smp.ServiceMesh_CONSUL)],
MeshVersion: appVersion,
Filter: manifests.CrdFilter{
RootFilter: []string{"$[?(@.kind==\"CustomResourceDefinition\")]"},
NameFilter: []string{"$..[\"spec\"][\"names\"][\"kind\"]"},
VersionFilter: []string{"$..spec.versions[0]", " --o-filter", "$[0]"},
GroupFilter: []string{"$..spec", " --o-filter", "$[]"},
SpecFilter: []string{"$..openAPIV3Schema.properties.spec", " --o-filter", "$[]"},
},
},
Operation: config.ConsulOperation,
}); err != nil {
log.Info(err.Error())
return
}
}
log.Info("Latest workload components successfully registered.")
}
func mesheryServerAddress() string {
meshReg := os.Getenv("MESHERY_SERVER")
if meshReg != "" {
if strings.HasPrefix(meshReg, "http") {
return meshReg
}
return "http://" + meshReg
}
return "http://localhost:9081"
}
func serviceAddress() string {
svcAddr := os.Getenv("SERVICE_ADDR")
if svcAddr != "" {
return svcAddr
}
return "mesherylocal.layer5.io"
}
| [
"\"MESHERY_SERVER\"",
"\"SERVICE_ADDR\""
]
| []
| [
"MESHERY_SERVER",
"SERVICE_ADDR"
]
| [] | ["MESHERY_SERVER", "SERVICE_ADDR"] | go | 2 | 0 | |
core/src/test/java/org/apache/accumulo/core/conf/SiteConfigurationTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.conf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import java.io.File;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
public class SiteConfigurationTest {
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
justification = "path to keystore not provided by user input")
@Test
public void testOnlySensitivePropertiesExtractedFromCredentialProvider()
throws SecurityException {
// site-cfg.jceks={'ignored.property'=>'ignored', 'instance.secret'=>'mysecret',
// 'general.rpc.timeout'=>'timeout'}
URL keystore = SiteConfigurationTest.class.getResource("/site-cfg.jceks");
assertNotNull(keystore);
String credProvPath = "jceks://file" + new File(keystore.getFile()).getAbsolutePath();
var overrides =
Map.of(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey(), credProvPath);
var config = new SiteConfiguration.Builder().noFile().withOverrides(overrides).build();
assertEquals("mysecret", config.get(Property.INSTANCE_SECRET));
assertNull(config.get("ignored.property"));
assertEquals(Property.GENERAL_RPC_TIMEOUT.getDefaultValue(),
config.get(Property.GENERAL_RPC_TIMEOUT.getKey()));
}
@Test
public void testDefault() {
var conf = SiteConfiguration.auto();
assertEquals("localhost:2181", conf.get(Property.INSTANCE_ZK_HOST));
assertEquals("DEFAULT", conf.get(Property.INSTANCE_SECRET));
assertEquals("", conf.get(Property.INSTANCE_VOLUMES));
assertEquals("120s", conf.get(Property.GENERAL_RPC_TIMEOUT));
assertEquals("1G", conf.get(Property.TSERV_WALOG_MAX_SIZE));
assertEquals("org.apache.accumulo.core.cryptoImpl.NoCryptoService",
conf.get(Property.INSTANCE_CRYPTO_SERVICE));
}
@Test
public void testFile() {
System.setProperty("DIR", "/tmp/test/dir");
URL propsUrl = getClass().getClassLoader().getResource("accumulo2.properties");
var conf = new SiteConfiguration.Builder().fromUrl(propsUrl).build();
assertEquals("myhost123:2181", conf.get(Property.INSTANCE_ZK_HOST));
assertEquals("mysecret", conf.get(Property.INSTANCE_SECRET));
assertEquals("hdfs://localhost:8020/accumulo123", conf.get(Property.INSTANCE_VOLUMES));
assertEquals("123s", conf.get(Property.GENERAL_RPC_TIMEOUT));
assertEquals("256M", conf.get(Property.TSERV_WALOG_MAX_SIZE));
assertEquals("org.apache.accumulo.core.cryptoImpl.AESCryptoService",
conf.get(Property.INSTANCE_CRYPTO_SERVICE));
assertEquals(System.getenv("USER"), conf.get("general.test.user.name"));
assertEquals("/tmp/test/dir", conf.get("general.test.user.dir"));
}
@Test
public void testConfigOverrides() {
var conf = SiteConfiguration.auto();
assertEquals("localhost:2181", conf.get(Property.INSTANCE_ZK_HOST));
conf = new SiteConfiguration.Builder().noFile()
.withOverrides(Map.of(Property.INSTANCE_ZK_HOST.getKey(), "myhost:2181")).build();
assertEquals("myhost:2181", conf.get(Property.INSTANCE_ZK_HOST));
var results = new HashMap<String,String>();
conf.getProperties(results, p -> p.startsWith("instance"));
assertEquals("myhost:2181", results.get(Property.INSTANCE_ZK_HOST.getKey()));
}
}
| [
"\"USER\""
]
| []
| [
"USER"
]
| [] | ["USER"] | java | 1 | 0 | |
config.go | package main
import (
"errors"
"fmt"
"os"
"os/exec"
"github.com/nextdns/nextdns/config"
)
func cfg(args []string) error {
args = args[1:]
subCmd := "list"
if len(args) > 0 {
subCmd = args[0]
args = args[1:]
}
switch subCmd {
case "list":
var c config.Config
c.Parse("nextdns config list", args, true)
return c.Write(os.Stdout)
case "set":
var c config.Config
c.Parse("nextdns config set", args, true)
return c.Save()
case "edit":
var c config.Config
c.Parse("nextdns config edit", nil, true)
tmp, err := os.CreateTemp("", "")
if err != nil {
return err
}
defer os.Remove(tmp.Name())
if err := c.Write(tmp); err != nil {
tmp.Close()
return err
}
tmp.Close()
editor := os.Getenv("EDITOR")
if editor == "" {
editor = "vi"
}
cmd := exec.Command(editor, tmp.Name())
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
if err := cmd.Run(); err != nil {
return fmt.Errorf("%s: %v", editor, err)
}
c = config.Config{}
c.Parse("nextdns config edit", []string{"-config-file", tmp.Name()}, true)
c.File = ""
return c.Save()
case "wizard":
return installer("configure")
default:
return errors.New("usage: \n" +
" config list list configuration options\n" +
" config set [options] set a configuration option\n" +
" (see config set -h for list of options)\n" +
" config edit edit configuration using default editor\n" +
" config wizard run the configuration wizard")
}
}
| [
"\"EDITOR\""
]
| []
| [
"EDITOR"
]
| [] | ["EDITOR"] | go | 1 | 0 | |
api/auth/token.go | package auth
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
jwt "github.com/dgrijalva/jwt-go"
)
// CreateToken ...
func CreateToken(userID uint32) (string, error) {
claims := jwt.MapClaims{}
claims["authorized"] = true
claims["user_id"] = userID
claims["exp"] = time.Now().Add(time.Hour * 1).Unix() //Token expires after 1 hour
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
return token.SignedString([]byte(os.Getenv("API_SECRET")))
}
// TokenValid ...
func TokenValid(r *http.Request) error {
tokenString := ExtractToken(r)
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("API_SECRET")), nil
})
if err != nil {
return err
}
if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {
Pretty(claims)
}
return nil
}
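// Note: the token may arrive either as a "token" query parameter or via the
// conventional "Authorization: Bearer <jwt>" header; ExtractToken below simply
// splits the header on a space and uses the second element.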
// ExtractToken ...
func ExtractToken(r *http.Request) string {
keys := r.URL.Query()
token := keys.Get("token")
if token != "" {
return token
}
bearerToken := r.Header.Get("Authorization")
if len(strings.Split(bearerToken, " ")) == 2 {
return strings.Split(bearerToken, " ")[1]
}
return ""
}
// ExtractTokenID ...
func ExtractTokenID(r *http.Request) (uint32, error) {
tokenString := ExtractToken(r)
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("API_SECRET")), nil
})
if err != nil {
return 0, err
}
claims, ok := token.Claims.(jwt.MapClaims)
if ok && token.Valid {
uid, err := strconv.ParseUint(fmt.Sprintf("%.0f", claims["user_id"]), 10, 32)
if err != nil {
return 0, err
}
return uint32(uid), nil
}
return 0, nil
}
// Pretty displays the claims nicely in the terminal
func Pretty(data interface{}) {
b, err := json.MarshalIndent(data, "", " ")
if err != nil {
log.Println(err)
return
}
fmt.Println(string(b))
}
| [
"\"API_SECRET\"",
"\"API_SECRET\"",
"\"API_SECRET\""
]
| []
| [
"API_SECRET"
]
| [] | ["API_SECRET"] | go | 1 | 0 | |
cmd/autoscaler/main.go | /*
Copyright 2021 Cortex Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
"github.com/cortexlabs/cortex/pkg/autoscaler"
"github.com/cortexlabs/cortex/pkg/lib/aws"
"github.com/cortexlabs/cortex/pkg/lib/errors"
"github.com/cortexlabs/cortex/pkg/lib/k8s"
"github.com/cortexlabs/cortex/pkg/lib/logging"
"github.com/cortexlabs/cortex/pkg/lib/telemetry"
"github.com/cortexlabs/cortex/pkg/types/userconfig"
"github.com/gorilla/mux"
promapi "github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
"go.uber.org/zap"
istioclient "istio.io/client-go/pkg/clientset/versioned"
istioinformers "istio.io/client-go/pkg/informers/externalversions"
"k8s.io/apimachinery/pkg/api/meta"
kmeta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
)
func main() {
var (
port int
inCluster bool
prometheusURL string
namespace string
)
flag.IntVar(&port, "port", 8000, "port where the autoscaler server will be exposed")
flag.BoolVar(&inCluster, "in-cluster", false, "use when autoscaler runs in-cluster")
flag.StringVar(&prometheusURL, "prometheus-url", os.Getenv("CORTEX_PROMETHEUS_URL"),
"prometheus url (can be set through the CORTEX_PROMETHEUS_URL env variable)",
)
flag.StringVar(&namespace, "namespace", os.Getenv("CORTEX_NAMESPACE"),
"kubernetes namespace where the cortex APIs are deployed "+
"(can be set through the CORTEX_NAMESPACE env variable)",
)
flag.Parse()
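	// Illustrative invocation (example values only):
	//   autoscaler --in-cluster --prometheus-url http://prometheus:9090 --namespace <cortex-namespace>
	// --prometheus-url and --namespace may also be supplied via CORTEX_PROMETHEUS_URL / CORTEX_NAMESPACE.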
log := logging.GetLogger()
defer func() {
_ = log.Sync()
}()
switch {
case prometheusURL == "":
log.Fatal("--prometheus-url is a required option")
case namespace == "":
log.Fatal("--namespace is a required option")
}
awsClient, err := aws.New()
if err != nil {
exit(log, err)
}
_, userID, err := awsClient.CheckCredentials()
if err != nil {
exit(log, err)
}
telemetryEnabled := strings.ToLower(os.Getenv("CORTEX_TELEMETRY_DISABLE")) != "true"
err = telemetry.Init(telemetry.Config{
Enabled: telemetryEnabled,
UserID: userID,
Properties: map[string]string{
"kind": userconfig.RealtimeAPIKind.String(),
"image_type": "autoscaler",
},
Environment: "operator",
LogErrors: true,
BackoffMode: telemetry.BackoffDuplicateMessages,
})
if err != nil {
log.Fatalw("failed to initialize telemetry", zap.Error(err))
}
defer telemetry.Close()
scheme := runtime.NewScheme()
if err := clientgoscheme.AddToScheme(scheme); err != nil {
exit(log, err, "failed to add k8s client-go-scheme to scheme")
}
k8sClient, err := k8s.New(namespace, inCluster, nil, scheme)
if err != nil {
exit(log, err, "failed to initialize kubernetes client")
}
//goland:noinspection GoNilness
istioClient, err := istioclient.NewForConfig(k8sClient.RestConfig)
if err != nil {
exit(log, err, "failed to initialize istio client")
}
promClient, err := promapi.NewClient(
promapi.Config{
Address: prometheusURL,
},
)
if err != nil {
exit(log, err, "failed to initialize prometheus client")
}
promAPIClient := promv1.NewAPI(promClient)
realtimeScaler := autoscaler.NewRealtimeScaler(k8sClient, promAPIClient, log)
asyncScaler := autoscaler.NewAsyncScaler(k8sClient, promAPIClient)
autoScaler := autoscaler.New(log)
autoScaler.AddScaler(realtimeScaler, userconfig.RealtimeAPIKind)
autoScaler.AddScaler(asyncScaler, userconfig.AsyncAPIKind)
defer autoScaler.Stop()
istioInformerFactory := istioinformers.NewSharedInformerFactoryWithOptions(
istioClient, 10*time.Second, // TODO: check how much makes sense
istioinformers.WithNamespace(namespace),
istioinformers.WithTweakListOptions(informerFilter),
)
virtualServiceInformer := istioInformerFactory.Networking().V1beta1().VirtualServices().Informer()
virtualServiceInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
resource, err := meta.Accessor(obj)
if err != nil {
log.Errorw("failed to access resource metadata", zap.Error(err))
telemetry.Error(err)
return
}
if resource.GetNamespace() != namespace {
// filter out virtual services that are not in the cortex namespace
return
}
api, err := apiResourceFromLabels(resource.GetLabels())
if err != nil {
// filter out non-cortex apis
return
}
if err := autoScaler.AddAPI(api); err != nil {
log.Errorw("failed to add API to autoscaler",
zap.Error(err),
zap.String("apiName", api.Name),
zap.String("apiKind", api.Kind.String()),
)
telemetry.Error(err)
return
}
},
DeleteFunc: func(obj interface{}) {
resource, err := meta.Accessor(obj)
if err != nil {
log.Errorw("failed to access resource metadata", zap.Error(err))
}
if resource.GetNamespace() != namespace {
// filter out virtual services that are not in the cortex namespace
return
}
api, err := apiResourceFromLabels(resource.GetLabels())
if err != nil {
// filter out non-cortex apis
return
}
autoScaler.RemoveAPI(api)
},
},
)
handler := autoscaler.NewHandler(autoScaler)
router := mux.NewRouter()
router.HandleFunc("/awaken", handler.Awaken).Methods(http.MethodPost)
router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte("ok"))
}).Methods(http.MethodGet)
server := &http.Server{
Addr: ":" + strconv.Itoa(port),
Handler: router,
}
stopCh := make(chan struct{})
go virtualServiceInformer.Run(stopCh)
defer func() { stopCh <- struct{}{} }()
errCh := make(chan error)
go func() {
log.Infof("Starting autoscaler server on %s", server.Addr)
errCh <- server.ListenAndServe()
}()
sigint := make(chan os.Signal, 1)
signal.Notify(sigint, os.Interrupt, syscall.SIGTERM)
select {
case err = <-errCh:
exit(log, err, "failed to start autoscaler server")
case <-sigint:
log.Info("Received INT or TERM signal, handling a graceful shutdown...")
log.Info("Shutting down server")
if err = server.Shutdown(context.Background()); err != nil {
// Error from closing listeners, or context timeout:
log.Warnw("HTTP server Shutdown Error", zap.Error(err))
}
log.Info("Shutdown complete, exiting...")
}
}
func apiResourceFromLabels(labels map[string]string) (userconfig.Resource, error) {
apiName, ok := labels["apiName"]
if !ok {
return userconfig.Resource{}, fmt.Errorf("apiName key does not exist")
}
apiKind, ok := labels["apiKind"]
if !ok {
return userconfig.Resource{}, fmt.Errorf("apiKind key does not exist")
}
return userconfig.Resource{
Name: apiName,
Kind: userconfig.KindFromString(apiKind),
}, nil
}
func informerFilter(listOptions *kmeta.ListOptions) {
listOptions.LabelSelector = kmeta.FormatLabelSelector(&kmeta.LabelSelector{
MatchExpressions: []kmeta.LabelSelectorRequirement{
{
Key: "apiName",
Operator: kmeta.LabelSelectorOpExists,
},
{
Key: "apiKind",
Operator: kmeta.LabelSelectorOpExists,
},
},
})
}
func exit(log *zap.SugaredLogger, err error, wrapStrs ...string) {
if err == nil {
os.Exit(0)
}
for _, str := range wrapStrs {
err = errors.Wrap(err, str)
}
telemetry.Error(err)
if !errors.IsNoPrint(err) {
log.Fatal(err)
}
os.Exit(1)
}
| [
"\"CORTEX_PROMETHEUS_URL\"",
"\"CORTEX_NAMESPACE\"",
"\"CORTEX_TELEMETRY_DISABLE\""
]
| []
| [
"CORTEX_TELEMETRY_DISABLE",
"CORTEX_PROMETHEUS_URL",
"CORTEX_NAMESPACE"
]
| [] | ["CORTEX_TELEMETRY_DISABLE", "CORTEX_PROMETHEUS_URL", "CORTEX_NAMESPACE"] | go | 3 | 0 | |
db/db.go | package db
import (
"database/sql"
"fmt"
"os"
)
// New returns new instance of db
func New() (db *sql.DB, err error) {
config, err := loadConfig()
if err != nil {
return nil, fmt.Errorf("Error loading config: %v", err)
}
db, err = initDatabase(config)
if err != nil {
return nil, fmt.Errorf("Error initializing DB: %v", err)
}
if err = prepareDatabase(db); err != nil {
return nil, fmt.Errorf("Error creating schema and tables, error: %v", err)
}
if err = db.Ping(); err != nil {
return nil, fmt.Errorf("Error pinging DB: %v", err)
}
return db, err
}
// Config represents the structure of config.env
type Config struct {
dbUser string
dbPass string
dbName string
dbHost string
dbPort string
}
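// An illustrative environment for the fields above (example values only):
//   export DB_USER=postgres DB_PASS=secret DB_NAME=app DB_HOST=localhost DB_PORT=5432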
// loadConfig loads env variables from config.env
func loadConfig() (config *Config, err error) {
config = &Config{
dbUser: os.Getenv("DB_USER"),
dbPass: os.Getenv("DB_PASS"),
dbName: os.Getenv("DB_NAME"),
dbHost: os.Getenv("DB_HOST"),
dbPort: os.Getenv("DB_PORT"),
}
return config, err
}
// initDatabase returns new database connected to postgres
func initDatabase(c *Config) (db *sql.DB, err error) {
psqlInfo := fmt.Sprintf("host=%s port=%s user=%s "+
"password=%s dbname=%s sslmode=disable",
c.dbHost, c.dbPort, c.dbUser, c.dbPass, c.dbName)
db, err = sql.Open("postgres", psqlInfo)
return db, err
}
// prepareDatabase creates the schema and tables needed before generation
func prepareDatabase(db *sql.DB) error {
	_, err := db.Exec(SchemaQuery) // Exec rather than Query: DDL returns no rows to keep open
if err != nil {
return err
}
	_, err = db.Exec(TablesQuery)
if err != nil {
return err
}
return nil
}
| [
"\"DB_USER\"",
"\"DB_PASS\"",
"\"DB_NAME\"",
"\"DB_HOST\"",
"\"DB_PORT\""
]
| []
| [
"DB_HOST",
"DB_PORT",
"DB_NAME",
"DB_PASS",
"DB_USER"
]
| [] | ["DB_HOST", "DB_PORT", "DB_NAME", "DB_PASS", "DB_USER"] | go | 5 | 0 | |
server/driver_tidb.go | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"crypto/tls"
"sync/atomic"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/charset"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/sqlexec"
)
// TiDBDriver implements IDriver.
type TiDBDriver struct {
store kv.Storage
}
// NewTiDBDriver creates a new TiDBDriver.
func NewTiDBDriver(store kv.Storage) *TiDBDriver {
driver := &TiDBDriver{
store: store,
}
return driver
}
// TiDBContext implements QueryCtx.
type TiDBContext struct {
session.Session
currentDB string
stmts map[int]*TiDBStatement
}
// TiDBStatement implements PreparedStatement.
type TiDBStatement struct {
id uint32
numParams int
boundParams [][]byte
paramsType []byte
ctx *TiDBContext
rs ResultSet
sql string
}
// ID implements PreparedStatement ID method.
func (ts *TiDBStatement) ID() int {
return int(ts.id)
}
// Execute implements PreparedStatement Execute method.
func (ts *TiDBStatement) Execute(ctx context.Context, args []types.Datum) (rs ResultSet, err error) {
tidbRecordset, err := ts.ctx.ExecutePreparedStmt(ctx, ts.id, args)
if err != nil {
return nil, err
}
if tidbRecordset == nil {
return
}
rs = &tidbResultSet{
recordSet: tidbRecordset,
preparedStmt: ts.ctx.GetSessionVars().PreparedStmts[ts.id].(*core.CachedPrepareStmt),
}
return
}
// AppendParam implements PreparedStatement AppendParam method.
func (ts *TiDBStatement) AppendParam(paramID int, data []byte) error {
if paramID >= len(ts.boundParams) {
return mysql.NewErr(mysql.ErrWrongArguments, "stmt_send_longdata")
}
// If len(data) is 0, append an empty byte slice to the end to distinguish no data and no parameter.
if len(data) == 0 {
ts.boundParams[paramID] = []byte{}
} else {
ts.boundParams[paramID] = append(ts.boundParams[paramID], data...)
}
return nil
}
// NumParams implements PreparedStatement NumParams method.
func (ts *TiDBStatement) NumParams() int {
return ts.numParams
}
// BoundParams implements PreparedStatement BoundParams method.
func (ts *TiDBStatement) BoundParams() [][]byte {
return ts.boundParams
}
// SetParamsType implements PreparedStatement SetParamsType method.
func (ts *TiDBStatement) SetParamsType(paramsType []byte) {
ts.paramsType = paramsType
}
// GetParamsType implements PreparedStatement GetParamsType method.
func (ts *TiDBStatement) GetParamsType() []byte {
return ts.paramsType
}
// StoreResultSet stores ResultSet for stmt fetching
func (ts *TiDBStatement) StoreResultSet(rs ResultSet) {
// refer to https://dev.mysql.com/doc/refman/5.7/en/cursor-restrictions.html
// You can have open only a single cursor per prepared statement.
// closing previous ResultSet before associating a new ResultSet with this statement
// if it exists
if ts.rs != nil {
terror.Call(ts.rs.Close)
}
ts.rs = rs
}
// GetResultSet gets ResultSet associated this statement
func (ts *TiDBStatement) GetResultSet() ResultSet {
return ts.rs
}
// Reset implements PreparedStatement Reset method.
func (ts *TiDBStatement) Reset() {
for i := range ts.boundParams {
ts.boundParams[i] = nil
}
// closing previous ResultSet if it exists
if ts.rs != nil {
terror.Call(ts.rs.Close)
ts.rs = nil
}
}
// Close implements PreparedStatement Close method.
func (ts *TiDBStatement) Close() error {
// TODO close at tidb level
if ts.ctx.GetSessionVars().TxnCtx != nil && ts.ctx.GetSessionVars().TxnCtx.CouldRetry {
err := ts.ctx.DropPreparedStmt(ts.id)
if err != nil {
return err
}
} else {
if core.PreparedPlanCacheEnabled() {
preparedPointer := ts.ctx.GetSessionVars().PreparedStmts[ts.id]
preparedObj, ok := preparedPointer.(*core.CachedPrepareStmt)
if !ok {
return errors.Errorf("invalid CachedPrepareStmt type")
}
ts.ctx.PreparedPlanCache().Delete(core.NewPSTMTPlanCacheKey(
ts.ctx.GetSessionVars(), ts.id, preparedObj.PreparedAst.SchemaVersion))
}
ts.ctx.GetSessionVars().RemovePreparedStmt(ts.id)
}
delete(ts.ctx.stmts, int(ts.id))
// close ResultSet associated with this statement
if ts.rs != nil {
terror.Call(ts.rs.Close)
}
return nil
}
// OpenCtx implements IDriver.
func (qd *TiDBDriver) OpenCtx(connID uint64, capability uint32, collation uint8, dbname string, tlsState *tls.ConnectionState) (*TiDBContext, error) {
se, err := session.CreateSession(qd.store)
if err != nil {
return nil, err
}
se.SetTLSState(tlsState)
err = se.SetCollation(int(collation))
if err != nil {
return nil, err
}
se.SetClientCapability(capability)
se.SetConnectionID(connID)
tc := &TiDBContext{
Session: se,
currentDB: dbname,
stmts: make(map[int]*TiDBStatement),
}
return tc, nil
}
// GetWarnings implements QueryCtx GetWarnings method.
func (tc *TiDBContext) GetWarnings() []stmtctx.SQLWarn {
return tc.GetSessionVars().StmtCtx.GetWarnings()
}
// CurrentDB implements QueryCtx CurrentDB method.
func (tc *TiDBContext) CurrentDB() string {
return tc.currentDB
}
// WarningCount implements QueryCtx WarningCount method.
func (tc *TiDBContext) WarningCount() uint16 {
return tc.GetSessionVars().StmtCtx.WarningCount()
}
// ExecuteStmt implements QueryCtx interface.
func (tc *TiDBContext) ExecuteStmt(ctx context.Context, stmt ast.StmtNode) (ResultSet, error) {
rs, err := tc.Session.ExecuteStmt(ctx, stmt)
if err != nil {
tc.Session.GetSessionVars().StmtCtx.AppendError(err)
return nil, err
}
if rs == nil {
return nil, nil
}
return &tidbResultSet{
recordSet: rs,
}, nil
}
// Close implements QueryCtx Close method.
func (tc *TiDBContext) Close() error {
// close PreparedStatement associated with this connection
for _, v := range tc.stmts {
terror.Call(v.Close)
}
tc.Session.Close()
return nil
}
// FieldList implements QueryCtx FieldList method.
func (tc *TiDBContext) FieldList(table string) (columns []*ColumnInfo, err error) {
fields, err := tc.Session.FieldList(table)
if err != nil {
return nil, err
}
columns = make([]*ColumnInfo, 0, len(fields))
for _, f := range fields {
columns = append(columns, convertColumnInfo(f))
}
return columns, nil
}
// GetStatement implements QueryCtx GetStatement method.
func (tc *TiDBContext) GetStatement(stmtID int) PreparedStatement {
tcStmt := tc.stmts[stmtID]
if tcStmt != nil {
return tcStmt
}
return nil
}
// Prepare implements QueryCtx Prepare method.
func (tc *TiDBContext) Prepare(sql string) (statement PreparedStatement, columns, params []*ColumnInfo, err error) {
stmtID, paramCount, fields, err := tc.Session.PrepareStmt(sql)
if err != nil {
return
}
stmt := &TiDBStatement{
sql: sql,
id: stmtID,
numParams: paramCount,
boundParams: make([][]byte, paramCount),
ctx: tc,
}
statement = stmt
columns = make([]*ColumnInfo, len(fields))
for i := range fields {
columns[i] = convertColumnInfo(fields[i])
}
params = make([]*ColumnInfo, paramCount)
for i := range params {
params[i] = &ColumnInfo{
Type: mysql.TypeBlob,
}
}
tc.stmts[int(stmtID)] = stmt
return
}
type tidbResultSet struct {
recordSet sqlexec.RecordSet
columns []*ColumnInfo
rows []chunk.Row
closed int32
preparedStmt *core.CachedPrepareStmt
}
func (trs *tidbResultSet) NewChunkFromAllocator(alloc chunk.Allocator) *chunk.Chunk {
return trs.recordSet.NewChunkFromAllocator(alloc)
}
func (trs *tidbResultSet) NewChunk() *chunk.Chunk {
return trs.recordSet.NewChunk()
}
func (trs *tidbResultSet) Next(ctx context.Context, req *chunk.Chunk) error {
return trs.recordSet.Next(ctx, req)
}
func (trs *tidbResultSet) StoreFetchedRows(rows []chunk.Row) {
trs.rows = rows
}
func (trs *tidbResultSet) GetFetchedRows() []chunk.Row {
if trs.rows == nil {
trs.rows = make([]chunk.Row, 0, 1024)
}
return trs.rows
}
func (trs *tidbResultSet) Close() error {
if !atomic.CompareAndSwapInt32(&trs.closed, 0, 1) {
return nil
}
err := trs.recordSet.Close()
trs.recordSet = nil
return err
}
// OnFetchReturned implements fetchNotifier#OnFetchReturned
func (trs *tidbResultSet) OnFetchReturned() {
if cl, ok := trs.recordSet.(fetchNotifier); ok {
cl.OnFetchReturned()
}
}
func (trs *tidbResultSet) Columns() []*ColumnInfo {
if trs.columns != nil {
return trs.columns
}
// for prepare statement, try to get cached columnInfo array
if trs.preparedStmt != nil {
ps := trs.preparedStmt
if colInfos, ok := ps.ColumnInfos.([]*ColumnInfo); ok {
trs.columns = colInfos
}
}
if trs.columns == nil {
fields := trs.recordSet.Fields()
for _, v := range fields {
trs.columns = append(trs.columns, convertColumnInfo(v))
}
if trs.preparedStmt != nil {
			// if the ColumnInfo struct holds allocated objects,
			// we may need to deep copy the ColumnInfo here before caching it
trs.preparedStmt.ColumnInfos = trs.columns
}
}
return trs.columns
}
func convertColumnInfo(fld *ast.ResultField) (ci *ColumnInfo) {
ci = &ColumnInfo{
Name: fld.ColumnAsName.O,
OrgName: fld.Column.Name.O,
Table: fld.TableAsName.O,
Schema: fld.DBName.O,
Flag: uint16(fld.Column.Flag),
Charset: uint16(mysql.CharsetNameToID(fld.Column.Charset)),
Type: fld.Column.Tp,
}
if fld.Table != nil {
ci.OrgTable = fld.Table.Name.O
}
if fld.Column.Flen == types.UnspecifiedLength {
ci.ColumnLength = 0
} else {
ci.ColumnLength = uint32(fld.Column.Flen)
}
if fld.Column.Tp == mysql.TypeNewDecimal {
// Consider the negative sign.
ci.ColumnLength++
if fld.Column.Decimal > int(types.DefaultFsp) {
// Consider the decimal point.
ci.ColumnLength++
}
} else if types.IsString(fld.Column.Tp) ||
fld.Column.Tp == mysql.TypeEnum || fld.Column.Tp == mysql.TypeSet { // issue #18870
// Fix issue #4540.
		// The flen is a hint, not a precise value, so most clients will not use it.
		// But we found that a rare MySQL client, like Navicat for MySQL (versions before 12), will truncate
// the `show create table` result. To fix this case, we must use a large enough flen to prevent
// the truncation, in MySQL, it will multiply bytes length by a multiple based on character set.
// For examples:
// * latin, the multiple is 1
// * gb2312, the multiple is 2
// * Utf-8, the multiple is 3
// * utf8mb4, the multiple is 4
// We used to check non-string types to avoid the truncation problem in some MySQL
// client such as Navicat. Now we only allow string type enter this branch.
charsetDesc, err := charset.GetCharsetInfo(fld.Column.Charset)
if err != nil {
ci.ColumnLength *= 4
} else {
ci.ColumnLength *= uint32(charsetDesc.Maxlen)
}
}
if fld.Column.Decimal == types.UnspecifiedLength {
if fld.Column.Tp == mysql.TypeDuration {
ci.Decimal = uint8(types.DefaultFsp)
} else {
ci.Decimal = mysql.NotFixedDec
}
} else {
ci.Decimal = uint8(fld.Column.Decimal)
}
// Keep things compatible for old clients.
// Refer to mysql-server/sql/protocol.cc send_result_set_metadata()
if ci.Type == mysql.TypeVarchar {
ci.Type = mysql.TypeVarString
}
return
}
| []
| []
| []
| [] | [] | go | null | null | null |
server/apiserver/argoserver.go | package apiserver
import (
"crypto/tls"
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/gorilla/handlers"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/soheilhy/cmux"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/rest"
"k8s.io/utils/env"
"github.com/argoproj/argo-workflows/v3"
"github.com/argoproj/argo-workflows/v3/config"
"github.com/argoproj/argo-workflows/v3/persist/sqldb"
clusterwftemplatepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate"
cronworkflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow"
eventpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/event"
eventsourcepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/eventsource"
infopkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/info"
pipelinepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/pipeline"
sensorpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sensor"
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive"
workflowtemplatepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowtemplate"
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/server/artifacts"
"github.com/argoproj/argo-workflows/v3/server/auth"
"github.com/argoproj/argo-workflows/v3/server/auth/sso"
"github.com/argoproj/argo-workflows/v3/server/auth/webhook"
"github.com/argoproj/argo-workflows/v3/server/cache"
"github.com/argoproj/argo-workflows/v3/server/clusterworkflowtemplate"
"github.com/argoproj/argo-workflows/v3/server/cronworkflow"
"github.com/argoproj/argo-workflows/v3/server/event"
"github.com/argoproj/argo-workflows/v3/server/eventsource"
"github.com/argoproj/argo-workflows/v3/server/info"
pipeline "github.com/argoproj/argo-workflows/v3/server/pipeline"
"github.com/argoproj/argo-workflows/v3/server/sensor"
"github.com/argoproj/argo-workflows/v3/server/static"
"github.com/argoproj/argo-workflows/v3/server/types"
"github.com/argoproj/argo-workflows/v3/server/workflow"
"github.com/argoproj/argo-workflows/v3/server/workflowarchive"
"github.com/argoproj/argo-workflows/v3/server/workflowtemplate"
grpcutil "github.com/argoproj/argo-workflows/v3/util/grpc"
"github.com/argoproj/argo-workflows/v3/util/instanceid"
"github.com/argoproj/argo-workflows/v3/util/json"
"github.com/argoproj/argo-workflows/v3/workflow/artifactrepositories"
"github.com/argoproj/argo-workflows/v3/workflow/events"
"github.com/argoproj/argo-workflows/v3/workflow/hydrator"
)
var MaxGRPCMessageSize int
type argoServer struct {
baseHRef string
// https://itnext.io/practical-guide-to-securing-grpc-connections-with-go-and-tls-part-1-f63058e9d6d1
tlsConfig *tls.Config
hsts bool
namespace string
managedNamespace string
clients *types.Clients
gatekeeper auth.Gatekeeper
oAuth2Service sso.Interface
configController config.Controller
stopCh chan struct{}
eventQueueSize int
eventWorkerCount int
eventAsyncDispatch bool
xframeOptions string
accessControlAllowOrigin string
cache *cache.ResourceCache
}
type ArgoServerOpts struct {
BaseHRef string
TLSConfig *tls.Config
Namespaced bool
Namespace string
Clients *types.Clients
RestConfig *rest.Config
AuthModes auth.Modes
// config map name
ConfigName string
ManagedNamespace string
SSONameSpace string
HSTS bool
EventOperationQueueSize int
EventWorkerCount int
EventAsyncDispatch bool
XFrameOptions string
AccessControlAllowOrigin string
}
func init() {
var err error
MaxGRPCMessageSize, err = env.GetInt("GRPC_MESSAGE_SIZE", 100*1024*1024)
if err != nil {
log.Fatalf("GRPC_MESSAGE_SIZE environment variable must be set as an integer: %v", err)
}
}
func getResourceCacheNamespace(opts ArgoServerOpts) string {
if opts.Namespaced {
return opts.SSONameSpace
}
return v1.NamespaceAll
}
func NewArgoServer(ctx context.Context, opts ArgoServerOpts) (*argoServer, error) {
configController := config.NewController(opts.Namespace, opts.ConfigName, opts.Clients.Kubernetes, emptyConfigFunc)
var resourceCache *cache.ResourceCache = nil
ssoIf := sso.NullSSO
if opts.AuthModes[auth.SSO] {
c, err := configController.Get(ctx)
if err != nil {
return nil, err
}
ssoIf, err = sso.New(c.(*Config).SSO, opts.Clients.Kubernetes.CoreV1().Secrets(opts.Namespace), opts.BaseHRef, opts.TLSConfig != nil)
if err != nil {
return nil, err
}
resourceCache = cache.NewResourceCache(opts.Clients.Kubernetes, ctx, getResourceCacheNamespace(opts))
log.Info("SSO enabled")
} else {
log.Info("SSO disabled")
}
gatekeeper, err := auth.NewGatekeeper(opts.AuthModes, opts.Clients, opts.RestConfig, ssoIf, auth.DefaultClientForAuthorization, opts.Namespace, opts.SSONameSpace, opts.Namespaced, resourceCache)
if err != nil {
return nil, err
}
return &argoServer{
baseHRef: opts.BaseHRef,
tlsConfig: opts.TLSConfig,
hsts: opts.HSTS,
namespace: opts.Namespace,
managedNamespace: opts.ManagedNamespace,
clients: opts.Clients,
gatekeeper: gatekeeper,
oAuth2Service: ssoIf,
configController: configController,
stopCh: make(chan struct{}),
eventQueueSize: opts.EventOperationQueueSize,
eventWorkerCount: opts.EventWorkerCount,
eventAsyncDispatch: opts.EventAsyncDispatch,
xframeOptions: opts.XFrameOptions,
accessControlAllowOrigin: opts.AccessControlAllowOrigin,
cache: resourceCache,
}, nil
}
var backoff = wait.Backoff{
Steps: 5,
Duration: 500 * time.Millisecond,
Factor: 1.0,
Jitter: 0.1,
}
func (as *argoServer) Run(ctx context.Context, port int, browserOpenFunc func(string)) {
v, err := as.configController.Get(ctx)
if err != nil {
log.Fatal(err)
}
config := v.(*Config)
log.WithFields(log.Fields{"version": argo.GetVersion().Version, "instanceID": config.InstanceID}).Info("Starting Argo Server")
instanceIDService := instanceid.NewService(config.InstanceID)
offloadRepo := sqldb.ExplosiveOffloadNodeStatusRepo
wfArchive := sqldb.NullWorkflowArchive
persistence := config.Persistence
if persistence != nil {
session, tableName, err := sqldb.CreateDBSession(as.clients.Kubernetes, as.namespace, persistence)
if err != nil {
log.Fatal(err)
}
// we always enable node offload, as this is read-only for the Argo Server, i.e. you can turn it off if you
// like and the controller won't offload newly created workflows, but you can still read them
offloadRepo, err = sqldb.NewOffloadNodeStatusRepo(session, persistence.GetClusterName(), tableName)
if err != nil {
log.Fatal(err)
}
// we always enable the archive for the Argo Server, as the Argo Server does not write records, so you can
// disable the archiving - and still read old records
wfArchive = sqldb.NewWorkflowArchive(session, persistence.GetClusterName(), as.managedNamespace, instanceIDService)
}
eventRecorderManager := events.NewEventRecorderManager(as.clients.Kubernetes)
artifactRepositories := artifactrepositories.New(as.clients.Kubernetes, as.managedNamespace, &config.ArtifactRepository)
artifactServer := artifacts.NewArtifactServer(as.gatekeeper, hydrator.New(offloadRepo), wfArchive, instanceIDService, artifactRepositories)
eventServer := event.NewController(instanceIDService, eventRecorderManager, as.eventQueueSize, as.eventWorkerCount, as.eventAsyncDispatch)
grpcServer := as.newGRPCServer(instanceIDService, offloadRepo, wfArchive, eventServer, config.Links, config.NavColor)
httpServer := as.newHTTPServer(ctx, port, artifactServer)
// Start listener
var conn net.Listener
var listerErr error
address := fmt.Sprintf(":%d", port)
err = wait.ExponentialBackoff(backoff, func() (bool, error) {
conn, listerErr = net.Listen("tcp", address)
if listerErr != nil {
log.Warnf("failed to listen: %v", listerErr)
return false, nil
}
return true, nil
})
if err != nil {
log.Error(err)
return
}
if as.tlsConfig != nil {
conn = tls.NewListener(conn, as.tlsConfig)
}
// Cmux is used to support servicing gRPC and HTTP1.1+JSON on the same port
tcpm := cmux.New(conn)
httpL := tcpm.Match(cmux.HTTP1Fast())
grpcL := tcpm.Match(cmux.Any())
go as.configController.Run(as.stopCh, as.restartOnConfigChange)
go eventServer.Run(as.stopCh)
go func() { as.checkServeErr("grpcServer", grpcServer.Serve(grpcL)) }()
go func() { as.checkServeErr("httpServer", httpServer.Serve(httpL)) }()
go func() { as.checkServeErr("tcpm", tcpm.Serve()) }()
url := "http://localhost" + address
if as.tlsConfig != nil {
url = "https://localhost" + address
}
log.WithFields(log.Fields{
"GRPC_MESSAGE_SIZE": MaxGRPCMessageSize,
}).Info("GRPC Server Max Message Size, MaxGRPCMessageSize, is set")
log.Infof("Argo Server started successfully on %s", url)
browserOpenFunc(url)
<-as.stopCh
}
func (as *argoServer) newGRPCServer(instanceIDService instanceid.Service, offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo, wfArchive sqldb.WorkflowArchive, eventServer *event.Controller, links []*v1alpha1.Link, navColor string) *grpc.Server {
serverLog := log.NewEntry(log.StandardLogger())
// "Prometheus histograms are a great way to measure latency distributions of your RPCs. However, since it is bad practice to have metrics of high cardinality the latency monitoring metrics are disabled by default. To enable them please call the following in your server initialization code:"
grpc_prometheus.EnableHandlingTimeHistogram()
sOpts := []grpc.ServerOption{
// Set both the send and receive the bytes limit to be 100MB or GRPC_MESSAGE_SIZE
// The proper way to achieve high performance is to have pagination
// while we work toward that, we can have high limit first
grpc.MaxRecvMsgSize(MaxGRPCMessageSize),
grpc.MaxSendMsgSize(MaxGRPCMessageSize),
grpc.ConnectionTimeout(300 * time.Second),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
grpc_prometheus.UnaryServerInterceptor,
grpc_logrus.UnaryServerInterceptor(serverLog),
grpcutil.PanicLoggerUnaryServerInterceptor(serverLog),
grpcutil.ErrorTranslationUnaryServerInterceptor,
as.gatekeeper.UnaryServerInterceptor(),
)),
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
grpc_prometheus.StreamServerInterceptor,
grpc_logrus.StreamServerInterceptor(serverLog),
grpcutil.PanicLoggerStreamServerInterceptor(serverLog),
grpcutil.ErrorTranslationStreamServerInterceptor,
as.gatekeeper.StreamServerInterceptor(),
)),
}
grpcServer := grpc.NewServer(sOpts...)
infopkg.RegisterInfoServiceServer(grpcServer, info.NewInfoServer(as.managedNamespace, links, navColor))
eventpkg.RegisterEventServiceServer(grpcServer, eventServer)
eventsourcepkg.RegisterEventSourceServiceServer(grpcServer, eventsource.NewEventSourceServer())
pipelinepkg.RegisterPipelineServiceServer(grpcServer, pipeline.NewPipelineServer())
sensorpkg.RegisterSensorServiceServer(grpcServer, sensor.NewSensorServer())
workflowpkg.RegisterWorkflowServiceServer(grpcServer, workflow.NewWorkflowServer(instanceIDService, offloadNodeStatusRepo))
workflowtemplatepkg.RegisterWorkflowTemplateServiceServer(grpcServer, workflowtemplate.NewWorkflowTemplateServer(instanceIDService))
cronworkflowpkg.RegisterCronWorkflowServiceServer(grpcServer, cronworkflow.NewCronWorkflowServer(instanceIDService))
workflowarchivepkg.RegisterArchivedWorkflowServiceServer(grpcServer, workflowarchive.NewWorkflowArchiveServer(wfArchive))
clusterwftemplatepkg.RegisterClusterWorkflowTemplateServiceServer(grpcServer, clusterworkflowtemplate.NewClusterWorkflowTemplateServer(instanceIDService))
grpc_prometheus.Register(grpcServer)
return grpcServer
}
// newHTTPServer returns the HTTP server to serve HTTP/HTTPS requests. This is implemented
// using grpc-gateway as a proxy to the gRPC server.
func (as *argoServer) newHTTPServer(ctx context.Context, port int, artifactServer *artifacts.ArtifactServer) *http.Server {
endpoint := fmt.Sprintf("localhost:%d", port)
mux := http.NewServeMux()
httpServer := http.Server{
Addr: endpoint,
Handler: mux,
TLSConfig: as.tlsConfig,
}
dialOpts := []grpc.DialOption{
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxGRPCMessageSize)),
}
if as.tlsConfig != nil {
dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(as.tlsConfig)))
} else {
dialOpts = append(dialOpts, grpc.WithInsecure())
}
webhookInterceptor := webhook.Interceptor(as.clients.Kubernetes)
// HTTP 1.1+JSON Server
// grpc-ecosystem/grpc-gateway is used to proxy HTTP requests to the corresponding gRPC call
// NOTE: if a marshaller option is not supplied, grpc-gateway will default to the jsonpb from
// golang/protobuf. Which does not support types such as time.Time. gogo/protobuf does support
// time.Time, but does not support custom UnmarshalJSON() and MarshalJSON() methods. Therefore
// we use our own Marshaler
gwMuxOpts := runtime.WithMarshalerOption(runtime.MIMEWildcard, new(json.JSONMarshaler))
gwmux := runtime.NewServeMux(gwMuxOpts,
runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) { return key, true }),
runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
)
mustRegisterGWHandler(infopkg.RegisterInfoServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts)
mustRegisterGWHandler(eventpkg.RegisterEventServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts)
mustRegisterGWHandler(eventsourcepkg.RegisterEventSourceServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts)
mustRegisterGWHandler(sensorpkg.RegisterSensorServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts)
mustRegisterGWHandler(pipelinepkg.RegisterPipelineServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts)
mustRegisterGWHandler(workflowpkg.RegisterWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts)
mustRegisterGWHandler(workflowtemplatepkg.RegisterWorkflowTemplateServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts)
mustRegisterGWHandler(cronworkflowpkg.RegisterCronWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts)
mustRegisterGWHandler(workflowarchivepkg.RegisterArchivedWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts)
mustRegisterGWHandler(clusterwftemplatepkg.RegisterClusterWorkflowTemplateServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts)
mux.HandleFunc("/api/", func(w http.ResponseWriter, r *http.Request) { webhookInterceptor(w, r, gwmux) })
mux.HandleFunc("/artifacts/", artifactServer.GetOutputArtifact)
mux.HandleFunc("/input-artifacts/", artifactServer.GetInputArtifact)
mux.HandleFunc("/artifacts-by-uid/", artifactServer.GetOutputArtifactByUID)
mux.HandleFunc("/input-artifacts-by-uid/", artifactServer.GetInputArtifactByUID)
mux.Handle("/oauth2/redirect", handlers.ProxyHeaders(http.HandlerFunc(as.oAuth2Service.HandleRedirect)))
mux.Handle("/oauth2/callback", handlers.ProxyHeaders(http.HandlerFunc(as.oAuth2Service.HandleCallback)))
mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
if os.Getenv("ARGO_SERVER_METRICS_AUTH") != "false" {
header := metadata.New(map[string]string{"authorization": r.Header.Get("Authorization")})
ctx := metadata.NewIncomingContext(context.Background(), header)
if _, err := as.gatekeeper.Context(ctx); err != nil {
log.WithError(err).Error("failed to authenticate /metrics endpoint")
w.WriteHeader(403)
return
}
}
promhttp.Handler().ServeHTTP(w, r)
})
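// Illustrative only (not part of the original handler): with the auth check above enabled,
// scraping the metrics endpoint requires a bearer token, e.g. an assumed invocation such as
//   curl -H "Authorization: Bearer $ARGO_TOKEN" https://<argo-server-host>:2746/metrics
// whereas exporting ARGO_SERVER_METRICS_AUTH=false skips the gatekeeper check and serves
// the Prometheus metrics unauthenticated.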
// we only enable HSTS if we are in secure mode, otherwise you would never be able to access the UI
mux.HandleFunc("/", static.NewFilesServer(as.baseHRef, as.tlsConfig != nil && as.hsts, as.xframeOptions, as.accessControlAllowOrigin).ServerFiles)
return &httpServer
}
type registerFunc func(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) error
// mustRegisterGWHandler is a convenience function to register a gateway handler
func mustRegisterGWHandler(register registerFunc, ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) {
err := register(ctx, mux, endpoint, opts)
if err != nil {
panic(err)
}
}
// Unlike the controller, the server creates objects based on the config map at init time, and will not pick up on
// changes unless we restart.
// Instead of opting to re-write the server, we'll just listen for any config map change and restart.
func (as *argoServer) restartOnConfigChange(interface{}) error {
log.Info("config map event, exiting gracefully")
as.stopCh <- struct{}{}
return nil
}
// checkServeErr checks the error from a .Serve() call to decide if it was a graceful shutdown
func (as *argoServer) checkServeErr(name string, err error) {
if err != nil {
if as.stopCh == nil {
// a nil stopCh indicates a graceful shutdown
log.Infof("graceful shutdown %s: %v", name, err)
} else {
log.Fatalf("%s: %v", name, err)
}
} else {
log.Infof("graceful shutdown %s", name)
}
}
| [
"\"ARGO_SERVER_METRICS_AUTH\""
]
| []
| [
"ARGO_SERVER_METRICS_AUTH"
]
| [] | ["ARGO_SERVER_METRICS_AUTH"] | go | 1 | 0 | |
test/e2e/storage/vsphere/vsphere_volume_ops_storm.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"fmt"
"os"
"strconv"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Test to perform Disk Ops storm.
Steps
1. Create a storage class for thin provisioning.
2. Create 30 PVCs using the above storage class (set via annotation), requesting 2Gi volumes each.
3. Wait until all disks are ready and all PVs and PVCs are bound. (CreateVolume storm)
4. Create pod to mount volumes using PVCs created in step 2. (AttachDisk storm)
5. Wait for pod status to be running.
6. Verify all volumes are accessible and available in the pod.
7. Delete pod.
8. Wait until volumes get detached. (DetachDisk storm)
9. Delete all PVCs. This should delete all Disks. (DeleteVolume storm)
10. Delete storage class.
*/
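// Illustrative sketch (not part of the original test): the size of the storm can be tuned
// through the VOLUME_OPS_SCALE environment variable read in BeforeEach below, e.g. an
// assumed invocation of the e2e suite such as
//   VOLUME_OPS_SCALE=50 ./hack/ginkgo-e2e.sh --ginkgo.focus="Volume Operations Storm"
// When the variable is unset or empty, the test falls back to defaultVolumeOpsScale (30).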
var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-ops-storm")
const defaultVolumeOpsScale = 30
var (
client clientset.Interface
namespace string
storageclass *storagev1.StorageClass
pvclaims []*v1.PersistentVolumeClaim
persistentvolumes []*v1.PersistentVolume
err error
volumeOpsScale int
)
ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty())
if scale := os.Getenv("VOLUME_OPS_SCALE"); scale != "" {
volumeOpsScale, err = strconv.Atoi(scale)
framework.ExpectNoError(err)
} else {
volumeOpsScale = defaultVolumeOpsScale
}
pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale)
})
ginkgo.AfterEach(func() {
ginkgo.By("Deleting PVCs")
for _, claim := range pvclaims {
e2epv.DeletePersistentVolumeClaim(client, claim.Name, namespace)
}
ginkgo.By("Deleting StorageClass")
err = client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
})
ginkgo.It("should create pod with many volumes and verify no attach call fails", func() {
ginkgo.By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volumeOpsScale))
ginkgo.By("Creating Storage Class")
scParameters := make(map[string]string)
scParameters["diskformat"] = "thin"
storageclass, err = client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("thinsc", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Creating PVCs using the Storage Class")
count := 0
for count < volumeOpsScale {
pvclaims[count], err = e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
count++
}
ginkgo.By("Waiting for all claims to be in bound phase")
persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PVs to the node")
pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, "")
framework.ExpectNoError(err)
ginkgo.By("Verify all volumes are accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
ginkgo.By("Deleting pod")
framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod))
ginkgo.By("Waiting for volumes to be detached from the node")
for _, pv := range persistentvolumes {
waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}
})
})
| [
"\"VOLUME_OPS_SCALE\""
]
| []
| [
"VOLUME_OPS_SCALE"
]
| [] | ["VOLUME_OPS_SCALE"] | go | 1 | 0 | |
modules/setting/setting.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package setting
import (
"encoding/base64"
"fmt"
"net"
"net/mail"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"code.gitea.io/git"
"code.gitea.io/gitea/modules/generate"
"code.gitea.io/gitea/modules/log"
_ "code.gitea.io/gitea/modules/minwinsvc" // import minwinsvc for windows services
"code.gitea.io/gitea/modules/user"
"github.com/Unknwon/com"
_ "github.com/go-macaron/cache/memcache" // memcache plugin for cache
_ "github.com/go-macaron/cache/redis"
"github.com/go-macaron/session"
_ "github.com/go-macaron/session/redis" // redis plugin for store session
"github.com/go-xorm/core"
"github.com/kballard/go-shellquote"
"gopkg.in/ini.v1"
"strk.kbt.io/projects/go/libravatar"
)
// Scheme describes protocol types
type Scheme string
// enumerates all the scheme types
const (
HTTP Scheme = "http"
HTTPS Scheme = "https"
FCGI Scheme = "fcgi"
UnixSocket Scheme = "unix"
)
// LandingPage describes the default page
type LandingPage string
// enumerates all the landing page types
const (
LandingPageHome LandingPage = "/"
LandingPageExplore LandingPage = "/explore"
LandingPageOrganizations LandingPage = "/explore/organizations"
)
// MarkupParser defines the external parser configured in ini
type MarkupParser struct {
Enabled bool
MarkupName string
Command string
FileExtensions []string
IsInputFile bool
}
// enumerates all the policy repository creating
const (
RepoCreatingLastUserVisibility = "last"
RepoCreatingPrivate = "private"
RepoCreatingPublic = "public"
)
// enumerates all the types of captchas
const (
ImageCaptcha = "image"
ReCaptcha = "recaptcha"
)
// settings
var (
// AppVer settings
AppVer string
AppBuiltWith string
AppName string
AppURL string
AppSubURL string
AppSubURLDepth int // Number of slashes
AppPath string
AppDataPath string
AppWorkPath string
// Server settings
Protocol Scheme
Domain string
HTTPAddr string
HTTPPort string
LocalURL string
RedirectOtherPort bool
PortToRedirect string
OfflineMode bool
DisableRouterLog bool
CertFile string
KeyFile string
StaticRootPath string
EnableGzip bool
LandingPageURL LandingPage
UnixSocketPermission uint32
EnablePprof bool
PprofDataPath string
SSH = struct {
Disabled bool `ini:"DISABLE_SSH"`
StartBuiltinServer bool `ini:"START_SSH_SERVER"`
BuiltinServerUser string `ini:"BUILTIN_SSH_SERVER_USER"`
Domain string `ini:"SSH_DOMAIN"`
Port int `ini:"SSH_PORT"`
ListenHost string `ini:"SSH_LISTEN_HOST"`
ListenPort int `ini:"SSH_LISTEN_PORT"`
RootPath string `ini:"SSH_ROOT_PATH"`
ServerCiphers []string `ini:"SSH_SERVER_CIPHERS"`
ServerKeyExchanges []string `ini:"SSH_SERVER_KEY_EXCHANGES"`
ServerMACs []string `ini:"SSH_SERVER_MACS"`
KeyTestPath string `ini:"SSH_KEY_TEST_PATH"`
KeygenPath string `ini:"SSH_KEYGEN_PATH"`
AuthorizedKeysBackup bool `ini:"SSH_AUTHORIZED_KEYS_BACKUP"`
MinimumKeySizeCheck bool `ini:"-"`
MinimumKeySizes map[string]int `ini:"-"`
ExposeAnonymous bool `ini:"SSH_EXPOSE_ANONYMOUS"`
}{
Disabled: false,
StartBuiltinServer: false,
Domain: "",
Port: 22,
ServerCiphers: []string{"aes128-ctr", "aes192-ctr", "aes256-ctr", "[email protected]", "arcfour256", "arcfour128"},
ServerKeyExchanges: []string{"diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1", "ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521", "[email protected]"},
ServerMACs: []string{"[email protected]", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96"},
KeygenPath: "ssh-keygen",
}
LFS struct {
StartServer bool `ini:"LFS_START_SERVER"`
ContentPath string `ini:"LFS_CONTENT_PATH"`
JWTSecretBase64 string `ini:"LFS_JWT_SECRET"`
JWTSecretBytes []byte `ini:"-"`
HTTPAuthExpiry time.Duration `ini:"LFS_HTTP_AUTH_EXPIRY"`
}
// Security settings
InstallLock bool
SecretKey string
LogInRememberDays int
CookieUserName string
CookieRememberName string
ReverseProxyAuthUser string
MinPasswordLength int
ImportLocalPaths bool
DisableGitHooks bool
// Database settings
UseSQLite3 bool
UseMySQL bool
UseMSSQL bool
UsePostgreSQL bool
UseTiDB bool
LogSQL bool
// Indexer settings
Indexer struct {
IssuePath string
RepoIndexerEnabled bool
RepoPath string
UpdateQueueLength int
MaxIndexerFileSize int64
}
// Webhook settings
Webhook = struct {
QueueLength int
DeliverTimeout int
SkipTLSVerify bool
Types []string
PagingNum int
}{
QueueLength: 1000,
DeliverTimeout: 5,
SkipTLSVerify: false,
PagingNum: 10,
}
// Repository settings
Repository = struct {
AnsiCharset string
ForcePrivate bool
DefaultPrivate string
MaxCreationLimit int
MirrorQueueLength int
PullRequestQueueLength int
PreferredLicenses []string
DisableHTTPGit bool
UseCompatSSHURI bool
// Repository editor settings
Editor struct {
LineWrapExtensions []string
PreviewableFileModes []string
} `ini:"-"`
// Repository upload settings
Upload struct {
Enabled bool
TempPath string
AllowedTypes []string `delim:"|"`
FileMaxSize int64
MaxFiles int
} `ini:"-"`
// Repository local settings
Local struct {
LocalCopyPath string
LocalWikiPath string
} `ini:"-"`
}{
AnsiCharset: "",
ForcePrivate: false,
DefaultPrivate: RepoCreatingLastUserVisibility,
MaxCreationLimit: -1,
MirrorQueueLength: 1000,
PullRequestQueueLength: 1000,
PreferredLicenses: []string{"Apache License 2.0,MIT License"},
DisableHTTPGit: false,
UseCompatSSHURI: false,
// Repository editor settings
Editor: struct {
LineWrapExtensions []string
PreviewableFileModes []string
}{
LineWrapExtensions: strings.Split(".txt,.md,.markdown,.mdown,.mkd,", ","),
PreviewableFileModes: []string{"markdown"},
},
// Repository upload settings
Upload: struct {
Enabled bool
TempPath string
AllowedTypes []string `delim:"|"`
FileMaxSize int64
MaxFiles int
}{
Enabled: true,
TempPath: "data/tmp/uploads",
AllowedTypes: []string{},
FileMaxSize: 3,
MaxFiles: 5,
},
// Repository local settings
Local: struct {
LocalCopyPath string
LocalWikiPath string
}{
LocalCopyPath: "tmp/local-repo",
LocalWikiPath: "tmp/local-wiki",
},
}
RepoRootPath string
ScriptType = "bash"
// UI settings
UI = struct {
ExplorePagingNum int
IssuePagingNum int
RepoSearchPagingNum int
FeedMaxCommitNum int
GraphMaxCommitNum int
CodeCommentLines int
ReactionMaxUserNum int
ThemeColorMetaTag string
MaxDisplayFileSize int64
ShowUserEmail bool
DefaultTheme string
Admin struct {
UserPagingNum int
RepoPagingNum int
NoticePagingNum int
OrgPagingNum int
} `ini:"ui.admin"`
User struct {
RepoPagingNum int
} `ini:"ui.user"`
Meta struct {
Author string
Description string
Keywords string
} `ini:"ui.meta"`
}{
ExplorePagingNum: 20,
IssuePagingNum: 10,
RepoSearchPagingNum: 10,
FeedMaxCommitNum: 5,
GraphMaxCommitNum: 100,
CodeCommentLines: 4,
ReactionMaxUserNum: 10,
ThemeColorMetaTag: `#6cc644`,
MaxDisplayFileSize: 8388608,
DefaultTheme: `gitea`,
Admin: struct {
UserPagingNum int
RepoPagingNum int
NoticePagingNum int
OrgPagingNum int
}{
UserPagingNum: 50,
RepoPagingNum: 50,
NoticePagingNum: 25,
OrgPagingNum: 50,
},
User: struct {
RepoPagingNum int
}{
RepoPagingNum: 15,
},
Meta: struct {
Author string
Description string
Keywords string
}{
Author: "Gitea - Git with a cup of tea",
Description: "Gitea (Git with a cup of tea) is a painless self-hosted Git service written in Go",
Keywords: "go,git,self-hosted,gitea",
},
}
// Markdown settings
Markdown = struct {
EnableHardLineBreak bool
CustomURLSchemes []string `ini:"CUSTOM_URL_SCHEMES"`
FileExtensions []string
}{
EnableHardLineBreak: false,
FileExtensions: strings.Split(".md,.markdown,.mdown,.mkd", ","),
}
// Admin settings
Admin struct {
DisableRegularOrgCreation bool
}
// Picture settings
AvatarUploadPath string
AvatarMaxWidth int
AvatarMaxHeight int
GravatarSource string
GravatarSourceURL *url.URL
DisableGravatar bool
EnableFederatedAvatar bool
LibravatarService *libravatar.Libravatar
// Log settings
LogLevel string
LogRootPath string
LogModes []string
LogConfigs []string
// Attachment settings
AttachmentPath string
AttachmentAllowedTypes string
AttachmentMaxSize int64
AttachmentMaxFiles int
AttachmentEnabled bool
// Time settings
TimeFormat string
// Session settings
SessionConfig session.Options
CSRFCookieName = "_csrf"
// Cron tasks
Cron = struct {
UpdateMirror struct {
Enabled bool
RunAtStart bool
Schedule string
} `ini:"cron.update_mirrors"`
RepoHealthCheck struct {
Enabled bool
RunAtStart bool
Schedule string
Timeout time.Duration
Args []string `delim:" "`
} `ini:"cron.repo_health_check"`
CheckRepoStats struct {
Enabled bool
RunAtStart bool
Schedule string
} `ini:"cron.check_repo_stats"`
ArchiveCleanup struct {
Enabled bool
RunAtStart bool
Schedule string
OlderThan time.Duration
} `ini:"cron.archive_cleanup"`
SyncExternalUsers struct {
Enabled bool
RunAtStart bool
Schedule string
UpdateExisting bool
} `ini:"cron.sync_external_users"`
DeletedBranchesCleanup struct {
Enabled bool
RunAtStart bool
Schedule string
OlderThan time.Duration
} `ini:"cron.deleted_branches_cleanup"`
}{
UpdateMirror: struct {
Enabled bool
RunAtStart bool
Schedule string
}{
Enabled: true,
RunAtStart: false,
Schedule: "@every 10m",
},
RepoHealthCheck: struct {
Enabled bool
RunAtStart bool
Schedule string
Timeout time.Duration
Args []string `delim:" "`
}{
Enabled: true,
RunAtStart: false,
Schedule: "@every 24h",
Timeout: 60 * time.Second,
Args: []string{},
},
CheckRepoStats: struct {
Enabled bool
RunAtStart bool
Schedule string
}{
Enabled: true,
RunAtStart: true,
Schedule: "@every 24h",
},
ArchiveCleanup: struct {
Enabled bool
RunAtStart bool
Schedule string
OlderThan time.Duration
}{
Enabled: true,
RunAtStart: true,
Schedule: "@every 24h",
OlderThan: 24 * time.Hour,
},
SyncExternalUsers: struct {
Enabled bool
RunAtStart bool
Schedule string
UpdateExisting bool
}{
Enabled: true,
RunAtStart: false,
Schedule: "@every 24h",
UpdateExisting: true,
},
DeletedBranchesCleanup: struct {
Enabled bool
RunAtStart bool
Schedule string
OlderThan time.Duration
}{
Enabled: true,
RunAtStart: true,
Schedule: "@every 24h",
OlderThan: 24 * time.Hour,
},
}
// Git settings
Git = struct {
Version string `ini:"-"`
DisableDiffHighlight bool
MaxGitDiffLines int
MaxGitDiffLineCharacters int
MaxGitDiffFiles int
GCArgs []string `delim:" "`
Timeout struct {
Migrate int
Mirror int
Clone int
Pull int
GC int `ini:"GC"`
} `ini:"git.timeout"`
}{
DisableDiffHighlight: false,
MaxGitDiffLines: 1000,
MaxGitDiffLineCharacters: 5000,
MaxGitDiffFiles: 100,
GCArgs: []string{},
Timeout: struct {
Migrate int
Mirror int
Clone int
Pull int
GC int `ini:"GC"`
}{
Migrate: 600,
Mirror: 300,
Clone: 300,
Pull: 300,
GC: 60,
},
}
// Mirror settings
Mirror struct {
DefaultInterval time.Duration
MinInterval time.Duration
}
// API settings
API = struct {
EnableSwagger bool
MaxResponseItems int
}{
EnableSwagger: true,
MaxResponseItems: 50,
}
U2F = struct {
AppID string
TrustedFacets []string
}{}
// I18n settings
Langs []string
Names []string
dateLangs map[string]string
// Highlight settings are loaded in modules/template/highlight.go
// Other settings
ShowFooterBranding bool
ShowFooterVersion bool
ShowFooterTemplateLoadTime bool
// Global setting objects
Cfg *ini.File
CustomPath string // Custom directory path
CustomConf string
CustomPID string
ProdMode bool
RunUser string
IsWindows bool
HasRobotsTxt bool
InternalToken string // internal access token
IterateBufferSize int
ExternalMarkupParsers []MarkupParser
// UILocation is the location on the UI, so that we can display the time on UI.
// Currently only show the default time.Local, it could be added to app.ini after UI is ready
UILocation = time.Local
)
// DateLang transforms standard language locale name to corresponding value in datetime plugin.
func DateLang(lang string) string {
name, ok := dateLangs[lang]
if ok {
return name
}
return "en"
}
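// A minimal usage sketch (assumed [i18n.datelang] entries in app.ini):
//   DateLang("zh-CN") // returns the configured datetime-plugin value for that locale
//   DateLang("xx-YY") // returns "en" for any locale without a configured mapping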
func getAppPath() (string, error) {
var appPath string
var err error
if IsWindows && filepath.IsAbs(os.Args[0]) {
appPath = filepath.Clean(os.Args[0])
} else {
appPath, err = exec.LookPath(os.Args[0])
}
if err != nil {
return "", err
}
appPath, err = filepath.Abs(appPath)
if err != nil {
return "", err
}
// Note: we don't use path.Dir here because it does not handle the case
// where the path starts with two "/" in Windows: "//psf/Home/..."
return strings.Replace(appPath, "\\", "/", -1), err
}
func getWorkPath(appPath string) string {
workPath := ""
giteaWorkPath := os.Getenv("GITEA_WORK_DIR")
if len(giteaWorkPath) > 0 {
workPath = giteaWorkPath
} else {
i := strings.LastIndex(appPath, "/")
if i == -1 {
workPath = appPath
} else {
workPath = appPath[:i]
}
}
return strings.Replace(workPath, "\\", "/", -1)
}
func init() {
IsWindows = runtime.GOOS == "windows"
log.NewLogger(0, "console", `{"level": 0}`)
var err error
if AppPath, err = getAppPath(); err != nil {
log.Fatal(4, "Failed to get app path: %v", err)
}
AppWorkPath = getWorkPath(AppPath)
}
func forcePathSeparator(path string) {
if strings.Contains(path, "\\") {
log.Fatal(4, "Do not use '\\' or '\\\\' in paths, instead, please use '/' in all places")
}
}
// IsRunUserMatchCurrentUser returns false if configured run user does not match
// actual user that runs the app. The first return value is the actual user name.
// This check is ignored under Windows since SSH remote login is not the main
// method to login on Windows.
func IsRunUserMatchCurrentUser(runUser string) (string, bool) {
if IsWindows {
return "", true
}
currentUser := user.CurrentUsername()
return currentUser, runUser == currentUser
}
func createPIDFile(pidPath string) {
currentPid := os.Getpid()
if err := os.MkdirAll(filepath.Dir(pidPath), os.ModePerm); err != nil {
log.Fatal(4, "Failed to create PID folder: %v", err)
}
file, err := os.Create(pidPath)
if err != nil {
log.Fatal(4, "Failed to create PID file: %v", err)
}
defer file.Close()
if _, err := file.WriteString(strconv.FormatInt(int64(currentPid), 10)); err != nil {
log.Fatal(4, "Failed to write PID information: %v", err)
}
}
// NewContext initializes configuration context.
// NOTE: do not print any log except error.
func NewContext() {
Cfg = ini.Empty()
CustomPath = os.Getenv("GITEA_CUSTOM")
if len(CustomPath) == 0 {
CustomPath = path.Join(AppWorkPath, "custom")
} else if !filepath.IsAbs(CustomPath) {
CustomPath = path.Join(AppWorkPath, CustomPath)
}
if len(CustomPID) > 0 {
createPIDFile(CustomPID)
}
if len(CustomConf) == 0 {
CustomConf = path.Join(CustomPath, "conf/app.ini")
} else if !filepath.IsAbs(CustomConf) {
CustomConf = path.Join(CustomPath, CustomConf)
}
if com.IsFile(CustomConf) {
if err := Cfg.Append(CustomConf); err != nil {
log.Fatal(4, "Failed to load custom conf '%s': %v", CustomConf, err)
}
} else {
log.Warn("Custom config '%s' not found, ignore this if you're running first time", CustomConf)
}
Cfg.NameMapper = ini.AllCapsUnderscore
homeDir, err := com.HomeDir()
if err != nil {
log.Fatal(4, "Failed to get home directory: %v", err)
}
homeDir = strings.Replace(homeDir, "\\", "/", -1)
LogLevel = getLogLevel("log", "LEVEL", "Info")
LogRootPath = Cfg.Section("log").Key("ROOT_PATH").MustString(path.Join(AppWorkPath, "log"))
forcePathSeparator(LogRootPath)
sec := Cfg.Section("server")
AppName = Cfg.Section("").Key("APP_NAME").MustString("Gitea: Git with a cup of tea")
Protocol = HTTP
if sec.Key("PROTOCOL").String() == "https" {
Protocol = HTTPS
CertFile = sec.Key("CERT_FILE").String()
KeyFile = sec.Key("KEY_FILE").String()
} else if sec.Key("PROTOCOL").String() == "fcgi" {
Protocol = FCGI
} else if sec.Key("PROTOCOL").String() == "unix" {
Protocol = UnixSocket
UnixSocketPermissionRaw := sec.Key("UNIX_SOCKET_PERMISSION").MustString("666")
UnixSocketPermissionParsed, err := strconv.ParseUint(UnixSocketPermissionRaw, 8, 32)
if err != nil || UnixSocketPermissionParsed > 0777 {
log.Fatal(4, "Failed to parse unixSocketPermission: %s", UnixSocketPermissionRaw)
}
UnixSocketPermission = uint32(UnixSocketPermissionParsed)
}
Domain = sec.Key("DOMAIN").MustString("localhost")
HTTPAddr = sec.Key("HTTP_ADDR").MustString("0.0.0.0")
HTTPPort = sec.Key("HTTP_PORT").MustString("3000")
defaultAppURL := string(Protocol) + "://" + Domain
if (Protocol == HTTP && HTTPPort != "80") || (Protocol == HTTPS && HTTPPort != "443") {
defaultAppURL += ":" + HTTPPort
}
AppURL = sec.Key("ROOT_URL").MustString(defaultAppURL)
AppURL = strings.TrimRight(AppURL, "/") + "/"
// Check if the site has an app sub-url.
url, err := url.Parse(AppURL)
if err != nil {
log.Fatal(4, "Invalid ROOT_URL '%s': %s", AppURL, err)
}
// Suburl should start with '/' and end without '/', such as '/{subpath}'.
// This value is empty if site does not have sub-url.
AppSubURL = strings.TrimSuffix(url.Path, "/")
AppSubURLDepth = strings.Count(AppSubURL, "/")
// If Domain differs from the AppURL domain, then update it to AppURL's domain
// TODO: Can be replaced with url.Hostname() when minimal GoLang version is 1.8
urlHostname := strings.SplitN(url.Host, ":", 2)[0]
if urlHostname != Domain && net.ParseIP(urlHostname) == nil {
Domain = urlHostname
}
var defaultLocalURL string
switch Protocol {
case UnixSocket:
defaultLocalURL = "http://unix/"
case FCGI:
defaultLocalURL = AppURL
default:
defaultLocalURL = string(Protocol) + "://"
if HTTPAddr == "0.0.0.0" {
defaultLocalURL += "localhost"
} else {
defaultLocalURL += HTTPAddr
}
defaultLocalURL += ":" + HTTPPort + "/"
}
LocalURL = sec.Key("LOCAL_ROOT_URL").MustString(defaultLocalURL)
RedirectOtherPort = sec.Key("REDIRECT_OTHER_PORT").MustBool(false)
PortToRedirect = sec.Key("PORT_TO_REDIRECT").MustString("80")
OfflineMode = sec.Key("OFFLINE_MODE").MustBool()
DisableRouterLog = sec.Key("DISABLE_ROUTER_LOG").MustBool()
StaticRootPath = sec.Key("STATIC_ROOT_PATH").MustString(AppWorkPath)
AppDataPath = sec.Key("APP_DATA_PATH").MustString(path.Join(AppWorkPath, "data"))
EnableGzip = sec.Key("ENABLE_GZIP").MustBool()
EnablePprof = sec.Key("ENABLE_PPROF").MustBool(false)
PprofDataPath = sec.Key("PPROF_DATA_PATH").MustString(path.Join(AppWorkPath, "data/tmp/pprof"))
if !filepath.IsAbs(PprofDataPath) {
PprofDataPath = filepath.Join(AppWorkPath, PprofDataPath)
}
switch sec.Key("LANDING_PAGE").MustString("home") {
case "explore":
LandingPageURL = LandingPageExplore
case "organizations":
LandingPageURL = LandingPageOrganizations
default:
LandingPageURL = LandingPageHome
}
if len(SSH.Domain) == 0 {
SSH.Domain = Domain
}
SSH.RootPath = path.Join(homeDir, ".ssh")
serverCiphers := sec.Key("SSH_SERVER_CIPHERS").Strings(",")
if len(serverCiphers) > 0 {
SSH.ServerCiphers = serverCiphers
}
serverKeyExchanges := sec.Key("SSH_SERVER_KEY_EXCHANGES").Strings(",")
if len(serverKeyExchanges) > 0 {
SSH.ServerKeyExchanges = serverKeyExchanges
}
serverMACs := sec.Key("SSH_SERVER_MACS").Strings(",")
if len(serverMACs) > 0 {
SSH.ServerMACs = serverMACs
}
SSH.KeyTestPath = os.TempDir()
if err = Cfg.Section("server").MapTo(&SSH); err != nil {
log.Fatal(4, "Failed to map SSH settings: %v", err)
}
SSH.KeygenPath = sec.Key("SSH_KEYGEN_PATH").MustString("ssh-keygen")
SSH.Port = sec.Key("SSH_PORT").MustInt(22)
SSH.ListenPort = sec.Key("SSH_LISTEN_PORT").MustInt(SSH.Port)
// When SSH is disabled, the start builtin server value is ignored.
if SSH.Disabled {
SSH.StartBuiltinServer = false
}
if !SSH.Disabled && !SSH.StartBuiltinServer {
if err := os.MkdirAll(SSH.RootPath, 0700); err != nil {
log.Fatal(4, "Failed to create '%s': %v", SSH.RootPath, err)
} else if err = os.MkdirAll(SSH.KeyTestPath, 0644); err != nil {
log.Fatal(4, "Failed to create '%s': %v", SSH.KeyTestPath, err)
}
}
SSH.MinimumKeySizeCheck = sec.Key("MINIMUM_KEY_SIZE_CHECK").MustBool()
SSH.MinimumKeySizes = map[string]int{}
minimumKeySizes := Cfg.Section("ssh.minimum_key_sizes").Keys()
for _, key := range minimumKeySizes {
if key.MustInt() != -1 {
SSH.MinimumKeySizes[strings.ToLower(key.Name())] = key.MustInt()
}
}
SSH.AuthorizedKeysBackup = sec.Key("SSH_AUTHORIZED_KEYS_BACKUP").MustBool(true)
SSH.ExposeAnonymous = sec.Key("SSH_EXPOSE_ANONYMOUS").MustBool(false)
sec = Cfg.Section("server")
if err = sec.MapTo(&LFS); err != nil {
log.Fatal(4, "Failed to map LFS settings: %v", err)
}
LFS.ContentPath = sec.Key("LFS_CONTENT_PATH").MustString(filepath.Join(AppDataPath, "lfs"))
if !filepath.IsAbs(LFS.ContentPath) {
LFS.ContentPath = filepath.Join(AppWorkPath, LFS.ContentPath)
}
LFS.HTTPAuthExpiry = sec.Key("LFS_HTTP_AUTH_EXPIRY").MustDuration(20 * time.Minute)
if LFS.StartServer {
if err := os.MkdirAll(LFS.ContentPath, 0700); err != nil {
log.Fatal(4, "Failed to create '%s': %v", LFS.ContentPath, err)
}
LFS.JWTSecretBytes = make([]byte, 32)
n, err := base64.RawURLEncoding.Decode(LFS.JWTSecretBytes, []byte(LFS.JWTSecretBase64))
if err != nil || n != 32 {
LFS.JWTSecretBase64, err = generate.NewLfsJwtSecret()
if err != nil {
log.Fatal(4, "Error generating JWT Secret for custom config: %v", err)
return
}
// Save secret
cfg := ini.Empty()
if com.IsFile(CustomConf) {
// Keeps custom settings if there is already something.
if err := cfg.Append(CustomConf); err != nil {
log.Error(4, "Failed to load custom conf '%s': %v", CustomConf, err)
}
}
cfg.Section("server").Key("LFS_JWT_SECRET").SetValue(LFS.JWTSecretBase64)
if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
log.Fatal(4, "Failed to create '%s': %v", CustomConf, err)
}
if err := cfg.SaveTo(CustomConf); err != nil {
log.Fatal(4, "Error saving generated JWT Secret to custom config: %v", err)
return
}
}
// Disable LFS client hooks if installed for the current OS user.
// Needs at least git v2.1.2.
binVersion, err := git.BinVersion()
if err != nil {
log.Fatal(4, "Error retrieving git version: %v", err)
}
splitVersion := strings.SplitN(binVersion, ".", 4)
majorVersion, err := strconv.ParseUint(splitVersion[0], 10, 64)
if err != nil {
log.Fatal(4, "Error parsing git major version: %v", err)
}
minorVersion, err := strconv.ParseUint(splitVersion[1], 10, 64)
if err != nil {
log.Fatal(4, "Error parsing git minor version: %v", err)
}
revisionVersion, err := strconv.ParseUint(splitVersion[2], 10, 64)
if err != nil {
log.Fatal(4, "Error parsing git revision version: %v", err)
}
if !((majorVersion > 2) || (majorVersion == 2 && minorVersion > 1) ||
(majorVersion == 2 && minorVersion == 1 && revisionVersion >= 2)) {
LFS.StartServer = false
log.Error(4, "LFS server support needs at least Git v2.1.2")
} else {
git.GlobalCommandArgs = append(git.GlobalCommandArgs, "-c", "filter.lfs.required=",
"-c", "filter.lfs.smudge=", "-c", "filter.lfs.clean=")
}
}
sec = Cfg.Section("security")
InstallLock = sec.Key("INSTALL_LOCK").MustBool(false)
SecretKey = sec.Key("SECRET_KEY").MustString("!#@FDEWREWR&*(")
LogInRememberDays = sec.Key("LOGIN_REMEMBER_DAYS").MustInt(7)
CookieUserName = sec.Key("COOKIE_USERNAME").MustString("gitea_awesome")
CookieRememberName = sec.Key("COOKIE_REMEMBER_NAME").MustString("gitea_incredible")
ReverseProxyAuthUser = sec.Key("REVERSE_PROXY_AUTHENTICATION_USER").MustString("X-WEBAUTH-USER")
MinPasswordLength = sec.Key("MIN_PASSWORD_LENGTH").MustInt(6)
ImportLocalPaths = sec.Key("IMPORT_LOCAL_PATHS").MustBool(false)
DisableGitHooks = sec.Key("DISABLE_GIT_HOOKS").MustBool(false)
InternalToken = sec.Key("INTERNAL_TOKEN").String()
if len(InternalToken) == 0 {
InternalToken, err = generate.NewInternalToken()
if err != nil {
log.Fatal(4, "Error generate internal token: %v", err)
}
// Save secret
cfgSave := ini.Empty()
if com.IsFile(CustomConf) {
// Keeps custom settings if there is already something.
if err := cfgSave.Append(CustomConf); err != nil {
log.Error(4, "Failed to load custom conf '%s': %v", CustomConf, err)
}
}
cfgSave.Section("security").Key("INTERNAL_TOKEN").SetValue(InternalToken)
if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
log.Fatal(4, "Failed to create '%s': %v", CustomConf, err)
}
if err := cfgSave.SaveTo(CustomConf); err != nil {
log.Fatal(4, "Error saving generated JWT Secret to custom config: %v", err)
}
}
IterateBufferSize = Cfg.Section("database").Key("ITERATE_BUFFER_SIZE").MustInt(50)
LogSQL = Cfg.Section("database").Key("LOG_SQL").MustBool(true)
sec = Cfg.Section("attachment")
AttachmentPath = sec.Key("PATH").MustString(path.Join(AppDataPath, "attachments"))
if !filepath.IsAbs(AttachmentPath) {
AttachmentPath = path.Join(AppWorkPath, AttachmentPath)
}
AttachmentAllowedTypes = strings.Replace(sec.Key("ALLOWED_TYPES").MustString("image/jpeg,image/png,application/zip,application/gzip"), "|", ",", -1)
AttachmentMaxSize = sec.Key("MAX_SIZE").MustInt64(4)
AttachmentMaxFiles = sec.Key("MAX_FILES").MustInt(5)
AttachmentEnabled = sec.Key("ENABLED").MustBool(true)
TimeFormatKey := Cfg.Section("time").Key("FORMAT").MustString("RFC1123")
TimeFormat = map[string]string{
"ANSIC": time.ANSIC,
"UnixDate": time.UnixDate,
"RubyDate": time.RubyDate,
"RFC822": time.RFC822,
"RFC822Z": time.RFC822Z,
"RFC850": time.RFC850,
"RFC1123": time.RFC1123,
"RFC1123Z": time.RFC1123Z,
"RFC3339": time.RFC3339,
"RFC3339Nano": time.RFC3339Nano,
"Kitchen": time.Kitchen,
"Stamp": time.Stamp,
"StampMilli": time.StampMilli,
"StampMicro": time.StampMicro,
"StampNano": time.StampNano,
}[TimeFormatKey]
// When the TimeFormatKey does not exist in the previous map, e.g. '2006-01-02 15:04:05'
if len(TimeFormat) == 0 {
TimeFormat = TimeFormatKey
TestTimeFormat, _ := time.Parse(TimeFormat, TimeFormat)
if TestTimeFormat.Format(time.RFC3339) != "2006-01-02T15:04:05Z" {
log.Fatal(4, "Can't create time properly, please check your time format has 2006, 01, 02, 15, 04 and 05")
}
log.Trace("Custom TimeFormat: %s", TimeFormat)
}
RunUser = Cfg.Section("").Key("RUN_USER").MustString(user.CurrentUsername())
// Does not check run user when the install lock is off.
if InstallLock {
currentUser, match := IsRunUserMatchCurrentUser(RunUser)
if !match {
log.Fatal(4, "Expect user '%s' but current user is: %s", RunUser, currentUser)
}
}
SSH.BuiltinServerUser = Cfg.Section("server").Key("BUILTIN_SSH_SERVER_USER").MustString(RunUser)
// Determine and create root git repository path.
sec = Cfg.Section("repository")
Repository.DisableHTTPGit = sec.Key("DISABLE_HTTP_GIT").MustBool()
Repository.UseCompatSSHURI = sec.Key("USE_COMPAT_SSH_URI").MustBool()
Repository.MaxCreationLimit = sec.Key("MAX_CREATION_LIMIT").MustInt(-1)
RepoRootPath = sec.Key("ROOT").MustString(path.Join(homeDir, "gitea-repositories"))
forcePathSeparator(RepoRootPath)
if !filepath.IsAbs(RepoRootPath) {
RepoRootPath = filepath.Join(AppWorkPath, RepoRootPath)
} else {
RepoRootPath = filepath.Clean(RepoRootPath)
}
ScriptType = sec.Key("SCRIPT_TYPE").MustString("bash")
if err = Cfg.Section("repository").MapTo(&Repository); err != nil {
log.Fatal(4, "Failed to map Repository settings: %v", err)
} else if err = Cfg.Section("repository.editor").MapTo(&Repository.Editor); err != nil {
log.Fatal(4, "Failed to map Repository.Editor settings: %v", err)
} else if err = Cfg.Section("repository.upload").MapTo(&Repository.Upload); err != nil {
log.Fatal(4, "Failed to map Repository.Upload settings: %v", err)
} else if err = Cfg.Section("repository.local").MapTo(&Repository.Local); err != nil {
log.Fatal(4, "Failed to map Repository.Local settings: %v", err)
}
if !filepath.IsAbs(Repository.Upload.TempPath) {
Repository.Upload.TempPath = path.Join(AppWorkPath, Repository.Upload.TempPath)
}
sec = Cfg.Section("picture")
AvatarUploadPath = sec.Key("AVATAR_UPLOAD_PATH").MustString(path.Join(AppDataPath, "avatars"))
forcePathSeparator(AvatarUploadPath)
if !filepath.IsAbs(AvatarUploadPath) {
AvatarUploadPath = path.Join(AppWorkPath, AvatarUploadPath)
}
AvatarMaxWidth = sec.Key("AVATAR_MAX_WIDTH").MustInt(4096)
AvatarMaxHeight = sec.Key("AVATAR_MAX_HEIGHT").MustInt(3072)
switch source := sec.Key("GRAVATAR_SOURCE").MustString("gravatar"); source {
case "duoshuo":
GravatarSource = "http://gravatar.duoshuo.com/avatar/"
case "gravatar":
GravatarSource = "https://secure.gravatar.com/avatar/"
case "libravatar":
GravatarSource = "https://seccdn.libravatar.org/avatar/"
default:
GravatarSource = source
}
DisableGravatar = sec.Key("DISABLE_GRAVATAR").MustBool()
EnableFederatedAvatar = sec.Key("ENABLE_FEDERATED_AVATAR").MustBool(!InstallLock)
if OfflineMode {
DisableGravatar = true
EnableFederatedAvatar = false
}
if DisableGravatar {
EnableFederatedAvatar = false
}
if EnableFederatedAvatar || !DisableGravatar {
GravatarSourceURL, err = url.Parse(GravatarSource)
if err != nil {
log.Fatal(4, "Failed to parse Gravatar URL(%s): %v",
GravatarSource, err)
}
}
if EnableFederatedAvatar {
LibravatarService = libravatar.New()
if GravatarSourceURL.Scheme == "https" {
LibravatarService.SetUseHTTPS(true)
LibravatarService.SetSecureFallbackHost(GravatarSourceURL.Host)
} else {
LibravatarService.SetUseHTTPS(false)
LibravatarService.SetFallbackHost(GravatarSourceURL.Host)
}
}
if err = Cfg.Section("ui").MapTo(&UI); err != nil {
log.Fatal(4, "Failed to map UI settings: %v", err)
} else if err = Cfg.Section("markdown").MapTo(&Markdown); err != nil {
log.Fatal(4, "Failed to map Markdown settings: %v", err)
} else if err = Cfg.Section("admin").MapTo(&Admin); err != nil {
log.Fatal(4, "Fail to map Admin settings: %v", err)
} else if err = Cfg.Section("cron").MapTo(&Cron); err != nil {
log.Fatal(4, "Failed to map Cron settings: %v", err)
} else if err = Cfg.Section("git").MapTo(&Git); err != nil {
log.Fatal(4, "Failed to map Git settings: %v", err)
} else if err = Cfg.Section("api").MapTo(&API); err != nil {
log.Fatal(4, "Failed to map API settings: %v", err)
}
sec = Cfg.Section("mirror")
Mirror.MinInterval = sec.Key("MIN_INTERVAL").MustDuration(10 * time.Minute)
Mirror.DefaultInterval = sec.Key("DEFAULT_INTERVAL").MustDuration(8 * time.Hour)
if Mirror.MinInterval.Minutes() < 1 {
log.Warn("Mirror.MinInterval is too low")
Mirror.MinInterval = 1 * time.Minute
}
if Mirror.DefaultInterval < Mirror.MinInterval {
log.Warn("Mirror.DefaultInterval is less than Mirror.MinInterval")
Mirror.DefaultInterval = time.Hour * 8
}
Langs = Cfg.Section("i18n").Key("LANGS").Strings(",")
if len(Langs) == 0 {
Langs = defaultLangs
}
Names = Cfg.Section("i18n").Key("NAMES").Strings(",")
if len(Names) == 0 {
Names = defaultLangNames
}
dateLangs = Cfg.Section("i18n.datelang").KeysHash()
ShowFooterBranding = Cfg.Section("other").Key("SHOW_FOOTER_BRANDING").MustBool(false)
ShowFooterVersion = Cfg.Section("other").Key("SHOW_FOOTER_VERSION").MustBool(true)
ShowFooterTemplateLoadTime = Cfg.Section("other").Key("SHOW_FOOTER_TEMPLATE_LOAD_TIME").MustBool(true)
UI.ShowUserEmail = Cfg.Section("ui").Key("SHOW_USER_EMAIL").MustBool(true)
HasRobotsTxt = com.IsFile(path.Join(CustomPath, "robots.txt"))
extensionReg := regexp.MustCompile(`\.\w`)
for _, sec := range Cfg.Section("markup").ChildSections() {
name := strings.TrimPrefix(sec.Name(), "markup.")
if name == "" {
log.Warn("name is empty, markup " + sec.Name() + " ignored")
continue
}
extensions := sec.Key("FILE_EXTENSIONS").Strings(",")
var exts = make([]string, 0, len(extensions))
for _, extension := range extensions {
if !extensionReg.MatchString(extension) {
log.Warn(sec.Name() + " file extension " + extension + " is invalid. Extension ignored")
} else {
exts = append(exts, extension)
}
}
if len(exts) == 0 {
log.Warn(sec.Name() + " file extension is empty, markup " + name + " ignored")
continue
}
command := sec.Key("RENDER_COMMAND").MustString("")
if command == "" {
log.Warn(" RENDER_COMMAND is empty, markup " + name + " ignored")
continue
}
ExternalMarkupParsers = append(ExternalMarkupParsers, MarkupParser{
Enabled: sec.Key("ENABLED").MustBool(false),
MarkupName: name,
FileExtensions: exts,
Command: command,
IsInputFile: sec.Key("IS_INPUT_FILE").MustBool(false),
})
}
sec = Cfg.Section("U2F")
U2F.TrustedFacets, _ = shellquote.Split(sec.Key("TRUSTED_FACETS").MustString(strings.TrimRight(AppURL, "/")))
U2F.AppID = sec.Key("APP_ID").MustString(strings.TrimRight(AppURL, "/"))
}
// Service settings
var Service struct {
ActiveCodeLives int
ResetPwdCodeLives int
RegisterEmailConfirm bool
DisableRegistration bool
AllowOnlyExternalRegistration bool
ShowRegistrationButton bool
RequireSignInView bool
EnableNotifyMail bool
EnableReverseProxyAuth bool
EnableReverseProxyAutoRegister bool
EnableCaptcha bool
CaptchaType string
RecaptchaSecret string
RecaptchaSitekey string
DefaultKeepEmailPrivate bool
DefaultAllowCreateOrganization bool
EnableTimetracking bool
DefaultEnableTimetracking bool
DefaultEnableDependencies bool
DefaultAllowOnlyContributorsToTrackTime bool
NoReplyAddress string
// OpenID settings
EnableOpenIDSignIn bool
EnableOpenIDSignUp bool
OpenIDWhitelist []*regexp.Regexp
OpenIDBlacklist []*regexp.Regexp
}
func newService() {
sec := Cfg.Section("service")
Service.ActiveCodeLives = sec.Key("ACTIVE_CODE_LIVE_MINUTES").MustInt(180)
Service.ResetPwdCodeLives = sec.Key("RESET_PASSWD_CODE_LIVE_MINUTES").MustInt(180)
Service.DisableRegistration = sec.Key("DISABLE_REGISTRATION").MustBool()
Service.AllowOnlyExternalRegistration = sec.Key("ALLOW_ONLY_EXTERNAL_REGISTRATION").MustBool()
Service.ShowRegistrationButton = sec.Key("SHOW_REGISTRATION_BUTTON").MustBool(!(Service.DisableRegistration || Service.AllowOnlyExternalRegistration))
Service.RequireSignInView = sec.Key("REQUIRE_SIGNIN_VIEW").MustBool()
Service.EnableReverseProxyAuth = sec.Key("ENABLE_REVERSE_PROXY_AUTHENTICATION").MustBool()
Service.EnableReverseProxyAutoRegister = sec.Key("ENABLE_REVERSE_PROXY_AUTO_REGISTRATION").MustBool()
Service.EnableCaptcha = sec.Key("ENABLE_CAPTCHA").MustBool(false)
Service.CaptchaType = sec.Key("CAPTCHA_TYPE").MustString(ImageCaptcha)
Service.RecaptchaSecret = sec.Key("RECAPTCHA_SECRET").MustString("")
Service.RecaptchaSitekey = sec.Key("RECAPTCHA_SITEKEY").MustString("")
Service.DefaultKeepEmailPrivate = sec.Key("DEFAULT_KEEP_EMAIL_PRIVATE").MustBool()
Service.DefaultAllowCreateOrganization = sec.Key("DEFAULT_ALLOW_CREATE_ORGANIZATION").MustBool(true)
Service.EnableTimetracking = sec.Key("ENABLE_TIMETRACKING").MustBool(true)
if Service.EnableTimetracking {
Service.DefaultEnableTimetracking = sec.Key("DEFAULT_ENABLE_TIMETRACKING").MustBool(true)
}
Service.DefaultEnableDependencies = sec.Key("DEFAULT_ENABLE_DEPENDENCIES").MustBool(true)
Service.DefaultAllowOnlyContributorsToTrackTime = sec.Key("DEFAULT_ALLOW_ONLY_CONTRIBUTORS_TO_TRACK_TIME").MustBool(true)
Service.NoReplyAddress = sec.Key("NO_REPLY_ADDRESS").MustString("noreply.example.org")
sec = Cfg.Section("openid")
Service.EnableOpenIDSignIn = sec.Key("ENABLE_OPENID_SIGNIN").MustBool(!InstallLock)
Service.EnableOpenIDSignUp = sec.Key("ENABLE_OPENID_SIGNUP").MustBool(!Service.DisableRegistration && Service.EnableOpenIDSignIn)
pats := sec.Key("WHITELISTED_URIS").Strings(" ")
if len(pats) != 0 {
Service.OpenIDWhitelist = make([]*regexp.Regexp, len(pats))
for i, p := range pats {
Service.OpenIDWhitelist[i] = regexp.MustCompilePOSIX(p)
}
}
pats = sec.Key("BLACKLISTED_URIS").Strings(" ")
if len(pats) != 0 {
Service.OpenIDBlacklist = make([]*regexp.Regexp, len(pats))
for i, p := range pats {
Service.OpenIDBlacklist[i] = regexp.MustCompilePOSIX(p)
}
}
}
var logLevels = map[string]string{
"Trace": "0",
"Debug": "1",
"Info": "2",
"Warn": "3",
"Error": "4",
"Critical": "5",
}
func getLogLevel(section string, key string, defaultValue string) string {
validLevels := []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"}
return Cfg.Section(section).Key(key).In(defaultValue, validLevels)
}
func newLogService() {
log.Info("Gitea v%s%s", AppVer, AppBuiltWith)
LogModes = strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
LogConfigs = make([]string, len(LogModes))
useConsole := false
for i := 0; i < len(LogModes); i++ {
LogModes[i] = strings.TrimSpace(LogModes[i])
if LogModes[i] == "console" {
useConsole = true
}
}
if !useConsole {
log.DelLogger("console")
}
for i, mode := range LogModes {
sec, err := Cfg.GetSection("log." + mode)
if err != nil {
sec, _ = Cfg.NewSection("log." + mode)
}
// Log level.
levelName := getLogLevel("log."+mode, "LEVEL", LogLevel)
level, ok := logLevels[levelName]
if !ok {
log.Fatal(4, "Unknown log level: %s", levelName)
}
// Generate log configuration.
switch mode {
case "console":
LogConfigs[i] = fmt.Sprintf(`{"level":%s}`, level)
case "file":
logPath := sec.Key("FILE_NAME").MustString(path.Join(LogRootPath, "gitea.log"))
if err = os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
panic(err.Error())
}
LogConfigs[i] = fmt.Sprintf(
`{"level":%s,"filename":"%s","rotate":%v,"maxlines":%d,"maxsize":%d,"daily":%v,"maxdays":%d}`, level,
logPath,
sec.Key("LOG_ROTATE").MustBool(true),
sec.Key("MAX_LINES").MustInt(1000000),
1<<uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28)),
sec.Key("DAILY_ROTATE").MustBool(true),
sec.Key("MAX_DAYS").MustInt(7))
case "conn":
LogConfigs[i] = fmt.Sprintf(`{"level":%s,"reconnectOnMsg":%v,"reconnect":%v,"net":"%s","addr":"%s"}`, level,
sec.Key("RECONNECT_ON_MSG").MustBool(),
sec.Key("RECONNECT").MustBool(),
sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"}),
sec.Key("ADDR").MustString(":7020"))
case "smtp":
LogConfigs[i] = fmt.Sprintf(`{"level":%s,"username":"%s","password":"%s","host":"%s","sendTos":["%s"],"subject":"%s"}`, level,
sec.Key("USER").MustString("[email protected]"),
sec.Key("PASSWD").MustString("******"),
sec.Key("HOST").MustString("127.0.0.1:25"),
strings.Replace(sec.Key("RECEIVERS").MustString("[email protected]"), ",", "\",\"", -1),
sec.Key("SUBJECT").MustString("Diagnostic message from serve"))
case "database":
LogConfigs[i] = fmt.Sprintf(`{"level":%s,"driver":"%s","conn":"%s"}`, level,
sec.Key("DRIVER").String(),
sec.Key("CONN").String())
}
log.NewLogger(Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000), mode, LogConfigs[i])
log.Info("Log Mode: %s(%s)", strings.Title(mode), levelName)
}
}
// NewXORMLogService initializes xorm logger service
func NewXORMLogService(disableConsole bool) {
logModes := strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
var logConfigs string
for _, mode := range logModes {
mode = strings.TrimSpace(mode)
if disableConsole && mode == "console" {
continue
}
sec, err := Cfg.GetSection("log." + mode)
if err != nil {
sec, _ = Cfg.NewSection("log." + mode)
}
// Log level.
levelName := getLogLevel("log."+mode, "LEVEL", LogLevel)
level, ok := logLevels[levelName]
if !ok {
log.Fatal(4, "Unknown log level: %s", levelName)
}
// Generate log configuration.
switch mode {
case "console":
logConfigs = fmt.Sprintf(`{"level":%s}`, level)
case "file":
logPath := sec.Key("FILE_NAME").MustString(path.Join(LogRootPath, "xorm.log"))
if err = os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
panic(err.Error())
}
logPath = path.Join(filepath.Dir(logPath), "xorm.log")
logConfigs = fmt.Sprintf(
`{"level":%s,"filename":"%s","rotate":%v,"maxlines":%d,"maxsize":%d,"daily":%v,"maxdays":%d}`, level,
logPath,
sec.Key("LOG_ROTATE").MustBool(true),
sec.Key("MAX_LINES").MustInt(1000000),
1<<uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28)),
sec.Key("DAILY_ROTATE").MustBool(true),
sec.Key("MAX_DAYS").MustInt(7))
case "conn":
logConfigs = fmt.Sprintf(`{"level":%s,"reconnectOnMsg":%v,"reconnect":%v,"net":"%s","addr":"%s"}`, level,
sec.Key("RECONNECT_ON_MSG").MustBool(),
sec.Key("RECONNECT").MustBool(),
sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"}),
sec.Key("ADDR").MustString(":7020"))
case "smtp":
logConfigs = fmt.Sprintf(`{"level":%s,"username":"%s","password":"%s","host":"%s","sendTos":"%s","subject":"%s"}`, level,
sec.Key("USER").MustString("[email protected]"),
sec.Key("PASSWD").MustString("******"),
sec.Key("HOST").MustString("127.0.0.1:25"),
sec.Key("RECEIVERS").MustString("[]"),
sec.Key("SUBJECT").MustString("Diagnostic message from serve"))
case "database":
logConfigs = fmt.Sprintf(`{"level":%s,"driver":"%s","conn":"%s"}`, level,
sec.Key("DRIVER").String(),
sec.Key("CONN").String())
}
log.NewXORMLogger(Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000), mode, logConfigs)
if !disableConsole {
log.Info("XORM Log Mode: %s(%s)", strings.Title(mode), levelName)
}
var lvl core.LogLevel
switch levelName {
case "Trace", "Debug":
lvl = core.LOG_DEBUG
case "Info":
lvl = core.LOG_INFO
case "Warn":
lvl = core.LOG_WARNING
case "Error", "Critical":
lvl = core.LOG_ERR
}
log.XORMLogger.SetLevel(lvl)
}
if len(logConfigs) == 0 {
log.DiscardXORMLogger()
}
}
// Cache represents cache settings
type Cache struct {
Adapter string
Interval int
Conn string
TTL time.Duration
}
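// An illustrative app.ini fragment for the struct above (assumed values, consumed by
// newCacheService below):
//   [cache]
//   ADAPTER  = memory
//   INTERVAL = 60
//   ITEM_TTL = 16h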
var (
// CacheService the global cache
CacheService *Cache
)
func newCacheService() {
sec := Cfg.Section("cache")
CacheService = &Cache{
Adapter: sec.Key("ADAPTER").In("memory", []string{"memory", "redis", "memcache"}),
}
switch CacheService.Adapter {
case "memory":
CacheService.Interval = sec.Key("INTERVAL").MustInt(60)
case "redis", "memcache":
CacheService.Conn = strings.Trim(sec.Key("HOST").String(), "\" ")
default:
log.Fatal(4, "Unknown cache adapter: %s", CacheService.Adapter)
}
CacheService.TTL = sec.Key("ITEM_TTL").MustDuration(16 * time.Hour)
log.Info("Cache Service Enabled")
}
func newSessionService() {
SessionConfig.Provider = Cfg.Section("session").Key("PROVIDER").In("memory",
[]string{"memory", "file", "redis", "mysql"})
SessionConfig.ProviderConfig = strings.Trim(Cfg.Section("session").Key("PROVIDER_CONFIG").MustString(path.Join(AppDataPath, "sessions")), "\" ")
if SessionConfig.Provider == "file" && !filepath.IsAbs(SessionConfig.ProviderConfig) {
SessionConfig.ProviderConfig = path.Join(AppWorkPath, SessionConfig.ProviderConfig)
}
SessionConfig.CookieName = Cfg.Section("session").Key("COOKIE_NAME").MustString("i_like_gitea")
SessionConfig.CookiePath = AppSubURL
SessionConfig.Secure = Cfg.Section("session").Key("COOKIE_SECURE").MustBool(false)
SessionConfig.Gclifetime = Cfg.Section("session").Key("GC_INTERVAL_TIME").MustInt64(86400)
SessionConfig.Maxlifetime = Cfg.Section("session").Key("SESSION_LIFE_TIME").MustInt64(86400)
log.Info("Session Service Enabled")
}
// Mailer represents mail service.
type Mailer struct {
// Mailer
QueueLength int
Name string
From string
FromName string
FromEmail string
SendAsPlainText bool
// SMTP sender
Host string
User, Passwd string
DisableHelo bool
HeloHostname string
SkipVerify bool
UseCertificate bool
CertFile, KeyFile string
// Sendmail sender
UseSendmail bool
SendmailPath string
SendmailArgs []string
}
var (
// MailService the global mailer
MailService *Mailer
)
func newMailService() {
sec := Cfg.Section("mailer")
// Check mailer setting.
if !sec.Key("ENABLED").MustBool() {
return
}
MailService = &Mailer{
QueueLength: sec.Key("SEND_BUFFER_LEN").MustInt(100),
Name: sec.Key("NAME").MustString(AppName),
SendAsPlainText: sec.Key("SEND_AS_PLAIN_TEXT").MustBool(false),
Host: sec.Key("HOST").String(),
User: sec.Key("USER").String(),
Passwd: sec.Key("PASSWD").String(),
DisableHelo: sec.Key("DISABLE_HELO").MustBool(),
HeloHostname: sec.Key("HELO_HOSTNAME").String(),
SkipVerify: sec.Key("SKIP_VERIFY").MustBool(),
UseCertificate: sec.Key("USE_CERTIFICATE").MustBool(),
CertFile: sec.Key("CERT_FILE").String(),
KeyFile: sec.Key("KEY_FILE").String(),
UseSendmail: sec.Key("USE_SENDMAIL").MustBool(),
SendmailPath: sec.Key("SENDMAIL_PATH").MustString("sendmail"),
}
MailService.From = sec.Key("FROM").MustString(MailService.User)
if sec.HasKey("ENABLE_HTML_ALTERNATIVE") {
log.Warn("ENABLE_HTML_ALTERNATIVE is deprecated, use SEND_AS_PLAIN_TEXT")
MailService.SendAsPlainText = !sec.Key("ENABLE_HTML_ALTERNATIVE").MustBool(false)
}
parsed, err := mail.ParseAddress(MailService.From)
if err != nil {
log.Fatal(4, "Invalid mailer.FROM (%s): %v", MailService.From, err)
}
MailService.FromName = parsed.Name
MailService.FromEmail = parsed.Address
if MailService.UseSendmail {
MailService.SendmailArgs, err = shellquote.Split(sec.Key("SENDMAIL_ARGS").String())
if err != nil {
log.Error(4, "Failed to parse Sendmail args: %v", CustomConf, err)
}
}
log.Info("Mail Service Enabled")
}
func newRegisterMailService() {
if !Cfg.Section("service").Key("REGISTER_EMAIL_CONFIRM").MustBool() {
return
} else if MailService == nil {
log.Warn("Register Mail Service: Mail Service is not enabled")
return
}
Service.RegisterEmailConfirm = true
log.Info("Register Mail Service Enabled")
}
func newNotifyMailService() {
if !Cfg.Section("service").Key("ENABLE_NOTIFY_MAIL").MustBool() {
return
} else if MailService == nil {
log.Warn("Notify Mail Service: Mail Service is not enabled")
return
}
Service.EnableNotifyMail = true
log.Info("Notify Mail Service Enabled")
}
func newWebhookService() {
sec := Cfg.Section("webhook")
Webhook.QueueLength = sec.Key("QUEUE_LENGTH").MustInt(1000)
Webhook.DeliverTimeout = sec.Key("DELIVER_TIMEOUT").MustInt(5)
Webhook.SkipTLSVerify = sec.Key("SKIP_TLS_VERIFY").MustBool()
Webhook.Types = []string{"gitea", "gogs", "slack", "discord", "dingtalk"}
Webhook.PagingNum = sec.Key("PAGING_NUM").MustInt(10)
}
// NewServices initializes the services
func NewServices() {
newService()
newLogService()
NewXORMLogService(false)
newCacheService()
newSessionService()
newMailService()
newRegisterMailService()
newNotifyMailService()
newWebhookService()
}
| [
"\"GITEA_WORK_DIR\"",
"\"GITEA_CUSTOM\""
]
| []
| [
"GITEA_CUSTOM",
"GITEA_WORK_DIR"
]
| [] | ["GITEA_CUSTOM", "GITEA_WORK_DIR"] | go | 2 | 0 | |
tests/vmi_multus_test.go | /*
* This file is part of the kubevirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package tests_test
import (
"fmt"
"net"
"os"
"strings"
"time"
expect "github.com/google/goexpect"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v13 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
v1 "kubevirt.io/client-go/api/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/kubevirt/tests"
"kubevirt.io/kubevirt/tests/console"
cd "kubevirt.io/kubevirt/tests/containerdisk"
"kubevirt.io/kubevirt/tests/flags"
"kubevirt.io/kubevirt/tests/libnet"
"kubevirt.io/kubevirt/tests/libvmi"
)
const (
postUrl = "/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s"
linuxBridgeConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s"},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"mynet\", \"plugins\": [{\"type\": \"bridge\", \"bridge\": \"br10\", \"vlan\": 100, \"ipam\": {}},{\"type\": \"tuning\"}]}"}}`
ptpConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s"},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"mynet\", \"plugins\": [{\"type\": \"ptp\", \"ipam\": { \"type\": \"host-local\", \"subnet\": \"10.1.1.0/24\" }},{\"type\": \"tuning\"}]}"}}`
sriovConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s","annotations":{"k8s.v1.cni.cncf.io/resourceName":"%s"}},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"sriov\", \"type\": \"sriov\", \"ipam\": { \"type\": \"host-local\", \"subnet\": \"10.1.1.0/24\" } }"}}`
sriovLinkEnableConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s","annotations":{"k8s.v1.cni.cncf.io/resourceName":"%s"}},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"sriov\", \"type\": \"sriov\", \"link_state\": \"enable\", \"ipam\": { \"type\": \"host-local\", \"subnet\": \"10.1.1.0/24\" } }"}}`
macvtapNetworkConf = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s", "annotations": {"k8s.v1.cni.cncf.io/resourceName": "macvtap.network.kubevirt.io/%s"}},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"%s\", \"type\": \"macvtap\"}"}}`
)
var _ = Describe("[Serial]Multus", func() {
var err error
var virtClient kubecli.KubevirtClient
var nodes *k8sv1.NodeList
defaultInterface := v1.Interface{
Name: "default",
InterfaceBindingMethod: v1.InterfaceBindingMethod{
Masquerade: &v1.InterfaceMasquerade{},
},
}
linuxBridgeInterface := v1.Interface{
Name: "linux-bridge",
InterfaceBindingMethod: v1.InterfaceBindingMethod{
Bridge: &v1.InterfaceBridge{},
},
}
defaultNetwork := v1.Network{
Name: "default",
NetworkSource: v1.NetworkSource{
Pod: &v1.PodNetwork{},
},
}
linuxBridgeNetwork := v1.Network{
Name: "linux-bridge",
NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{
NetworkName: "linux-bridge-net-vlan100",
},
},
}
BeforeEach(func() {
virtClient, err = kubecli.GetKubevirtClient()
tests.PanicOnError(err)
// Multus tests need to ensure that old VMIs are gone
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestDefault).Resource("virtualmachineinstances").Do().Error()).To(Succeed())
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestAlternative).Resource("virtualmachineinstances").Do().Error()).To(Succeed())
Eventually(func() int {
list1, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
list2, err := virtClient.VirtualMachineInstance(tests.NamespaceTestAlternative).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
return len(list1.Items) + len(list2.Items)
}, 6*time.Minute, 1*time.Second).Should(BeZero())
})
createVMIOnNode := func(interfaces []v1.Interface, networks []v1.Network) *v1.VirtualMachineInstance {
vmi := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskAlpine), "#!/bin/bash\n")
vmi.Spec.Domain.Devices.Interfaces = interfaces
vmi.Spec.Networks = networks
// Arbitrarily select one compute node in the cluster, on which it is possible to create a VMI
// (i.e. a schedulable node).
nodeName := nodes.Items[0].Name
tests.StartVmOnNode(vmi, nodeName)
return vmi
}
tests.BeforeAll(func() {
tests.BeforeTestCleanup()
nodes = tests.GetAllSchedulableNodes(virtClient)
Expect(len(nodes.Items) > 0).To(BeTrue())
configureNodeNetwork(virtClient)
result := virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "linux-bridge-net-vlan100")).
Body([]byte(fmt.Sprintf(linuxBridgeConfCRD, "linux-bridge-net-vlan100", tests.NamespaceTestDefault))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
// Create ptp crds with tuning plugin enabled in two different namespaces
result = virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "ptp-conf-1")).
Body([]byte(fmt.Sprintf(ptpConfCRD, "ptp-conf-1", tests.NamespaceTestDefault))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
result = virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestAlternative, "ptp-conf-2")).
Body([]byte(fmt.Sprintf(ptpConfCRD, "ptp-conf-2", tests.NamespaceTestAlternative))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
})
Describe("[rfe_id:694][crit:medium][vendor:[email protected]][level:component]VirtualMachineInstance using different types of interfaces.", func() {
Context("VirtualMachineInstance with cni ptp plugin interface", func() {
It("[test_id:1751]should create a virtual machine with one interface", func() {
By("checking virtual machine instance can ping 10.1.1.1 using ptp cni plugin")
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{NetworkName: "ptp-conf-1"},
}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, tests.LoggedInCirrosExpecter)
Expect(libnet.PingFromVMConsole(detachedVMI, "10.1.1.1")).To(Succeed())
})
It("[test_id:1752]should create a virtual machine with one interface with network definition from different namespace", func() {
tests.SkipIfOpenShift4("OpenShift 4 does not support usage of the network definition from the different namespace")
By("checking virtual machine instance can ping 10.1.1.1 using ptp cni plugin")
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{NetworkName: fmt.Sprintf("%s/%s", tests.NamespaceTestAlternative, "ptp-conf-2")},
}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, tests.LoggedInCirrosExpecter)
Expect(libnet.PingFromVMConsole(detachedVMI, "10.1.1.1")).To(Succeed())
})
It("[test_id:1753]should create a virtual machine with two interfaces", func() {
By("checking virtual machine instance can ping 10.1.1.1 using ptp cni plugin")
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{
defaultInterface,
{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
defaultNetwork,
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{NetworkName: "ptp-conf-1"},
}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, tests.LoggedInCirrosExpecter)
cmdCheck := "sudo /sbin/cirros-dhcpc up eth1 > /dev/null\n"
err = console.SafeExpectBatch(detachedVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "ip addr show eth1 | grep 10.1.1 | wc -l\n"},
&expect.BExp{R: console.RetValue("1")},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("checking virtual machine instance has two interfaces")
Expect(checkInterface(detachedVMI, "eth0")).To(Succeed())
Expect(checkInterface(detachedVMI, "eth1")).To(Succeed())
Expect(libnet.PingFromVMConsole(detachedVMI, "10.1.1.1")).To(Succeed())
})
})
Context("VirtualMachineInstance with multus network as default network", func() {
It("[test_id:1751]should create a virtual machine with one interface with multus default network definition", func() {
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{
NetworkName: fmt.Sprintf("%s/%s", tests.NamespaceTestDefault, "ptp-conf-1"),
Default: true,
}}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, tests.LoggedInCirrosExpecter)
By("checking virtual machine instance can ping 10.1.1.1 using ptp cni plugin")
Expect(libnet.PingFromVMConsole(detachedVMI, "10.1.1.1")).To(Succeed())
By("checking virtual machine instance only has one interface")
// lo0, eth0
err = console.SafeExpectBatch(detachedVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "ip link show | grep -c UP\n"},
&expect.BExp{R: "2"},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("checking pod has only one interface")
// lo0, eth0, k6t-eth0, vnet0
output := tests.RunCommandOnVmiPod(detachedVMI, []string{"/bin/bash", "-c", "/usr/sbin/ip link show|grep -c UP"})
ExpectWithOffset(1, strings.TrimSpace(output)).To(Equal("4"))
})
})
Context("VirtualMachineInstance with cni ptp plugin interface with custom MAC address", func() {
It("[test_id:1705]should configure valid custom MAC address on ptp interface when using tuning plugin", func() {
customMacAddress := "50:00:00:00:90:0d"
ptpInterface := v1.Interface{
Name: "ptp",
InterfaceBindingMethod: v1.InterfaceBindingMethod{
Bridge: &v1.InterfaceBridge{},
},
}
ptpNetwork := v1.Network{
Name: "ptp",
NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{
NetworkName: "ptp-conf-1",
},
},
}
interfaces := []v1.Interface{ptpInterface}
networks := []v1.Network{ptpNetwork}
By("Creating a VM with custom MAC address on its ptp interface.")
interfaces[0].MacAddress = customMacAddress
vmiOne := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, tests.LoggedInAlpineExpecter)
By("Configuring static IP address to ptp interface.")
Expect(configInterface(vmiOne, "eth0", "10.1.1.1/24")).To(Succeed())
By("Verifying the desired custom MAC is the one that was actually configured on the interface.")
ipLinkShow := fmt.Sprintf("ip link show eth0 | grep -i \"%s\" | wc -l\n", customMacAddress)
err = console.SafeExpectBatch(vmiOne, []expect.Batcher{
&expect.BSnd{S: ipLinkShow},
&expect.BExp{R: "1"},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("Verifying the desired custom MAC is not configured inside the pod namespace.")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmiOne, tests.NamespaceTestDefault)
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", "ip a"},
)
Expect(err).ToNot(HaveOccurred())
Expect(strings.Contains(out, customMacAddress)).To(BeFalse())
})
})
Context("VirtualMachineInstance with Linux bridge plugin interface", func() {
It("[test_id:1577]should create two virtual machines with one interface", func() {
By("checking virtual machine instance can ping the secondary virtual machine instance using Linux bridge CNI plugin")
interfaces := []v1.Interface{linuxBridgeInterface}
networks := []v1.Network{linuxBridgeNetwork}
vmiOne := createVMIOnNode(interfaces, networks)
vmiTwo := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, tests.LoggedInAlpineExpecter)
tests.WaitUntilVMIReady(vmiTwo, tests.LoggedInAlpineExpecter)
Expect(configInterface(vmiOne, "eth0", "10.1.1.1/24")).To(Succeed())
By("checking virtual machine interface eth0 state")
Expect(checkInterface(vmiOne, "eth0")).To(Succeed())
Expect(configInterface(vmiTwo, "eth0", "10.1.1.2/24")).To(Succeed())
By("checking virtual machine interface eth0 state")
Expect(checkInterface(vmiTwo, "eth0")).To(Succeed())
By("ping between virtual machines")
Expect(libnet.PingFromVMConsole(vmiOne, "10.1.1.2")).To(Succeed())
})
It("[test_id:1578]should create two virtual machines with two interfaces", func() {
By("checking the first virtual machine instance can ping 10.1.1.2 using Linux bridge CNI plugin")
interfaces := []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
networks := []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
vmiOne := createVMIOnNode(interfaces, networks)
vmiTwo := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, tests.LoggedInAlpineExpecter)
tests.WaitUntilVMIReady(vmiTwo, tests.LoggedInAlpineExpecter)
Expect(configInterface(vmiOne, "eth1", "10.1.1.1/24")).To(Succeed())
By("checking virtual machine interface eth1 state")
Expect(checkInterface(vmiOne, "eth1")).To(Succeed())
Expect(configInterface(vmiTwo, "eth1", "10.1.1.2/24")).To(Succeed())
By("checking virtual machine interface eth1 state")
Expect(checkInterface(vmiTwo, "eth1")).To(Succeed())
By("ping between virtual machines")
Expect(libnet.PingFromVMConsole(vmiOne, "10.1.1.2")).To(Succeed())
})
})
Context("VirtualMachineInstance with Linux bridge CNI plugin interface and custom MAC address.", func() {
interfaces := []v1.Interface{linuxBridgeInterface}
networks := []v1.Network{linuxBridgeNetwork}
linuxBridgeIfIdx := 0
customMacAddress := "50:00:00:00:90:0d"
It("[test_id:676]should configure valid custom MAC address on Linux bridge CNI interface.", func() {
By("Creating a VM with Linux bridge CNI network interface and default MAC address.")
vmiTwo := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiTwo, tests.LoggedInAlpineExpecter)
By("Creating another VM with custom MAC address on its Linux bridge CNI interface.")
interfaces[linuxBridgeIfIdx].MacAddress = customMacAddress
vmiOne := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, tests.LoggedInAlpineExpecter)
By("Configuring static IP address to the Linux bridge interface.")
Expect(configInterface(vmiOne, "eth0", "10.1.1.1/24")).To(Succeed())
Expect(configInterface(vmiTwo, "eth0", "10.1.1.2/24")).To(Succeed())
By("Verifying the desired custom MAC is the one that were actually configured on the interface.")
ipLinkShow := fmt.Sprintf("ip link show eth0 | grep -i \"%s\" | wc -l\n", customMacAddress)
err = console.SafeExpectBatch(vmiOne, []expect.Batcher{
&expect.BSnd{S: ipLinkShow},
&expect.BExp{R: "1"},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("Verifying the desired custom MAC is not configured inside the pod namespace.")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmiOne, tests.NamespaceTestDefault)
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", "ip a"},
)
Expect(err).ToNot(HaveOccurred())
Expect(strings.Contains(out, customMacAddress)).To(BeFalse())
By("Ping from the VM with the custom MAC to the other VM.")
Expect(libnet.PingFromVMConsole(vmiOne, "10.1.1.2")).To(Succeed())
})
})
Context("Single VirtualMachineInstance with Linux bridge CNI plugin interface", func() {
It("[test_id:1756]should report all interfaces in Status", func() {
interfaces := []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
networks := []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
vmiOne := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, tests.LoggedInAlpineExpecter)
updatedVmi, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(vmiOne.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(len(updatedVmi.Status.Interfaces)).To(Equal(2))
interfacesByName := make(map[string]v1.VirtualMachineInstanceNetworkInterface)
for _, ifc := range updatedVmi.Status.Interfaces {
interfacesByName[ifc.Name] = ifc
}
for _, network := range networks {
ifc, is_present := interfacesByName[network.Name]
Expect(is_present).To(BeTrue())
Expect(ifc.MAC).To(Not(BeZero()))
}
Expect(interfacesByName["default"].MAC).To(Not(Equal(interfacesByName["linux-bridge"].MAC)))
err = console.SafeExpectBatch(updatedVmi, []expect.Batcher{
&expect.BSnd{S: fmt.Sprintf("ip addr show eth0 | grep %s | wc -l\n", interfacesByName["default"].MAC)},
&expect.BExp{R: "1"},
}, 15)
Expect(err).ToNot(HaveOccurred())
err = console.SafeExpectBatch(updatedVmi, []expect.Batcher{
&expect.BSnd{S: fmt.Sprintf("ip addr show eth1 | grep %s | wc -l\n", interfacesByName["linux-bridge"].MAC)},
&expect.BExp{R: "1"},
}, 15)
Expect(err).ToNot(HaveOccurred())
})
})
Context("VirtualMachineInstance with invalid MAC addres", func() {
It("[test_id:1713]should failed to start with invalid MAC address", func() {
By("Start VMI")
linuxBridgeIfIdx := 1
vmi := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskAlpine), "#!/bin/bash\n")
vmi.Spec.Domain.Devices.Interfaces = []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
vmi.Spec.Domain.Devices.Interfaces[linuxBridgeIfIdx].MacAddress = "de:00c:00c:00:00:de:abc"
vmi.Spec.Networks = []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)
Expect(err).To(HaveOccurred())
testErr := err.(*errors.StatusError)
Expect(testErr.ErrStatus.Reason).To(BeEquivalentTo("Invalid"))
})
})
})
Describe("[rfe_id:1758][crit:medium][vendor:[email protected]][level:component]VirtualMachineInstance definition", func() {
Context("with qemu guest agent", func() {
It("[test_id:1757] should report guest interfaces in VMI status", func() {
interfaces := []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
networks := []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
ep1Ip := "1.0.0.10/24"
ep2Ip := "1.0.0.11/24"
ep1IpV6 := "fe80::ce3d:82ff:fe52:24c0/64"
ep2IpV6 := "fe80::ce3d:82ff:fe52:24c1/64"
userdata := fmt.Sprintf(`#!/bin/bash
echo "fedora" |passwd fedora --stdin
setenforce 0
ip link add ep1 type veth peer name ep2
ip addr add %s dev ep1
ip addr add %s dev ep2
ip addr add %s dev ep1
ip addr add %s dev ep2
mkdir -p /usr/local/bin
curl %s > /usr/local/bin/qemu-ga
chmod +x /usr/local/bin/qemu-ga
curl %s > /lib64/libpixman-1.so.0
systemd-run --unit=guestagent /usr/local/bin/qemu-ga
`, ep1Ip, ep2Ip, ep1IpV6, ep2IpV6, tests.GetUrl(tests.GuestAgentHttpUrl), tests.GetUrl(tests.PixmanUrl))
agentVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskFedora), userdata)
agentVMI.Spec.Domain.Devices.Interfaces = interfaces
agentVMI.Spec.Networks = networks
agentVMI.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("1024M")
By("Starting a VirtualMachineInstance")
agentVMI, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(agentVMI)
Expect(err).ToNot(HaveOccurred(), "Should create VMI successfully")
tests.WaitForSuccessfulVMIStart(agentVMI)
// Need to wait for cloud init to finish and start the agent inside the vmi.
tests.WaitAgentConnected(virtClient, agentVMI)
getOptions := &metav1.GetOptions{}
Eventually(func() bool {
updatedVmi, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(agentVMI.Name, getOptions)
if err != nil {
return false
}
return len(updatedVmi.Status.Interfaces) == 4
}, 420*time.Second, 4).Should(BeTrue(), "Should have interfaces in vmi status")
updatedVmi, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(agentVMI.Name, getOptions)
Expect(err).ToNot(HaveOccurred())
Expect(len(updatedVmi.Status.Interfaces)).To(Equal(4))
interfaceByIfcName := make(map[string]v1.VirtualMachineInstanceNetworkInterface)
for _, ifc := range updatedVmi.Status.Interfaces {
interfaceByIfcName[ifc.InterfaceName] = ifc
}
Expect(interfaceByIfcName["eth0"].Name).To(Equal("default"))
Expect(interfaceByIfcName["eth0"].InterfaceName).To(Equal("eth0"))
Expect(interfaceByIfcName["eth1"].Name).To(Equal("linux-bridge"))
Expect(interfaceByIfcName["eth1"].InterfaceName).To(Equal("eth1"))
Expect(interfaceByIfcName["ep1"].Name).To(Equal(""))
Expect(interfaceByIfcName["ep1"].InterfaceName).To(Equal("ep1"))
Expect(interfaceByIfcName["ep1"].IP).To(Equal(ep1Ip))
Expect(interfaceByIfcName["ep1"].IPs).To(Equal([]string{ep1Ip, ep1IpV6}))
Expect(interfaceByIfcName["ep2"].Name).To(Equal(""))
Expect(interfaceByIfcName["ep2"].InterfaceName).To(Equal("ep2"))
Expect(interfaceByIfcName["ep2"].IP).To(Equal(ep2Ip))
Expect(interfaceByIfcName["ep2"].IPs).To(Equal([]string{ep2Ip, ep2IpV6}))
})
})
})
})
var _ = Describe("[Serial]SRIOV", func() {
var err error
var virtClient kubecli.KubevirtClient
sriovResourceName := os.Getenv("SRIOV_RESOURCE_NAME")
if sriovResourceName == "" {
sriovResourceName = "openshift.io/sriov_net"
}
tests.BeforeAll(func() {
virtClient, err = kubecli.GetKubevirtClient()
tests.PanicOnError(err)
tests.BeforeTestCleanup()
// Check if the hardware supports SRIOV
sriovcheck := checkSriovEnabled(virtClient, sriovResourceName)
if !sriovcheck {
Skip("Sriov is not enabled in this environment. Skip these tests using - export FUNC_TEST_ARGS='--ginkgo.skip=SRIOV'")
}
// Create two sriov networks referring to the same resource name
result := virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "sriov")).
Body([]byte(fmt.Sprintf(sriovConfCRD, "sriov", tests.NamespaceTestDefault, sriovResourceName))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
result = virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "sriov2")).
Body([]byte(fmt.Sprintf(sriovConfCRD, "sriov2", tests.NamespaceTestDefault, sriovResourceName))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
result = virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "sriov-link-enabled")).
Body([]byte(fmt.Sprintf(sriovLinkEnableConfCRD, "sriov-link-enabled", tests.NamespaceTestDefault, sriovResourceName))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
})
BeforeEach(func() {
// Multus tests need to ensure that old VMIs are gone
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestDefault).Resource("virtualmachineinstances").Do().Error()).To(Succeed())
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestAlternative).Resource("virtualmachineinstances").Do().Error()).To(Succeed())
Eventually(func() int {
list1, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
list2, err := virtClient.VirtualMachineInstance(tests.NamespaceTestAlternative).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
return len(list1.Items) + len(list2.Items)
}, 6*time.Minute, 1*time.Second).Should(BeZero())
})
Context("VirtualMachineInstance with sriov plugin interface", func() {
getSriovVmi := func(networks []string) *v1.VirtualMachineInstance {
// Pre-configured container-disk image for sriov-lane
vmi := tests.NewRandomVMIWithEphemeralDisk(cd.ContainerDiskFor(cd.ContainerDiskFedoraSRIOVLane))
// fedora requires some more memory to boot without kernel panics
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceName("memory")] = resource.MustParse("1024M")
// newer fedora kernels may require hardware RNG to boot
vmi.Spec.Domain.Devices.Rng = &v1.Rng{}
// pod network interface
vmi.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "default", Ports: []v1.Port{}, InterfaceBindingMethod: v1.InterfaceBindingMethod{Masquerade: &v1.InterfaceMasquerade{}}}}
vmi.Spec.Networks = []v1.Network{*v1.DefaultPodNetwork()}
// sriov network interfaces
for _, name := range networks {
iface := v1.Interface{Name: name, InterfaceBindingMethod: v1.InterfaceBindingMethod{SRIOV: &v1.InterfaceSRIOV{}}}
network := v1.Network{Name: name, NetworkSource: v1.NetworkSource{Multus: &v1.MultusNetwork{NetworkName: name}}}
vmi.Spec.Domain.Devices.Interfaces = append(vmi.Spec.Domain.Devices.Interfaces, iface)
vmi.Spec.Networks = append(vmi.Spec.Networks, network)
}
return vmi
}
startVmi := func(vmi *v1.VirtualMachineInstance) {
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)
Expect(err).ToNot(HaveOccurred())
return
}
waitVmi := func(vmi *v1.VirtualMachineInstance) {
// Need to wait for cloud init to finish and start the agent inside the vmi.
vmi, err := virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(vmi, tests.LoggedInFedoraExpecter)
tests.WaitAgentConnected(virtClient, vmi)
return
}
checkDefaultInterfaceInPod := func(vmi *v1.VirtualMachineInstance) {
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
By("checking default interface is present")
_, err = tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"ip", "address", "show", "eth0"},
)
Expect(err).ToNot(HaveOccurred())
By("checking default interface is attached to VMI")
_, err = tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"ip", "address", "show", "k6t-eth0"},
)
Expect(err).ToNot(HaveOccurred())
}
checkInterfacesInGuest := func(vmi *v1.VirtualMachineInstance, interfaces []string) {
for _, iface := range interfaces {
Expect(checkInterface(vmi, iface)).To(Succeed())
}
}
It("[test_id:1754]should create a virtual machine with sriov interface", func() {
vmi := getSriovVmi([]string{"sriov"})
startVmi(vmi)
waitVmi(vmi)
By("checking KUBEVIRT_RESOURCE_NAME_<networkName> variable is defined in pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", "echo $KUBEVIRT_RESOURCE_NAME_sriov"},
)
Expect(err).ToNot(HaveOccurred())
expectedSriovResourceName := fmt.Sprintf("%s\n", sriovResourceName)
Expect(out).To(Equal(expectedSriovResourceName))
checkDefaultInterfaceInPod(vmi)
By("checking virtual machine instance has two interfaces")
checkInterfacesInGuest(vmi, []string{"eth0", "eth1"})
// there is little we can do beyond just checking two devices are present: PCI slots are different inside
// the guest, and DP doesn't pass information about vendor IDs of allocated devices into the pod, so
// it's hard to match them.
})
It("[test_id:1754]should create a virtual machine with sriov interface with all pci devices on the root bus", func() {
vmi := getSriovVmi([]string{"sriov"})
vmi.Annotations = map[string]string{
v1.PlacePCIDevicesOnRootComplex: "true",
}
startVmi(vmi)
waitVmi(vmi)
By("checking KUBEVIRT_RESOURCE_NAME_<networkName> variable is defined in pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", "echo $KUBEVIRT_RESOURCE_NAME_sriov"},
)
Expect(err).ToNot(HaveOccurred())
expectedSriovResourceName := fmt.Sprintf("%s\n", sriovResourceName)
Expect(out).To(Equal(expectedSriovResourceName))
checkDefaultInterfaceInPod(vmi)
By("checking virtual machine instance has two interfaces")
checkInterfacesInGuest(vmi, []string{"eth0", "eth1"})
domSpec, err := tests.GetRunningVMIDomainSpec(vmi)
Expect(err).ToNot(HaveOccurred())
rootPortController := []api.Controller{}
for _, c := range domSpec.Devices.Controllers {
if c.Model == "pcie-root-port" {
rootPortController = append(rootPortController, c)
}
}
Expect(rootPortController).To(HaveLen(0), "libvirt should not add additional buses to the root one")
})
It("[test_id:3959]should create a virtual machine with sriov interface and dedicatedCPUs", func() {
// In addition to verifying that we can start a VMI with CPU pinning
// this also tests if we've correctly calculated the overhead for VFIO devices.
vmi := getSriovVmi([]string{"sriov"})
vmi.Spec.Domain.CPU = &v1.CPU{
Cores: 2,
DedicatedCPUPlacement: true,
}
startVmi(vmi)
waitVmi(vmi)
By("checking KUBEVIRT_RESOURCE_NAME_<networkName> variable is defined in pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", "echo $KUBEVIRT_RESOURCE_NAME_sriov"},
)
Expect(err).ToNot(HaveOccurred())
expectedSriovResourceName := fmt.Sprintf("%s\n", sriovResourceName)
Expect(out).To(Equal(expectedSriovResourceName))
checkDefaultInterfaceInPod(vmi)
By("checking virtual machine instance has two interfaces")
checkInterfacesInGuest(vmi, []string{"eth0", "eth1"})
})
It("[test_id:3985]should create a virtual machine with sriov interface with custom MAC address", func() {
mac := "de:ad:00:00:be:ef"
vmi := getSriovVmi([]string{"sriov"})
vmi.Spec.Domain.Devices.Interfaces[1].MacAddress = mac
startVmi(vmi)
waitVmi(vmi)
vmi, err := virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
interfaceName, err := getInterfaceNameByMAC(vmi, mac)
Expect(err).NotTo(HaveOccurred())
By("checking virtual machine instance has an interface with the requested MAC address")
Expect(checkMacAddress(vmi, interfaceName, mac)).To(Succeed())
})
It("[test_id:1755]should create a virtual machine with two sriov interfaces referring the same resource", func() {
vmi := getSriovVmi([]string{"sriov", "sriov2"})
startVmi(vmi)
waitVmi(vmi)
By("checking KUBEVIRT_RESOURCE_NAME_<networkName> variables are defined in pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
for _, name := range []string{"sriov", "sriov"} {
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", fmt.Sprintf("echo $KUBEVIRT_RESOURCE_NAME_%s", name)},
)
Expect(err).ToNot(HaveOccurred())
expectedSriovResourceName := fmt.Sprintf("%s\n", sriovResourceName)
Expect(out).To(Equal(expectedSriovResourceName))
}
checkDefaultInterfaceInPod(vmi)
By("checking virtual machine instance has three interfaces")
checkInterfacesInGuest(vmi, []string{"eth0", "eth1", "eth2"})
// there is little we can do beyond just checking three devices are present: PCI slots are different inside
// the guest, and DP doesn't pass information about vendor IDs of allocated devices into the pod, so
// it's hard to match them.
})
// pingThroughSriov instantiates two VMs connected through SR-IOV and
// pings between them.
// Note: test case assumes interconnectivity between SR-IOV
// interfaces. It can be achieved either by configuring the external switch
// properly, or via in-PF switching for VFs (works for some NIC models)
pingThroughSriov := func(cidrA, cidrB string) {
// start peer machines with sriov interfaces from the same resource pool
vmi1 := getSriovVmi([]string{"sriov-link-enabled"})
vmi2 := getSriovVmi([]string{"sriov-link-enabled"})
// Explicitly choose different random MAC addresses instead of relying on kubemacpool to do it:
// 1) we don't currently deploy kubemacpool in kind providers
// 2) even if we did, it's probably a good idea for the suite not to depend on that fact
//
// This step is needed to guarantee that no VFs on the PF carry a duplicate MAC address that may affect
// ability of VMIs to send and receive ICMP packets on their ports.
mac1, err := tests.GenerateRandomMac()
Expect(err).ToNot(HaveOccurred())
mac2, err := tests.GenerateRandomMac()
Expect(err).ToNot(HaveOccurred())
vmi1.Spec.Domain.Devices.Interfaces[1].MacAddress = mac1.String()
vmi2.Spec.Domain.Devices.Interfaces[1].MacAddress = mac2.String()
startVmi(vmi1)
startVmi(vmi2)
waitVmi(vmi1)
waitVmi(vmi2)
vmi1, err = virtClient.VirtualMachineInstance(vmi1.Namespace).Get(vmi1.Name, &metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
vmi2, err = virtClient.VirtualMachineInstance(vmi2.Namespace).Get(vmi2.Name, &metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// manually configure IP/link on sriov interfaces because there is
// no DHCP server to serve the address to the guest
Expect(configureInterfaceStaticIPByMAC(vmi1, mac1.String(), cidrA)).To(Succeed())
Expect(configureInterfaceStaticIPByMAC(vmi2, mac2.String(), cidrB)).To(Succeed())
// now check ICMP goes both ways
Expect(libnet.PingFromVMConsole(vmi1, cidrToIP(cidrB))).To(Succeed())
Expect(libnet.PingFromVMConsole(vmi2, cidrToIP(cidrA))).To(Succeed())
}
It("[test_id:3956]should connect to another machine with sriov interface over IPv4", func() {
Skip("Skip until https://github.com/kubevirt/kubevirt/issues/3774 fixed")
pingThroughSriov("192.168.1.1/24", "192.168.1.2/24")
})
It("[test_id:3957]should connect to another machine with sriov interface over IPv6", func() {
Skip("Skip until https://github.com/kubevirt/kubevirt/issues/3747 is fixed")
pingThroughSriov("fc00::1/64", "fc00::2/64")
})
})
})
var _ = Describe("[Serial]Macvtap", func() {
var err error
var virtClient kubecli.KubevirtClient
var macvtapLowerDevice string
var macvtapNetworkName string
BeforeEach(func() {
virtClient, err = kubecli.GetKubevirtClient()
tests.PanicOnError(err)
macvtapLowerDevice = "eth0"
macvtapNetworkName = "net1"
// cleanup the environment
tests.BeforeTestCleanup()
})
BeforeEach(func() {
tests.EnableFeatureGate(virtconfig.MacvtapGate)
})
BeforeEach(func() {
result := virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, macvtapNetworkName)).
Body([]byte(fmt.Sprintf(macvtapNetworkConf, macvtapNetworkName, tests.NamespaceTestDefault, macvtapLowerDevice, macvtapNetworkName))).
Do()
Expect(result.Error()).NotTo(HaveOccurred(), "A macvtap network named %s should be provisioned", macvtapNetworkName)
})
AfterEach(func() {
tests.DisableFeatureGate(virtconfig.MacvtapGate)
})
newCirrosVMIWithMacvtapNetwork := func(macvtapNetworkName string) *v1.VirtualMachineInstance {
macvtapMultusNetwork := libvmi.MultusNetwork(macvtapNetworkName)
return libvmi.NewCirros(
libvmi.WithInterface(*v1.DefaultMacvtapNetworkInterface(macvtapNetworkName)),
libvmi.WithNetwork(&macvtapMultusNetwork))
}
createCirrosVMIWithMacvtapDefinedMAC := func(virtClient kubecli.KubevirtClient, networkName string, mac string) *v1.VirtualMachineInstance {
vmi := newCirrosVMIWithMacvtapNetwork(networkName)
vmi.Spec.Domain.Devices.Interfaces[0].MacAddress = mac
return tests.RunVMIAndExpectLaunchWithIgnoreWarningArg(vmi, 180, false)
}
createCirrosVMIWithMacvtapStaticIP := func(virtClient kubecli.KubevirtClient, nodeName string, networkName string, ifaceName string, ipCIDR string, mac *string) *v1.VirtualMachineInstance {
vmi := newCirrosVMIWithMacvtapNetwork(networkName)
if mac != nil {
vmi.Spec.Domain.Devices.Interfaces[0].MacAddress = *mac
}
vmi = tests.WaitUntilVMIReady(
tests.StartVmOnNode(vmi, nodeName),
tests.LoggedInCirrosExpecter)
// configure the client VMI
Expect(configVMIInterfaceWithSudo(vmi, ifaceName, ipCIDR)).To(Succeed())
return vmi
}
Context("a virtual machine with one macvtap interface, with a custom MAC address", func() {
var serverVMI *v1.VirtualMachineInstance
var chosenMAC string
var serverCIDR string
var nodeList *k8sv1.NodeList
var nodeName string
BeforeEach(func() {
nodeList = tests.GetAllSchedulableNodes(virtClient)
Expect(nodeList.Items).NotTo(BeEmpty(), "schedulable kubernetes nodes must be present")
nodeName = nodeList.Items[0].Name
chosenMAC = "de:ad:00:00:be:af"
serverCIDR = "192.0.2.102/24"
serverVMI = createCirrosVMIWithMacvtapStaticIP(virtClient, nodeName, macvtapNetworkName, "eth0", serverCIDR, &chosenMAC)
})
It("should have the specified MAC address reported back via the API", func() {
Expect(len(serverVMI.Status.Interfaces)).To(Equal(1), "should have a single interface")
Expect(serverVMI.Status.Interfaces[0].MAC).To(Equal(chosenMAC), "the expected MAC address should be set in the VMI")
})
Context("and another virtual machine connected to the same network", func() {
var clientVMI *v1.VirtualMachineInstance
BeforeEach(func() {
clientVMI = createCirrosVMIWithMacvtapStaticIP(virtClient, nodeName, macvtapNetworkName, "eth0", "192.0.2.101/24", nil)
})
It("can communicate with the virtual machine in the same network", func() {
Expect(libnet.PingFromVMConsole(clientVMI, cidrToIP(serverCIDR))).To(Succeed())
})
})
})
Context("VMI migration", func() {
var macvtapVMI *v1.VirtualMachineInstance
BeforeEach(func() {
nodes := tests.GetAllSchedulableNodes(virtClient)
Expect(nodes.Items).ToNot(BeEmpty(), "There should be some compute node")
if len(nodes.Items) < 2 {
Skip("Migration tests require at least 2 nodes")
}
if !tests.HasLiveMigration() {
Skip("Migration tests require the 'LiveMigration' feature gate")
}
})
BeforeEach(func() {
macAddress := "02:03:04:05:06:07"
macvtapVMI = createCirrosVMIWithMacvtapDefinedMAC(virtClient, macvtapNetworkName, macAddress)
})
It("should be successful when the VMI MAC address is defined in its spec", func() {
By("starting the migration")
migration := tests.NewRandomMigration(macvtapVMI.GetName(), macvtapVMI.GetNamespace())
migrationUID := tests.RunMigrationAndExpectCompletion(virtClient, migration, migrationWaitTime)
// check VMI, confirm migration state
tests.ConfirmVMIPostMigration(virtClient, macvtapVMI, migrationUID)
})
})
})
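// cidrToIP extracts the IP address part of a CIDR string (e.g. "10.1.1.1/24" -> "10.1.1.1").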
func cidrToIP(cidr string) string {
ip, _, err := net.ParseCIDR(cidr)
Expect(err).ToNot(HaveOccurred(), "Should be able to parse IP and prefix length from CIDR")
return ip.String()
}
func configVMIInterfaceWithSudo(vmi *v1.VirtualMachineInstance, interfaceName, interfaceAddress string) error {
return configInterface(vmi, interfaceName, interfaceAddress, "sudo ")
}
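// configInterface assigns a static IP address to the given guest interface on the VMI console
// (optionally prefixing the command, e.g. with "sudo ") and then brings the link up.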
func configInterface(vmi *v1.VirtualMachineInstance, interfaceName, interfaceAddress string, userModifierPrefix ...string) error {
setStaticIpCmd := fmt.Sprintf("%sip addr add %s dev %s\n", strings.Join(userModifierPrefix, " "), interfaceAddress, interfaceName)
err := runSafeCommand(vmi, setStaticIpCmd)
if err != nil {
return fmt.Errorf("could not configure address %s for interface %s on VMI %s: %w", interfaceAddress, interfaceName, vmi.Name, err)
}
return setInterfaceUp(vmi, interfaceName)
}
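// configureInterfaceStaticIPByMAC resolves the guest interface name from its MAC address
// (as reported in the VMI status) and configures a static IP address on it.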
func configureInterfaceStaticIPByMAC(vmi *v1.VirtualMachineInstance, interfaceMac, interfaceAddress string) error {
interfaceName, err := getInterfaceNameByMAC(vmi, interfaceMac)
if err != nil {
return fmt.Errorf("could not configure address %s for interface with mac %s on VMI %s: %w", interfaceAddress, interfaceMac, vmi.Name, err)
}
return configInterface(vmi, interfaceName, interfaceAddress)
}
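// checkInterface verifies that the named link exists in the guest by running `ip link show` on the VMI console.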
func checkInterface(vmi *v1.VirtualMachineInstance, interfaceName string) error {
cmdCheck := fmt.Sprintf("ip link show %s\n", interfaceName)
err := runSafeCommand(vmi, cmdCheck)
if err != nil {
return fmt.Errorf("could not check interface: interface %s was not found in the VMI %s: %w", interfaceName, vmi.Name, err)
}
return nil
}
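// checkMacAddress verifies, via the VMI console, that the named guest interface reports the expected MAC address.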
func checkMacAddress(vmi *v1.VirtualMachineInstance, interfaceName, macAddress string) error {
cmdCheck := fmt.Sprintf("ip link show %s\n", interfaceName)
err := console.SafeExpectBatch(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: macAddress},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
}, 15)
if err != nil {
return fmt.Errorf("could not check mac address of interface %s: MAC %s was not found in the VMI %s: %w", interfaceName, macAddress, vmi.Name, err)
}
return nil
}
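// setInterfaceUp brings the named link up inside the guest.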
func setInterfaceUp(vmi *v1.VirtualMachineInstance, interfaceName string) error {
setUpCmd := fmt.Sprintf("ip link set %s up\n", interfaceName)
err := runSafeCommand(vmi, setUpCmd)
if err != nil {
return fmt.Errorf("could not set interface %s up on VMI %s: %w", interfaceName, vmi.Name, err)
}
return nil
}
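// runSafeCommand runs a shell command on the VMI console and expects it to exit with status 0.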
func runSafeCommand(vmi *v1.VirtualMachineInstance, command string) error {
return console.SafeExpectBatch(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: command},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
}, 15)
}
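// getInterfaceNameByMAC looks up the guest interface name for the given MAC address in the VMI status.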
func getInterfaceNameByMAC(vmi *v1.VirtualMachineInstance, mac string) (string, error) {
for _, iface := range vmi.Status.Interfaces {
if iface.MAC == mac {
return iface.InterfaceName, nil
}
}
return "", fmt.Errorf("could not get sriov interface by MAC: no interface on VMI %s with MAC %s", vmi.Name, mac)
}
// Tests in the Multus suite expect a Linux bridge to be available on each node, with iptables allowing
// traffic to go through. This function creates a DaemonSet on the cluster (if it does not exist yet); the DaemonSet
// creates a Linux bridge and configures the firewall. We use iptables-compat in order to work with
// both iptables and newer nftables.
// TODO: Once kubernetes-nmstate is ready, we should use it instead
func configureNodeNetwork(virtClient kubecli.KubevirtClient) {
// Fetching the kubevirt-operator image from the pod makes this independent from the installation method / image used
pods, err := virtClient.CoreV1().Pods(flags.KubeVirtInstallNamespace).List(metav1.ListOptions{LabelSelector: "kubevirt.io=virt-operator"})
Expect(err).ToNot(HaveOccurred())
Expect(pods.Items).ToNot(BeEmpty())
virtOperatorImage := pods.Items[0].Spec.Containers[0].Image
// Privileged DaemonSet configuring host networking as needed
networkConfigDaemonSet := appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: "network-config",
Namespace: metav1.NamespaceSystem,
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": "network-config"},
},
Template: k8sv1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": "network-config"},
},
Spec: k8sv1.PodSpec{
Containers: []k8sv1.Container{
{
Name: "network-config",
// Reuse image which is already installed in the cluster. All we need is chroot.
// Local OKD cluster doesn't allow us to pull from the outside.
Image: virtOperatorImage,
Command: []string{
"sh",
"-c",
"set -x; chroot /host ip link add br10 type bridge; chroot /host iptables -I FORWARD 1 -i br10 -j ACCEPT; touch /tmp/ready; sleep INF",
},
SecurityContext: &k8sv1.SecurityContext{
Privileged: pointer.BoolPtr(true),
RunAsUser: pointer.Int64Ptr(0),
},
ReadinessProbe: &k8sv1.Probe{
Handler: k8sv1.Handler{
Exec: &k8sv1.ExecAction{
Command: []string{"cat", "/tmp/ready"},
},
},
},
VolumeMounts: []k8sv1.VolumeMount{
k8sv1.VolumeMount{
Name: "host",
MountPath: "/host",
},
},
},
},
Volumes: []k8sv1.Volume{
k8sv1.Volume{
Name: "host",
VolumeSource: k8sv1.VolumeSource{
HostPath: &k8sv1.HostPathVolumeSource{
Path: "/",
},
},
},
},
HostNetwork: true,
},
},
},
}
// Helper function returning the existing network-config DaemonSet if it exists
getNetworkConfigDaemonSet := func() *appsv1.DaemonSet {
daemonSet, err := virtClient.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(networkConfigDaemonSet.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}
Expect(err).NotTo(HaveOccurred())
return daemonSet
}
// If the DaemonSet hasn't been created yet, create it
runningNetworkConfigDaemonSet := getNetworkConfigDaemonSet()
if runningNetworkConfigDaemonSet == nil {
_, err := virtClient.AppsV1().DaemonSets(metav1.NamespaceSystem).Create(&networkConfigDaemonSet)
Expect(err).NotTo(HaveOccurred())
}
// Make sure that all pods in the Daemon Set finished the configuration
nodes := tests.GetAllSchedulableNodes(virtClient)
Eventually(func() int {
daemonSet := getNetworkConfigDaemonSet()
return int(daemonSet.Status.NumberAvailable)
}, time.Minute, time.Second).Should(Equal(len(nodes.Items)))
}
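// checkSriovEnabled returns true if any schedulable node advertises an allocatable amount (> 0) of the given SR-IOV resource.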
func checkSriovEnabled(virtClient kubecli.KubevirtClient, sriovResourceName string) bool {
nodes := tests.GetAllSchedulableNodes(virtClient)
Expect(nodes.Items).ToNot(BeEmpty(), "There should be some compute node")
for _, node := range nodes.Items {
resourceList := node.Status.Allocatable
for k, v := range resourceList {
if string(k) == sriovResourceName {
if v.Value() > 0 {
return true
}
}
}
}
return false
}
| [
"\"SRIOV_RESOURCE_NAME\""
]
| []
| [
"SRIOV_RESOURCE_NAME"
]
| [] | ["SRIOV_RESOURCE_NAME"] | go | 1 | 0 | |
cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java | /*
* Copyright 2017-2018, Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.operator.cluster.model;
import io.fabric8.kubernetes.api.model.Affinity;
import io.fabric8.kubernetes.api.model.AffinityBuilder;
import io.fabric8.kubernetes.api.model.Container;
import io.fabric8.kubernetes.api.model.ContainerBuilder;
import io.fabric8.kubernetes.api.model.ContainerPort;
import io.fabric8.kubernetes.api.model.EnvVar;
import io.fabric8.kubernetes.api.model.IntOrString;
import io.fabric8.kubernetes.api.model.LabelSelector;
import io.fabric8.kubernetes.api.model.LifecycleBuilder;
import io.fabric8.kubernetes.api.model.LocalObjectReference;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.ResourceRequirements;
import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.Service;
import io.fabric8.kubernetes.api.model.ServicePort;
import io.fabric8.kubernetes.api.model.Toleration;
import io.fabric8.kubernetes.api.model.Volume;
import io.fabric8.kubernetes.api.model.VolumeBuilder;
import io.fabric8.kubernetes.api.model.VolumeMount;
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
import io.fabric8.kubernetes.api.model.extensions.HTTPIngressPath;
import io.fabric8.kubernetes.api.model.extensions.HTTPIngressPathBuilder;
import io.fabric8.kubernetes.api.model.extensions.Ingress;
import io.fabric8.kubernetes.api.model.extensions.IngressBuilder;
import io.fabric8.kubernetes.api.model.extensions.IngressRule;
import io.fabric8.kubernetes.api.model.extensions.IngressRuleBuilder;
import io.fabric8.kubernetes.api.model.extensions.IngressTLS;
import io.fabric8.kubernetes.api.model.extensions.IngressTLSBuilder;
import io.fabric8.kubernetes.api.model.networking.NetworkPolicy;
import io.fabric8.kubernetes.api.model.networking.NetworkPolicyBuilder;
import io.fabric8.kubernetes.api.model.networking.NetworkPolicyIngressRule;
import io.fabric8.kubernetes.api.model.networking.NetworkPolicyIngressRuleBuilder;
import io.fabric8.kubernetes.api.model.networking.NetworkPolicyPeer;
import io.fabric8.kubernetes.api.model.networking.NetworkPolicyPort;
import io.fabric8.kubernetes.api.model.policy.PodDisruptionBudget;
import io.fabric8.kubernetes.api.model.rbac.KubernetesClusterRoleBinding;
import io.fabric8.kubernetes.api.model.rbac.KubernetesClusterRoleBindingBuilder;
import io.fabric8.kubernetes.api.model.rbac.KubernetesRoleRef;
import io.fabric8.kubernetes.api.model.rbac.KubernetesRoleRefBuilder;
import io.fabric8.kubernetes.api.model.rbac.KubernetesSubject;
import io.fabric8.kubernetes.api.model.rbac.KubernetesSubjectBuilder;
import io.fabric8.openshift.api.model.Route;
import io.fabric8.openshift.api.model.RouteBuilder;
import io.strimzi.api.kafka.model.storage.EphemeralStorage;
import io.strimzi.api.kafka.model.InlineLogging;
import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.Kafka;
import io.strimzi.api.kafka.model.KafkaAuthorization;
import io.strimzi.api.kafka.model.KafkaAuthorizationSimple;
import io.strimzi.api.kafka.model.KafkaClusterSpec;
import io.strimzi.api.kafka.model.KafkaResources;
import io.strimzi.api.kafka.model.Logging;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorage;
import io.strimzi.api.kafka.model.Rack;
import io.strimzi.api.kafka.model.storage.SingleVolumeStorage;
import io.strimzi.api.kafka.model.storage.Storage;
import io.strimzi.api.kafka.model.TlsSidecar;
import io.strimzi.api.kafka.model.listener.ExternalListenerBootstrapOverride;
import io.strimzi.api.kafka.model.listener.ExternalListenerBrokerOverride;
import io.strimzi.api.kafka.model.listener.IngressListenerBrokerConfiguration;
import io.strimzi.api.kafka.model.listener.IngressListenerConfiguration;
import io.strimzi.api.kafka.model.listener.KafkaListenerAuthenticationTls;
import io.strimzi.api.kafka.model.listener.KafkaListenerExternalIngress;
import io.strimzi.api.kafka.model.listener.KafkaListenerExternalLoadBalancer;
import io.strimzi.api.kafka.model.listener.KafkaListenerExternalNodePort;
import io.strimzi.api.kafka.model.listener.KafkaListenerExternalRoute;
import io.strimzi.api.kafka.model.listener.KafkaListeners;
import io.strimzi.api.kafka.model.listener.LoadBalancerListenerBrokerOverride;
import io.strimzi.api.kafka.model.listener.LoadBalancerListenerOverride;
import io.strimzi.api.kafka.model.listener.NodePortListenerBrokerOverride;
import io.strimzi.api.kafka.model.listener.NodePortListenerOverride;
import io.strimzi.api.kafka.model.listener.RouteListenerBrokerOverride;
import io.strimzi.api.kafka.model.listener.RouteListenerOverride;
import io.strimzi.api.kafka.model.template.KafkaClusterTemplate;
import io.strimzi.certs.CertAndKey;
import io.strimzi.operator.common.Annotations;
import io.strimzi.operator.common.model.Labels;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static java.util.Arrays.asList;
@SuppressWarnings("checkstyle:ClassDataAbstractionCoupling")
public class KafkaCluster extends AbstractModel {
protected static final String INIT_NAME = "kafka-init";
protected static final String INIT_VOLUME_NAME = "rack-volume";
protected static final String INIT_VOLUME_MOUNT = "/opt/kafka/init";
private static final String ENV_VAR_KAFKA_INIT_RACK_TOPOLOGY_KEY = "RACK_TOPOLOGY_KEY";
private static final String ENV_VAR_KAFKA_INIT_NODE_NAME = "NODE_NAME";
private static final String ENV_VAR_KAFKA_INIT_EXTERNAL_ADDRESS = "EXTERNAL_ADDRESS";
private static final String ENV_VAR_KAFKA_INIT_EXTERNAL_ADVERTISED_ADDRESSES = "EXTERNAL_ADVERTISED_ADDRESSES";
/** {@code TRUE} when the CLIENT listener (PLAIN transport) should be enabled */
private static final String ENV_VAR_KAFKA_CLIENT_ENABLED = "KAFKA_CLIENT_ENABLED";
/** The authentication to configure for the CLIENT listener (PLAIN transport). */
private static final String ENV_VAR_KAFKA_CLIENT_AUTHENTICATION = "KAFKA_CLIENT_AUTHENTICATION";
/** {@code TRUE} when the CLIENTTLS listener (TLS transport) should be enabled */
private static final String ENV_VAR_KAFKA_CLIENTTLS_ENABLED = "KAFKA_CLIENTTLS_ENABLED";
/** The authentication to configure for the CLIENTTLS listener (TLS transport). */
private static final String ENV_VAR_KAFKA_CLIENTTLS_AUTHENTICATION = "KAFKA_CLIENTTLS_AUTHENTICATION";
public static final String ENV_VAR_KAFKA_EXTERNAL_ENABLED = "KAFKA_EXTERNAL_ENABLED";
protected static final String ENV_VAR_KAFKA_EXTERNAL_ADDRESSES = "KAFKA_EXTERNAL_ADDRESSES";
protected static final String ENV_VAR_KAFKA_EXTERNAL_AUTHENTICATION = "KAFKA_EXTERNAL_AUTHENTICATION";
protected static final String ENV_VAR_KAFKA_EXTERNAL_TLS = "KAFKA_EXTERNAL_TLS";
private static final String ENV_VAR_KAFKA_AUTHORIZATION_TYPE = "KAFKA_AUTHORIZATION_TYPE";
private static final String ENV_VAR_KAFKA_AUTHORIZATION_SUPER_USERS = "KAFKA_AUTHORIZATION_SUPER_USERS";
public static final String ENV_VAR_KAFKA_ZOOKEEPER_CONNECT = "KAFKA_ZOOKEEPER_CONNECT";
private static final String ENV_VAR_KAFKA_METRICS_ENABLED = "KAFKA_METRICS_ENABLED";
public static final String ENV_VAR_KAFKA_LOG_DIRS = "KAFKA_LOG_DIRS";
public static final String ENV_VAR_KAFKA_CONFIGURATION = "KAFKA_CONFIGURATION";
protected static final int CLIENT_PORT = 9092;
protected static final String CLIENT_PORT_NAME = "clients";
protected static final int REPLICATION_PORT = 9091;
protected static final String REPLICATION_PORT_NAME = "replication";
protected static final int CLIENT_TLS_PORT = 9093;
protected static final String CLIENT_TLS_PORT_NAME = "clientstls";
protected static final int EXTERNAL_PORT = 9094;
protected static final String EXTERNAL_PORT_NAME = "external";
protected static final String KAFKA_NAME = "kafka";
protected static final String CLUSTER_CA_CERTS_VOLUME = "cluster-ca";
protected static final String BROKER_CERTS_VOLUME = "broker-certs";
protected static final String CLIENT_CA_CERTS_VOLUME = "client-ca-cert";
protected static final String CLUSTER_CA_CERTS_VOLUME_MOUNT = "/opt/kafka/cluster-ca-certs";
protected static final String BROKER_CERTS_VOLUME_MOUNT = "/opt/kafka/broker-certs";
protected static final String CLIENT_CA_CERTS_VOLUME_MOUNT = "/opt/kafka/client-ca-certs";
protected static final String TLS_SIDECAR_NAME = "tls-sidecar";
protected static final String TLS_SIDECAR_KAFKA_CERTS_VOLUME_MOUNT = "/etc/tls-sidecar/kafka-brokers/";
protected static final String TLS_SIDECAR_CLUSTER_CA_CERTS_VOLUME_MOUNT = "/etc/tls-sidecar/cluster-ca-certs/";
private static final String NAME_SUFFIX = "-kafka";
// Suffixes for secrets with certificates
private static final String SECRET_BROKERS_SUFFIX = NAME_SUFFIX + "-brokers";
/** Records the Kafka version currently running inside Kafka StatefulSet */
public static final String ANNO_STRIMZI_IO_KAFKA_VERSION = Annotations.STRIMZI_DOMAIN + "/kafka-version";
/** Records the state of the Kafka upgrade process. Unset outside of upgrades. */
public static final String ANNO_STRIMZI_IO_FROM_VERSION = Annotations.STRIMZI_DOMAIN + "/from-version";
/** Records the state of the Kafka upgrade process. Unset outside of upgrades. */
public static final String ANNO_STRIMZI_IO_TO_VERSION = Annotations.STRIMZI_DOMAIN + "/to-version";
// Kafka configuration
private String zookeeperConnect;
private Rack rack;
private String initImage;
private TlsSidecar tlsSidecar;
private KafkaListeners listeners;
private KafkaAuthorization authorization;
private Set<String> externalAddresses = new HashSet<>();
private KafkaVersion kafkaVersion;
// Templates
protected Map<String, String> templateExternalBootstrapServiceLabels;
protected Map<String, String> templateExternalBootstrapServiceAnnotations;
protected Map<String, String> templatePerPodServiceLabels;
protected Map<String, String> templatePerPodServiceAnnotations;
protected Map<String, String> templateExternalBootstrapRouteLabels;
protected Map<String, String> templateExternalBootstrapRouteAnnotations;
protected Map<String, String> templatePerPodRouteLabels;
protected Map<String, String> templatePerPodRouteAnnotations;
protected Map<String, String> templateExternalBootstrapIngressLabels;
protected Map<String, String> templateExternalBootstrapIngressAnnotations;
protected Map<String, String> templatePerPodIngressLabels;
protected Map<String, String> templatePerPodIngressAnnotations;
// Configuration defaults
private static final int DEFAULT_REPLICAS = 3;
private static final int DEFAULT_HEALTHCHECK_DELAY = 15;
private static final int DEFAULT_HEALTHCHECK_TIMEOUT = 5;
private static final boolean DEFAULT_KAFKA_METRICS_ENABLED = false;
/**
* Private key and certificate for each Kafka pod, keyed by pod name,
* used as server certificates for the Kafka brokers
*/
private Map<String, CertAndKey> brokerCerts;
/**
* Lists with volumes, persistent volume claims and related volume mount paths for the storage
*/
List<Volume> dataVolumes = new ArrayList<>();
List<PersistentVolumeClaim> dataPvcs = new ArrayList<>();
List<VolumeMount> dataVolumeMountPaths = new ArrayList<>();
/**
* Constructor
*
* @param namespace Kubernetes/OpenShift namespace where Kafka cluster resources are going to be created
* @param cluster overall cluster name
* @param labels labels to add to the cluster
*/
private KafkaCluster(String namespace, String cluster, Labels labels) {
super(namespace, cluster, labels);
this.name = kafkaClusterName(cluster);
this.serviceName = serviceName(cluster);
this.headlessServiceName = headlessServiceName(cluster);
this.ancillaryConfigName = metricAndLogConfigsName(cluster);
this.replicas = DEFAULT_REPLICAS;
this.readinessTimeout = DEFAULT_HEALTHCHECK_TIMEOUT;
this.readinessInitialDelay = DEFAULT_HEALTHCHECK_DELAY;
this.livenessTimeout = DEFAULT_HEALTHCHECK_TIMEOUT;
this.livenessInitialDelay = DEFAULT_HEALTHCHECK_DELAY;
this.isMetricsEnabled = DEFAULT_KAFKA_METRICS_ENABLED;
setZookeeperConnect(ZookeeperCluster.serviceName(cluster) + ":2181");
this.mountPath = "/var/lib/kafka";
this.logAndMetricsConfigVolumeName = "kafka-metrics-and-logging";
this.logAndMetricsConfigMountPath = "/opt/kafka/custom-config/";
this.initImage = System.getenv().getOrDefault("STRIMZI_DEFAULT_TOPIC_OPERATOR_IMAGE", "strimzi/operator:latest");
}
public static String kafkaClusterName(String cluster) {
return KafkaResources.kafkaStatefulSetName(cluster);
}
public static String metricAndLogConfigsName(String cluster) {
return KafkaResources.kafkaMetricsAndLogConfigMapName(cluster);
}
public static String serviceName(String cluster) {
return KafkaResources.bootstrapServiceName(cluster);
}
public static String podDnsName(String namespace, String cluster, int podId) {
return String.format("%s.%s.%s.svc.%s",
KafkaCluster.kafkaPodName(cluster, podId),
KafkaCluster.headlessServiceName(cluster),
namespace,
ModelUtils.KUBERNETES_SERVICE_DNS_DOMAIN);
}
/**
* Generates the name of the service used as the bootstrap service for external clients
*
* @param cluster Name of the cluster
* @return The name of the external bootstrap service
*/
public static String externalBootstrapServiceName(String cluster) {
return KafkaResources.externalBootstrapServiceName(cluster);
}
/**
* Generates the name of the service for exposing individual pods
*
* @param cluster Name of the cluster
* @param pod Pod sequence number assigned by the StatefulSet
* @return The name of the external service for the given pod
*/
public static String externalServiceName(String cluster, int pod) {
return kafkaClusterName(cluster) + "-" + pod;
}
public static String headlessServiceName(String cluster) {
return KafkaResources.brokersServiceName(cluster);
}
public static String kafkaPodName(String cluster, int pod) {
return kafkaClusterName(cluster) + "-" + pod;
}
public static String clientsCaKeySecretName(String cluster) {
return KafkaResources.clientsCaKeySecretName(cluster);
}
public static String brokersSecretName(String cluster) {
return cluster + KafkaCluster.SECRET_BROKERS_SUFFIX;
}
public static String clientsCaCertSecretName(String cluster) {
return KafkaResources.clientsCaCertificateSecretName(cluster);
}
public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
return fromCrd(kafkaAssembly, versions, null);
}
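    /**
     * Creates the Kafka cluster model from the Kafka custom resource.
     *
     * @param kafkaAssembly The Kafka custom resource
     * @param versions Supported Kafka versions
     * @param oldStorage Storage of the existing cluster, used to detect and ignore disallowed storage changes
     * @return The Kafka cluster model
     */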
@SuppressWarnings("checkstyle:MethodLength")
public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage) {
KafkaCluster result = new KafkaCluster(kafkaAssembly.getMetadata().getNamespace(),
kafkaAssembly.getMetadata().getName(),
Labels.fromResource(kafkaAssembly).withKind(kafkaAssembly.getKind()));
result.setOwnerReference(kafkaAssembly);
KafkaClusterSpec kafkaClusterSpec = kafkaAssembly.getSpec().getKafka();
result.setReplicas(kafkaClusterSpec.getReplicas());
String image = versions.kafkaImage(kafkaClusterSpec.getImage(), kafkaClusterSpec.getVersion());
if (image == null) {
throw new InvalidResourceException("Version " + kafkaClusterSpec.getVersion() + " is not supported. Supported versions are: " + String.join(", ", versions.supportedVersions()) + ".");
}
result.setImage(image);
if (kafkaClusterSpec.getReadinessProbe() != null) {
result.setReadinessInitialDelay(kafkaClusterSpec.getReadinessProbe().getInitialDelaySeconds());
result.setReadinessTimeout(kafkaClusterSpec.getReadinessProbe().getTimeoutSeconds());
}
if (kafkaClusterSpec.getLivenessProbe() != null) {
result.setLivenessInitialDelay(kafkaClusterSpec.getLivenessProbe().getInitialDelaySeconds());
result.setLivenessTimeout(kafkaClusterSpec.getLivenessProbe().getTimeoutSeconds());
}
result.setRack(kafkaClusterSpec.getRack());
String initImage = kafkaClusterSpec.getBrokerRackInitImage();
if (initImage == null) {
initImage = System.getenv().getOrDefault("STRIMZI_DEFAULT_KAFKA_INIT_IMAGE", "strimzi/operator:latest");
}
result.setInitImage(initImage);
Logging logging = kafkaClusterSpec.getLogging();
result.setLogging(logging == null ? new InlineLogging() : logging);
result.setGcLoggingEnabled(kafkaClusterSpec.getJvmOptions() == null ? true : kafkaClusterSpec.getJvmOptions().isGcLoggingEnabled());
result.setJvmOptions(kafkaClusterSpec.getJvmOptions());
result.setConfiguration(new KafkaConfiguration(kafkaClusterSpec.getConfig().entrySet()));
Map<String, Object> metrics = kafkaClusterSpec.getMetrics();
if (metrics != null) {
result.setMetricsEnabled(true);
result.setMetricsConfig(metrics.entrySet());
}
if (oldStorage != null) {
Storage newStorage = kafkaClusterSpec.getStorage();
StorageDiff diff = new StorageDiff(oldStorage, newStorage);
if (!diff.isEmpty()) {
log.warn("Only the following changes to Kafka storage are allowed: changing the deleteClaim flag, adding volumes to Jbod storage or removing volumes from Jbod storage and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
log.warn("Your desired Kafka storage configuration contains changes which are not allowed. As a result, all storage changes will be ignored. Use DEBUG level logging for more information about the detected changes.");
result.setStorage(oldStorage);
} else {
result.setStorage(newStorage);
}
} else {
result.setStorage(kafkaClusterSpec.getStorage());
}
result.setDataVolumesClaimsAndMountPaths(result.getStorage());
result.setUserAffinity(affinity(kafkaClusterSpec));
result.setResources(kafkaClusterSpec.getResources());
result.setTolerations(tolerations(kafkaClusterSpec));
TlsSidecar tlsSidecar = kafkaClusterSpec.getTlsSidecar();
if (tlsSidecar == null) {
tlsSidecar = new TlsSidecar();
}
if (tlsSidecar.getImage() == null) {
String tlsSidecarImage = versions.kafkaImage(kafkaClusterSpec.getImage(), versions.defaultVersion().version());
if (tlsSidecarImage == null) {
throw new InvalidResourceException("Version " + kafkaClusterSpec.getVersion() + " is not supported. Supported versions are: " + String.join(", ", versions.supportedVersions()) + ".");
}
tlsSidecar.setImage(tlsSidecarImage);
}
result.setTlsSidecar(tlsSidecar);
KafkaListeners listeners = kafkaClusterSpec.getListeners();
result.setListeners(listeners);
if (listeners != null) {
if (listeners.getPlain() != null
&& listeners.getPlain().getAuthentication() instanceof KafkaListenerAuthenticationTls) {
throw new InvalidResourceException("You cannot configure TLS authentication on a plain listener.");
}
if (listeners.getExternal() != null && !result.isExposedWithTls() && listeners.getExternal().getAuth() instanceof KafkaListenerAuthenticationTls) {
throw new InvalidResourceException("TLS Client Authentication can be used only with enabled TLS encryption!");
}
}
result.setAuthorization(kafkaClusterSpec.getAuthorization());
if (kafkaClusterSpec.getTemplate() != null) {
KafkaClusterTemplate template = kafkaClusterSpec.getTemplate();
if (template.getStatefulset() != null && template.getStatefulset().getMetadata() != null) {
result.templateStatefulSetLabels = template.getStatefulset().getMetadata().getLabels();
result.templateStatefulSetAnnotations = template.getStatefulset().getMetadata().getAnnotations();
}
ModelUtils.parsePodTemplate(result, template.getPod());
if (template.getBootstrapService() != null && template.getBootstrapService().getMetadata() != null) {
result.templateServiceLabels = template.getBootstrapService().getMetadata().getLabels();
result.templateServiceAnnotations = template.getBootstrapService().getMetadata().getAnnotations();
}
if (template.getBrokersService() != null && template.getBrokersService().getMetadata() != null) {
result.templateHeadlessServiceLabels = template.getBrokersService().getMetadata().getLabels();
result.templateHeadlessServiceAnnotations = template.getBrokersService().getMetadata().getAnnotations();
}
if (template.getExternalBootstrapService() != null && template.getExternalBootstrapService().getMetadata() != null) {
result.templateExternalBootstrapServiceLabels = template.getExternalBootstrapService().getMetadata().getLabels();
result.templateExternalBootstrapServiceAnnotations = template.getExternalBootstrapService().getMetadata().getAnnotations();
}
if (template.getPerPodService() != null && template.getPerPodService().getMetadata() != null) {
result.templatePerPodServiceLabels = template.getPerPodService().getMetadata().getLabels();
result.templatePerPodServiceAnnotations = template.getPerPodService().getMetadata().getAnnotations();
}
if (template.getExternalBootstrapRoute() != null && template.getExternalBootstrapRoute().getMetadata() != null) {
result.templateExternalBootstrapRouteLabels = template.getExternalBootstrapRoute().getMetadata().getLabels();
result.templateExternalBootstrapRouteAnnotations = template.getExternalBootstrapRoute().getMetadata().getAnnotations();
}
if (template.getPerPodRoute() != null && template.getPerPodRoute().getMetadata() != null) {
result.templatePerPodRouteLabels = template.getPerPodRoute().getMetadata().getLabels();
result.templatePerPodRouteAnnotations = template.getPerPodRoute().getMetadata().getAnnotations();
}
if (template.getExternalBootstrapIngress() != null && template.getExternalBootstrapIngress().getMetadata() != null) {
result.templateExternalBootstrapIngressLabels = template.getExternalBootstrapIngress().getMetadata().getLabels();
result.templateExternalBootstrapIngressAnnotations = template.getExternalBootstrapIngress().getMetadata().getAnnotations();
}
if (template.getPerPodIngress() != null && template.getPerPodIngress().getMetadata() != null) {
result.templatePerPodIngressLabels = template.getPerPodIngress().getMetadata().getLabels();
result.templatePerPodIngressAnnotations = template.getPerPodIngress().getMetadata().getAnnotations();
}
ModelUtils.parsePodDisruptionBudgetTemplate(result, template.getPodDisruptionBudget());
}
result.kafkaVersion = versions.version(kafkaClusterSpec.getVersion());
return result;
}
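    /**
     * Resolves the tolerations for the Kafka pods. Tolerations from the pod template take precedence over
     * the spec.kafka.tolerations field.
     *
     * @param kafkaClusterSpec The Kafka cluster specification
     * @return The tolerations to use, or null when none are configured
     */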
static List<Toleration> tolerations(KafkaClusterSpec kafkaClusterSpec) {
if (kafkaClusterSpec.getTemplate() != null
&& kafkaClusterSpec.getTemplate().getPod() != null
&& kafkaClusterSpec.getTemplate().getPod().getTolerations() != null) {
if (kafkaClusterSpec.getTolerations() != null) {
log.warn("Tolerations given on both spec.kafka.tolerations and spec.kafka.template.statefulset.tolerations; latter takes precedence");
}
return kafkaClusterSpec.getTemplate().getPod().getTolerations();
} else {
return kafkaClusterSpec.getTolerations();
}
}
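    /**
     * Resolves the affinity for the Kafka pods. Affinity from the pod template takes precedence over
     * the spec.kafka.affinity field.
     *
     * @param kafkaClusterSpec The Kafka cluster specification
     * @return The affinity to use, or null when none is configured
     */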
static Affinity affinity(KafkaClusterSpec kafkaClusterSpec) {
if (kafkaClusterSpec.getTemplate() != null
&& kafkaClusterSpec.getTemplate().getPod() != null
&& kafkaClusterSpec.getTemplate().getPod().getAffinity() != null) {
if (kafkaClusterSpec.getAffinity() != null) {
log.warn("Affinity given on both spec.kafka.affinity and spec.kafka.template.statefulset.affinity; latter takes precedence");
}
return kafkaClusterSpec.getTemplate().getPod().getAffinity();
} else {
return kafkaClusterSpec.getAffinity();
}
}
/**
     * Manages certificate generation based on the certificates already present in the Secrets
*
* @param kafka The Kafka custom resource
* @param clusterCa The CA for cluster certificates
* @param externalBootstrapDnsName The set of DNS names for bootstrap service (should be appended to every broker certificate)
     * @param externalDnsNames The map of DNS names for broker pods, keyed by broker id (appended only to the certificate of the given broker)
*/
public void generateCertificates(Kafka kafka, ClusterCa clusterCa, Set<String> externalBootstrapDnsName, Map<Integer, Set<String>> externalDnsNames) {
log.debug("Generating certificates");
try {
brokerCerts = clusterCa.generateBrokerCerts(kafka, externalBootstrapDnsName, externalDnsNames);
} catch (IOException e) {
log.warn("Error while generating certificates", e);
}
log.debug("End generating certificates");
}
/**
* Generates ports for bootstrap service.
     * The bootstrap service contains only the client interfaces,
     * not the replication interface, which does not need the bootstrap service.
*
* @return List with generated ports
*/
private List<ServicePort> getServicePorts() {
List<ServicePort> ports = new ArrayList<>(4);
ports.add(createServicePort(REPLICATION_PORT_NAME, REPLICATION_PORT, REPLICATION_PORT, "TCP"));
if (listeners != null && listeners.getPlain() != null) {
ports.add(createServicePort(CLIENT_PORT_NAME, CLIENT_PORT, CLIENT_PORT, "TCP"));
}
if (listeners != null && listeners.getTls() != null) {
ports.add(createServicePort(CLIENT_TLS_PORT_NAME, CLIENT_TLS_PORT, CLIENT_TLS_PORT, "TCP"));
}
if (isMetricsEnabled()) {
ports.add(createServicePort(METRICS_PORT_NAME, METRICS_PORT, METRICS_PORT, "TCP"));
}
return ports;
}
/**
* Generates ports for headless service.
     * The headless service contains both the client interfaces and the replication interface.
*
* @return List with generated ports
*/
private List<ServicePort> getHeadlessServicePorts() {
List<ServicePort> ports = new ArrayList<>(3);
ports.add(createServicePort(REPLICATION_PORT_NAME, REPLICATION_PORT, REPLICATION_PORT, "TCP"));
if (listeners != null && listeners.getPlain() != null) {
ports.add(createServicePort(CLIENT_PORT_NAME, CLIENT_PORT, CLIENT_PORT, "TCP"));
}
if (listeners != null && listeners.getTls() != null) {
ports.add(createServicePort(CLIENT_TLS_PORT_NAME, CLIENT_TLS_PORT, CLIENT_TLS_PORT, "TCP"));
}
return ports;
}
/**
* Generates a Service according to configured defaults
* @return The generated Service
*/
public Service generateService() {
return createService("ClusterIP", getServicePorts(), mergeAnnotations(getPrometheusAnnotations(), templateServiceAnnotations));
}
/**
     * Utility function to determine the type of service based on the external listener configuration
*
* @return Service type
*/
private String getExternalServiceType() {
if (isExposedWithNodePort()) {
return "NodePort";
} else if (isExposedWithLoadBalancer()) {
return "LoadBalancer";
} else {
return "ClusterIP";
}
}
/**
     * Generates the external bootstrap service. This service is used to expose Kafka externally.
     * It exposes only the external port 9094.
     * A separate service is used to make sure that the internal ports are not exposed outside of the cluster.
*
* @return The generated Service
*/
public Service generateExternalBootstrapService() {
if (isExposed()) {
String externalBootstrapServiceName = externalBootstrapServiceName(cluster);
List<ServicePort> ports;
Integer nodePort = null;
if (isExposedWithNodePort()) {
KafkaListenerExternalNodePort externalNodePort = (KafkaListenerExternalNodePort) listeners.getExternal();
if (externalNodePort.getOverrides() != null && externalNodePort.getOverrides().getBootstrap() != null) {
nodePort = externalNodePort.getOverrides().getBootstrap().getNodePort();
}
}
ports = Collections.singletonList(createServicePort(EXTERNAL_PORT_NAME, EXTERNAL_PORT, EXTERNAL_PORT,
nodePort, "TCP"));
Map<String, String> dnsAnnotations = Collections.emptyMap();
if (isExposedWithLoadBalancer()) {
KafkaListenerExternalLoadBalancer externalLb = (KafkaListenerExternalLoadBalancer) listeners.getExternal();
if (externalLb.getOverrides() != null && externalLb.getOverrides().getBootstrap() != null) {
dnsAnnotations = externalLb.getOverrides().getBootstrap().getDnsAnnotations();
}
}
return createService(externalBootstrapServiceName, getExternalServiceType(), ports,
getLabelsWithName(externalBootstrapServiceName, templateExternalBootstrapServiceLabels),
getSelectorLabels(),
mergeAnnotations(dnsAnnotations, templateExternalBootstrapServiceAnnotations));
}
return null;
}
/**
     * Generates the per-pod service used to expose an individual broker pod externally.
*
* @param pod Number of the pod for which this service should be generated
* @return The generated Service
*/
public Service generateExternalService(int pod) {
if (isExposed()) {
String perPodServiceName = externalServiceName(cluster, pod);
List<ServicePort> ports = new ArrayList<>(1);
Integer nodePort = null;
if (isExposedWithNodePort()) {
KafkaListenerExternalNodePort externalNodePort = (KafkaListenerExternalNodePort) listeners.getExternal();
if (externalNodePort.getOverrides() != null && externalNodePort.getOverrides().getBrokers() != null) {
nodePort = externalNodePort.getOverrides().getBrokers().stream()
.filter(broker -> broker != null && broker.getBroker() != null && broker.getBroker() == pod && broker.getNodePort() != null)
.map(NodePortListenerBrokerOverride::getNodePort)
.findAny().orElse(null);
}
}
ports.add(createServicePort(EXTERNAL_PORT_NAME, EXTERNAL_PORT, EXTERNAL_PORT, nodePort, "TCP"));
Map<String, String> dnsAnnotations = Collections.emptyMap();
if (isExposedWithLoadBalancer()) {
KafkaListenerExternalLoadBalancer externalLb = (KafkaListenerExternalLoadBalancer) listeners.getExternal();
if (externalLb.getOverrides() != null && externalLb.getOverrides().getBrokers() != null) {
dnsAnnotations = externalLb.getOverrides().getBrokers().stream()
.filter(broker -> broker != null && broker.getBroker() == pod)
.map(LoadBalancerListenerBrokerOverride::getDnsAnnotations)
.findAny()
.orElse(Collections.emptyMap());
}
}
Labels selector = Labels.fromMap(getSelectorLabels()).withStatefulSetPod(kafkaPodName(cluster, pod));
return createService(perPodServiceName, getExternalServiceType(), ports,
getLabelsWithName(perPodServiceName, templatePerPodServiceLabels), selector.toMap(),
mergeAnnotations(dnsAnnotations, templatePerPodServiceAnnotations));
}
return null;
}
/**
     * Generates the per-pod route used to expose an individual broker pod externally using OpenShift Routes.
*
* @param pod Number of the pod for which this route should be generated
* @return The generated Route
*/
public Route generateExternalRoute(int pod) {
if (isExposedWithRoute()) {
String perPodServiceName = externalServiceName(cluster, pod);
Route route = new RouteBuilder()
.withNewMetadata()
.withName(perPodServiceName)
.withLabels(getLabelsWithName(perPodServiceName, templatePerPodRouteLabels))
.withAnnotations(mergeAnnotations(null, templatePerPodRouteAnnotations))
.withNamespace(namespace)
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withNewSpec()
.withNewTo()
.withKind("Service")
.withName(perPodServiceName)
.endTo()
.withNewPort()
.withNewTargetPort(EXTERNAL_PORT)
.endPort()
.withNewTls()
.withTermination("passthrough")
.endTls()
.endSpec()
.build();
KafkaListenerExternalRoute listener = (KafkaListenerExternalRoute) listeners.getExternal();
if (listener.getOverrides() != null && listener.getOverrides().getBrokers() != null) {
String specHost = listener.getOverrides().getBrokers().stream()
.filter(broker -> broker != null && broker.getBroker() == pod
&& broker.getHost() != null)
.map(RouteListenerBrokerOverride::getHost)
.findAny()
.orElse(null);
if (specHost != null && !specHost.isEmpty()) {
route.getSpec().setHost(specHost);
}
}
return route;
}
return null;
}
/**
* Generates a bootstrap route which can be used to bootstrap clients outside of OpenShift.
     * @return The generated Route
*/
public Route generateExternalBootstrapRoute() {
if (isExposedWithRoute()) {
Route route = new RouteBuilder()
.withNewMetadata()
.withName(serviceName)
.withLabels(getLabelsWithName(serviceName, templateExternalBootstrapRouteLabels))
.withAnnotations(mergeAnnotations(null, templateExternalBootstrapRouteAnnotations))
.withNamespace(namespace)
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withNewSpec()
.withNewTo()
.withKind("Service")
.withName(externalBootstrapServiceName(cluster))
.endTo()
.withNewPort()
.withNewTargetPort(EXTERNAL_PORT)
.endPort()
.withNewTls()
.withTermination("passthrough")
.endTls()
.endSpec()
.build();
KafkaListenerExternalRoute listener = (KafkaListenerExternalRoute) listeners.getExternal();
if (listener.getOverrides() != null && listener.getOverrides().getBootstrap() != null && listener.getOverrides().getBootstrap().getHost() != null && !listener.getOverrides().getBootstrap().getHost().isEmpty()) {
route.getSpec().setHost(listener.getOverrides().getBootstrap().getHost());
}
return route;
}
return null;
}
/**
     * Generates the per-pod ingress used to expose an individual broker pod externally using Nginx Ingress.
*
* @param pod Number of the pod for which this ingress should be generated
* @return The generated Ingress
*/
public Ingress generateExternalIngress(int pod) {
if (isExposedWithIngress()) {
KafkaListenerExternalIngress listener = (KafkaListenerExternalIngress) listeners.getExternal();
Map<String, String> dnsAnnotations = null;
String host = null;
if (listener.getConfiguration() != null && listener.getConfiguration().getBrokers() != null) {
host = listener.getConfiguration().getBrokers().stream()
.filter(broker -> broker != null && broker.getBroker() == pod
&& broker.getHost() != null)
.map(IngressListenerBrokerConfiguration::getHost)
.findAny()
.orElseThrow(() -> new InvalidResourceException("Hostname for broker with id " + pod + " is required for exposing Kafka cluster using Ingress"));
dnsAnnotations = listener.getConfiguration().getBrokers().stream()
.filter(broker -> broker != null && broker.getBroker() == pod)
.map(IngressListenerBrokerConfiguration::getDnsAnnotations)
.findAny()
.orElse(null);
}
String perPodServiceName = externalServiceName(cluster, pod);
HTTPIngressPath path = new HTTPIngressPathBuilder()
.withPath("/")
.withNewBackend()
.withNewServicePort(EXTERNAL_PORT)
.withServiceName(perPodServiceName)
.endBackend()
.build();
IngressRule rule = new IngressRuleBuilder()
.withHost(host)
.withNewHttp()
.withPaths(path)
.endHttp()
.build();
IngressTLS tls = new IngressTLSBuilder()
.withHosts(host)
.build();
Ingress ingress = new IngressBuilder()
.withNewMetadata()
.withName(perPodServiceName)
.withLabels(getLabelsWithName(perPodServiceName, templatePerPodIngressLabels))
.withAnnotations(mergeAnnotations(generateInternalIngressAnnotations(), templatePerPodIngressAnnotations, dnsAnnotations))
.withNamespace(namespace)
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withNewSpec()
.withRules(rule)
.withTls(tls)
.endSpec()
.build();
return ingress;
}
return null;
}
/**
* Generates a bootstrap ingress which can be used to bootstrap clients outside of Kubernetes.
* @return The generated Ingress
*/
public Ingress generateExternalBootstrapIngress() {
if (isExposedWithIngress()) {
KafkaListenerExternalIngress listener = (KafkaListenerExternalIngress) listeners.getExternal();
Map<String, String> dnsAnnotations;
String host;
if (listener.getConfiguration() != null && listener.getConfiguration().getBootstrap() != null && listener.getConfiguration().getBootstrap().getHost() != null) {
host = listener.getConfiguration().getBootstrap().getHost();
dnsAnnotations = listener.getConfiguration().getBootstrap().getDnsAnnotations();
} else {
                throw new InvalidResourceException("Bootstrap hostname is required for exposing Kafka cluster using Ingress");
}
HTTPIngressPath path = new HTTPIngressPathBuilder()
.withPath("/")
.withNewBackend()
.withNewServicePort(EXTERNAL_PORT)
.withServiceName(externalBootstrapServiceName(cluster))
.endBackend()
.build();
IngressRule rule = new IngressRuleBuilder()
.withHost(host)
.withNewHttp()
.withPaths(path)
.endHttp()
.build();
IngressTLS tls = new IngressTLSBuilder()
.withHosts(host)
.build();
Ingress ingress = new IngressBuilder()
.withNewMetadata()
.withName(serviceName)
.withLabels(getLabelsWithName(serviceName, templateExternalBootstrapIngressLabels))
.withAnnotations(mergeAnnotations(generateInternalIngressAnnotations(), templateExternalBootstrapIngressAnnotations, dnsAnnotations))
.withNamespace(namespace)
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withNewSpec()
.withRules(rule)
.withTls(tls)
.endSpec()
.build();
return ingress;
}
return null;
}
/**
* Generates the annotations needed to configure the Ingress as TLS passthrough
*
* @return Map with the annotations
*/
private Map<String, String> generateInternalIngressAnnotations() {
Map<String, String> internalAnnotations = new HashMap<>(4);
internalAnnotations.put("kubernetes.io/ingress.class", "nginx");
internalAnnotations.put("ingress.kubernetes.io/ssl-passthrough", "true");
internalAnnotations.put("nginx.ingress.kubernetes.io/ssl-passthrough", "true");
internalAnnotations.put("nginx.ingress.kubernetes.io/backend-protocol", "HTTPS");
return internalAnnotations;
}
/**
* Generates a headless Service according to configured defaults
* @return The generated Service
*/
public Service generateHeadlessService() {
return createHeadlessService(getHeadlessServicePorts());
}
/**
* Generates a StatefulSet according to configured defaults
     * @param isOpenShift True iff this operator is operating within OpenShift.
     * @param imagePullPolicy Image pull policy configured by the user
     * @param imagePullSecrets Secrets used for pulling the container images
     * @return The generated StatefulSet
*/
public StatefulSet generateStatefulSet(boolean isOpenShift, ImagePullPolicy imagePullPolicy, List<LocalObjectReference> imagePullSecrets) {
HashMap<String, String> annotations = new HashMap<>(2);
annotations.put(ANNO_STRIMZI_IO_KAFKA_VERSION, kafkaVersion.version());
annotations.put(ANNO_STRIMZI_IO_STORAGE, ModelUtils.encodeStorageToJson(storage));
return createStatefulSet(
annotations,
getVolumes(isOpenShift),
getVolumeClaims(),
getMergedAffinity(),
getInitContainers(imagePullPolicy),
getContainers(imagePullPolicy),
imagePullSecrets,
isOpenShift);
}
/**
     * Generates the Secret containing the Kafka broker certificates signed by the cluster CA certificate, used for TLS-based
     * internal communication with Zookeeper.
     * It also contains the related Kafka broker private keys.
*
* @return The generated Secret
*/
public Secret generateBrokersSecret() {
Map<String, String> data = new HashMap<>();
for (int i = 0; i < replicas; i++) {
CertAndKey cert = brokerCerts.get(KafkaCluster.kafkaPodName(cluster, i));
data.put(KafkaCluster.kafkaPodName(cluster, i) + ".key", cert.keyAsBase64String());
data.put(KafkaCluster.kafkaPodName(cluster, i) + ".crt", cert.certAsBase64String());
}
return createSecret(KafkaCluster.brokersSecretName(cluster), data);
}
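    /**
     * Generates the list of container ports for the Kafka broker container based on the configured listeners,
     * external access and metrics settings.
     *
     * @return List with generated container ports
     */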
private List<ContainerPort> getContainerPortList() {
List<ContainerPort> portList = new ArrayList<>(5);
portList.add(createContainerPort(REPLICATION_PORT_NAME, REPLICATION_PORT, "TCP"));
if (listeners != null && listeners.getPlain() != null) {
portList.add(createContainerPort(CLIENT_PORT_NAME, CLIENT_PORT, "TCP"));
}
if (listeners != null && listeners.getTls() != null) {
portList.add(createContainerPort(CLIENT_TLS_PORT_NAME, CLIENT_TLS_PORT, "TCP"));
}
if (isExposed()) {
portList.add(createContainerPort(EXTERNAL_PORT_NAME, EXTERNAL_PORT, "TCP"));
}
if (isMetricsEnabled) {
portList.add(createContainerPort(METRICS_PORT_NAME, METRICS_PORT, "TCP"));
}
return portList;
}
/**
     * Fills the StatefulSet with volumes, persistent volume claims and related volume mount paths for the storage.
     * It is called recursively on the related inner volumes if the storage is of {@link Storage#TYPE_JBOD} type.
     *
     * @param storage the Storage instance from which the volumes, persistent volume claims and
     *                related volume mount paths are built
*/
private void setDataVolumesClaimsAndMountPaths(Storage storage) {
if (storage != null) {
Integer id;
if (storage instanceof EphemeralStorage) {
id = ((EphemeralStorage) storage).getId();
} else if (storage instanceof PersistentClaimStorage) {
id = ((PersistentClaimStorage) storage).getId();
} else if (storage instanceof JbodStorage) {
for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
if (volume.getId() == null)
throw new InvalidResourceException("Volumes under JBOD storage type have to have 'id' property");
// it's called recursively for setting the information from the current volume
setDataVolumesClaimsAndMountPaths(volume);
}
return;
} else {
throw new IllegalStateException("The declared storage '" + storage.getType() + "' is not supported");
}
String name = ModelUtils.getVolumePrefix(id);
String mountPath = this.mountPath + "/" + name;
if (storage instanceof EphemeralStorage) {
dataVolumes.add(createEmptyDirVolume(name));
} else if (storage instanceof PersistentClaimStorage) {
dataPvcs.add(createPersistentVolumeClaimTemplate(name, (PersistentClaimStorage) storage));
}
dataVolumeMountPaths.add(createVolumeMount(name, mountPath));
}
}
/**
     * Generates the persistent volume claims for the storage. It is called recursively on the related inner volumes if the
     * storage is of {@link Storage#TYPE_JBOD} type.
     *
     * @param storage the Storage instance from which the persistent volume claims are built
     * @return The list of generated persistent volume claims
*/
public List<PersistentVolumeClaim> generatePersistentVolumeClaims(Storage storage) {
List<PersistentVolumeClaim> pvcs = new ArrayList<>();
if (storage != null) {
if (storage instanceof PersistentClaimStorage) {
Integer id = ((PersistentClaimStorage) storage).getId();
String pvcBaseName = ModelUtils.getVolumePrefix(id) + "-" + name;
for (int i = 0; i < replicas; i++) {
pvcs.add(createPersistentVolumeClaim(i, pvcBaseName + "-" + i, (PersistentClaimStorage) storage));
}
} else if (storage instanceof JbodStorage) {
for (SingleVolumeStorage volume : ((JbodStorage) storage).getVolumes()) {
if (volume.getId() == null)
throw new InvalidResourceException("Volumes under JBOD storage type have to have 'id' property");
// it's called recursively for setting the information from the current volume
pvcs.addAll(generatePersistentVolumeClaims(volume));
}
}
}
return pvcs;
}
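    /**
     * Generates the list of volumes for the Kafka pods: the data volumes, the certificate Secrets, the logging and
     * metrics ConfigMap, an in-memory volume for the readiness files and, when rack awareness or node port access
     * is used, the init volume.
     *
     * @param isOpenShift True iff this operator is operating within OpenShift.
     * @return List with generated volumes
     */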
private List<Volume> getVolumes(boolean isOpenShift) {
List<Volume> volumeList = new ArrayList<>();
volumeList.addAll(dataVolumes);
if (rack != null || isExposedWithNodePort()) {
volumeList.add(createEmptyDirVolume(INIT_VOLUME_NAME));
}
volumeList.add(createSecretVolume(CLUSTER_CA_CERTS_VOLUME, AbstractModel.clusterCaCertSecretName(cluster), isOpenShift));
volumeList.add(createSecretVolume(BROKER_CERTS_VOLUME, KafkaCluster.brokersSecretName(cluster), isOpenShift));
volumeList.add(createSecretVolume(CLIENT_CA_CERTS_VOLUME, KafkaCluster.clientsCaCertSecretName(cluster), isOpenShift));
volumeList.add(createConfigMapVolume(logAndMetricsConfigVolumeName, ancillaryConfigName));
volumeList.add(new VolumeBuilder().withName("ready-files").withNewEmptyDir().withMedium("Memory").endEmptyDir().build());
return volumeList;
}
/* test */ List<PersistentVolumeClaim> getVolumeClaims() {
List<PersistentVolumeClaim> pvcList = new ArrayList<>();
pvcList.addAll(dataPvcs);
return pvcList;
}
private List<VolumeMount> getVolumeMounts() {
List<VolumeMount> volumeMountList = new ArrayList<>();
volumeMountList.addAll(dataVolumeMountPaths);
volumeMountList.add(createVolumeMount(CLUSTER_CA_CERTS_VOLUME, CLUSTER_CA_CERTS_VOLUME_MOUNT));
volumeMountList.add(createVolumeMount(BROKER_CERTS_VOLUME, BROKER_CERTS_VOLUME_MOUNT));
volumeMountList.add(createVolumeMount(CLIENT_CA_CERTS_VOLUME, CLIENT_CA_CERTS_VOLUME_MOUNT));
volumeMountList.add(createVolumeMount(logAndMetricsConfigVolumeName, logAndMetricsConfigMountPath));
volumeMountList.add(createVolumeMount("ready-files", "/var/opt/kafka"));
if (rack != null || isExposedWithNodePort()) {
volumeMountList.add(createVolumeMount(INIT_VOLUME_NAME, INIT_VOLUME_MOUNT));
}
return volumeMountList;
}
/**
* Returns a combined affinity: Adding the affinity needed for the "kafka-rack" to the {@link #getUserAffinity()}.
*/
@Override
protected Affinity getMergedAffinity() {
Affinity userAffinity = getUserAffinity();
AffinityBuilder builder = new AffinityBuilder(userAffinity == null ? new Affinity() : userAffinity);
if (rack != null) {
// If there's a rack config, we need to add a podAntiAffinity to spread the brokers among the racks
builder = builder
.editOrNewPodAntiAffinity()
.addNewPreferredDuringSchedulingIgnoredDuringExecution()
.withWeight(100)
.withNewPodAffinityTerm()
.withTopologyKey(rack.getTopologyKey())
.withNewLabelSelector()
.addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, cluster)
.addToMatchLabels(Labels.STRIMZI_NAME_LABEL, name)
.endLabelSelector()
.endPodAffinityTerm()
.endPreferredDuringSchedulingIgnoredDuringExecution()
.endPodAntiAffinity();
}
return builder.build();
}
@Override
protected List<Container> getInitContainers(ImagePullPolicy imagePullPolicy) {
List<Container> initContainers = new ArrayList<>();
if (rack != null || isExposedWithNodePort()) {
ResourceRequirements resources = new ResourceRequirementsBuilder()
.addToRequests("cpu", new Quantity("100m"))
.addToRequests("memory", new Quantity("128Mi"))
.addToLimits("cpu", new Quantity("1"))
.addToLimits("memory", new Quantity("256Mi"))
.build();
List<EnvVar> varList = new ArrayList<>();
varList.add(buildEnvVarFromFieldRef(ENV_VAR_KAFKA_INIT_NODE_NAME, "spec.nodeName"));
if (rack != null) {
varList.add(buildEnvVar(ENV_VAR_KAFKA_INIT_RACK_TOPOLOGY_KEY, rack.getTopologyKey()));
}
if (isExposedWithNodePort()) {
varList.add(buildEnvVar(ENV_VAR_KAFKA_INIT_EXTERNAL_ADDRESS, "TRUE"));
varList.add(buildEnvVar(ENV_VAR_KAFKA_INIT_EXTERNAL_ADVERTISED_ADDRESSES, String.join(" ", externalAddresses)));
}
Container initContainer = new ContainerBuilder()
.withName(INIT_NAME)
.withImage(initImage)
.withArgs("/opt/strimzi/bin/kafka_init_run.sh")
.withResources(resources)
.withEnv(varList)
.withVolumeMounts(createVolumeMount(INIT_VOLUME_NAME, INIT_VOLUME_MOUNT))
.withImagePullPolicy(determineImagePullPolicy(imagePullPolicy, initImage))
.build();
initContainers.add(initContainer);
}
return initContainers;
}
@Override
protected List<Container> getContainers(ImagePullPolicy imagePullPolicy) {
List<Container> containers = new ArrayList<>();
Container container = new ContainerBuilder()
.withName(KAFKA_NAME)
.withImage(getImage())
.withEnv(getEnvVars())
.withVolumeMounts(getVolumeMounts())
.withPorts(getContainerPortList())
.withNewLivenessProbe()
.withInitialDelaySeconds(livenessInitialDelay)
.withTimeoutSeconds(livenessTimeout)
.withNewExec()
.withCommand("/opt/kafka/kafka_liveness.sh")
.endExec()
.endLivenessProbe()
.withNewReadinessProbe()
.withInitialDelaySeconds(readinessInitialDelay)
.withTimeoutSeconds(readinessTimeout)
.withNewExec()
// The kafka-agent will create /var/opt/kafka/kafka-ready in the container
.withCommand("test", "-f", "/var/opt/kafka/kafka-ready")
.endExec()
.endReadinessProbe()
.withResources(getResources())
.withImagePullPolicy(determineImagePullPolicy(imagePullPolicy, getImage()))
.withCommand("/opt/kafka/kafka_run.sh")
.build();
String tlsSidecarImage = getImage();
if (tlsSidecar != null && tlsSidecar.getImage() != null) {
tlsSidecarImage = tlsSidecar.getImage();
}
Container tlsSidecarContainer = new ContainerBuilder()
.withName(TLS_SIDECAR_NAME)
.withImage(tlsSidecarImage)
.withCommand("/opt/stunnel/kafka_stunnel_run.sh")
.withLivenessProbe(ModelUtils.tlsSidecarLivenessProbe(tlsSidecar))
.withReadinessProbe(ModelUtils.tlsSidecarReadinessProbe(tlsSidecar))
.withResources(tlsSidecar != null ? tlsSidecar.getResources() : null)
.withEnv(asList(buildEnvVar(ENV_VAR_KAFKA_ZOOKEEPER_CONNECT, zookeeperConnect),
ModelUtils.tlsSidecarLogEnvVar(tlsSidecar)))
.withVolumeMounts(createVolumeMount(BROKER_CERTS_VOLUME, TLS_SIDECAR_KAFKA_CERTS_VOLUME_MOUNT),
createVolumeMount(CLUSTER_CA_CERTS_VOLUME, TLS_SIDECAR_CLUSTER_CA_CERTS_VOLUME_MOUNT))
.withLifecycle(new LifecycleBuilder().withNewPreStop()
.withNewExec().withCommand("/opt/stunnel/kafka_stunnel_pre_stop.sh",
String.valueOf(templateTerminationGracePeriodSeconds))
.endExec().endPreStop().build())
.withImagePullPolicy(determineImagePullPolicy(imagePullPolicy, tlsSidecarImage))
.build();
containers.add(container);
containers.add(tlsSidecarContainer);
return containers;
}
@Override
protected String getServiceAccountName() {
return initContainerServiceAccountName(cluster);
}
@Override
protected List<EnvVar> getEnvVars() {
List<EnvVar> varList = new ArrayList<>();
varList.add(buildEnvVar(ENV_VAR_KAFKA_METRICS_ENABLED, String.valueOf(isMetricsEnabled)));
varList.add(buildEnvVar(ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED, String.valueOf(gcLoggingEnabled)));
heapOptions(varList, 0.5, 5L * 1024L * 1024L * 1024L);
jvmPerformanceOptions(varList);
if (configuration != null && !configuration.getConfiguration().isEmpty()) {
varList.add(buildEnvVar(ENV_VAR_KAFKA_CONFIGURATION, configuration.getConfiguration()));
}
if (listeners != null) {
if (listeners.getPlain() != null) {
varList.add(buildEnvVar(ENV_VAR_KAFKA_CLIENT_ENABLED, "TRUE"));
if (listeners.getPlain().getAuthentication() != null) {
varList.add(buildEnvVar(ENV_VAR_KAFKA_CLIENT_AUTHENTICATION, listeners.getPlain().getAuthentication().getType()));
}
}
if (listeners.getTls() != null) {
varList.add(buildEnvVar(ENV_VAR_KAFKA_CLIENTTLS_ENABLED, "TRUE"));
if (listeners.getTls().getAuth() != null) {
varList.add(buildEnvVar(ENV_VAR_KAFKA_CLIENTTLS_AUTHENTICATION, listeners.getTls().getAuth().getType()));
}
}
if (listeners.getExternal() != null) {
varList.add(buildEnvVar(ENV_VAR_KAFKA_EXTERNAL_ENABLED, listeners.getExternal().getType()));
varList.add(buildEnvVar(ENV_VAR_KAFKA_EXTERNAL_ADDRESSES, String.join(" ", externalAddresses)));
varList.add(buildEnvVar(ENV_VAR_KAFKA_EXTERNAL_TLS, Boolean.toString(isExposedWithTls())));
if (listeners.getExternal().getAuth() != null) {
varList.add(buildEnvVar(ENV_VAR_KAFKA_EXTERNAL_AUTHENTICATION, listeners.getExternal().getAuth().getType()));
}
}
}
if (authorization != null && KafkaAuthorizationSimple.TYPE_SIMPLE.equals(authorization.getType())) {
varList.add(buildEnvVar(ENV_VAR_KAFKA_AUTHORIZATION_TYPE, KafkaAuthorizationSimple.TYPE_SIMPLE));
KafkaAuthorizationSimple simpleAuthz = (KafkaAuthorizationSimple) authorization;
if (simpleAuthz.getSuperUsers() != null && simpleAuthz.getSuperUsers().size() > 0) {
String superUsers = simpleAuthz.getSuperUsers().stream().map(e -> String.format("User:%s", e)).collect(Collectors.joining(";"));
varList.add(buildEnvVar(ENV_VAR_KAFKA_AUTHORIZATION_SUPER_USERS, superUsers));
}
}
String logDirs = dataVolumeMountPaths.stream()
.map(volumeMount -> volumeMount.getMountPath()).collect(Collectors.joining(","));
varList.add(buildEnvVar(ENV_VAR_KAFKA_LOG_DIRS, logDirs));
return varList;
}
protected void setZookeeperConnect(String zookeeperConnect) {
this.zookeeperConnect = zookeeperConnect;
}
protected void setRack(Rack rack) {
this.rack = rack;
}
protected void setInitImage(String initImage) {
this.initImage = initImage;
}
protected void setTlsSidecar(TlsSidecar tlsSidecar) {
this.tlsSidecar = tlsSidecar;
}
@Override
protected String getDefaultLogConfigFileName() {
return "kafkaDefaultLoggingProperties";
}
/**
* Get the name of the kafka service account given the name of the {@code kafkaResourceName}.
*/
public static String initContainerServiceAccountName(String kafkaResourceName) {
return kafkaClusterName(kafkaResourceName);
}
/**
* Get the name of the kafka init container role binding given the name of the {@code namespace} and {@code cluster}.
*/
public static String initContainerClusterRoleBindingName(String namespace, String cluster) {
return "strimzi-" + namespace + "-" + cluster + "-kafka-init";
}
/**
* Creates the ClusterRoleBinding which is used to bind the Kafka SA to the ClusterRole
     * which grants the Kafka init container permission to access K8S nodes (necessary for rack-awareness).
*/
public KubernetesClusterRoleBinding generateClusterRoleBinding(String assemblyNamespace) {
if (rack != null || isExposedWithNodePort()) {
KubernetesSubject ks = new KubernetesSubjectBuilder()
.withKind("ServiceAccount")
.withName(initContainerServiceAccountName(cluster))
.withNamespace(assemblyNamespace)
.build();
KubernetesRoleRef roleRef = new KubernetesRoleRefBuilder()
.withName("strimzi-kafka-broker")
.withApiGroup("rbac.authorization.k8s.io")
.withKind("ClusterRole")
.build();
return new KubernetesClusterRoleBindingBuilder()
.withNewMetadata()
.withName(initContainerClusterRoleBindingName(namespace, cluster))
.withNamespace(assemblyNamespace)
.withOwnerReferences(createOwnerReference())
.withLabels(labels.toMap())
.endMetadata()
.withSubjects(ks)
.withRoleRef(roleRef)
.build();
} else {
return null;
}
}
public static String policyName(String cluster) {
return cluster + NETWORK_POLICY_KEY_SUFFIX + NAME_SUFFIX;
}
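    /**
     * Generates the NetworkPolicy which restricts access to the replication port to the Kafka cluster and
     * Entity Operator pods, and opens the client, external and metrics ports according to the configured
     * listeners and metrics settings.
     *
     * @return The generated NetworkPolicy
     */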
public NetworkPolicy generateNetworkPolicy() {
List<NetworkPolicyIngressRule> rules = new ArrayList<>(5);
// Restrict access to 9091 / replication port
NetworkPolicyPort replicationPort = new NetworkPolicyPort();
replicationPort.setPort(new IntOrString(REPLICATION_PORT));
NetworkPolicyPeer kafkaClusterPeer = new NetworkPolicyPeer();
LabelSelector labelSelector = new LabelSelector();
Map<String, String> expressions = new HashMap<>();
expressions.put(Labels.STRIMZI_NAME_LABEL, kafkaClusterName(cluster));
labelSelector.setMatchLabels(expressions);
kafkaClusterPeer.setPodSelector(labelSelector);
NetworkPolicyPeer entityOperatorPeer = new NetworkPolicyPeer();
LabelSelector labelSelector2 = new LabelSelector();
Map<String, String> expressions2 = new HashMap<>();
expressions2.put(Labels.STRIMZI_NAME_LABEL, EntityOperator.entityOperatorName(cluster));
labelSelector2.setMatchLabels(expressions2);
entityOperatorPeer.setPodSelector(labelSelector2);
NetworkPolicyIngressRule replicationRule = new NetworkPolicyIngressRuleBuilder()
.withPorts(replicationPort)
.withFrom(kafkaClusterPeer, entityOperatorPeer)
.build();
rules.add(replicationRule);
// Free access to 9092, 9093 and 9094 ports
if (listeners != null) {
if (listeners.getPlain() != null) {
NetworkPolicyPort plainPort = new NetworkPolicyPort();
plainPort.setPort(new IntOrString(CLIENT_PORT));
NetworkPolicyIngressRule plainRule = new NetworkPolicyIngressRuleBuilder()
.withPorts(plainPort)
.withFrom(listeners.getPlain().getNetworkPolicyPeers())
.build();
rules.add(plainRule);
}
if (listeners.getTls() != null) {
NetworkPolicyPort tlsPort = new NetworkPolicyPort();
tlsPort.setPort(new IntOrString(CLIENT_TLS_PORT));
NetworkPolicyIngressRule tlsRule = new NetworkPolicyIngressRuleBuilder()
.withPorts(tlsPort)
.withFrom(listeners.getTls().getNetworkPolicyPeers())
.build();
rules.add(tlsRule);
}
if (isExposed()) {
NetworkPolicyPort externalPort = new NetworkPolicyPort();
externalPort.setPort(new IntOrString(EXTERNAL_PORT));
NetworkPolicyIngressRule externalRule = new NetworkPolicyIngressRuleBuilder()
.withPorts(externalPort)
.withFrom(listeners.getExternal().getNetworkPolicyPeers())
.build();
rules.add(externalRule);
}
}
if (isMetricsEnabled) {
NetworkPolicyPort metricsPort = new NetworkPolicyPort();
metricsPort.setPort(new IntOrString(METRICS_PORT));
NetworkPolicyIngressRule metricsRule = new NetworkPolicyIngressRuleBuilder()
.withPorts(metricsPort)
.withFrom()
.build();
rules.add(metricsRule);
}
NetworkPolicy networkPolicy = new NetworkPolicyBuilder()
.withNewMetadata()
.withName(policyName(cluster))
.withNamespace(namespace)
.withLabels(labels.toMap())
.withOwnerReferences(createOwnerReference())
.endMetadata()
.withNewSpec()
.withPodSelector(labelSelector)
.withIngress(rules)
.endSpec()
.build();
log.trace("Created network policy {}", networkPolicy);
return networkPolicy;
}
/**
* Generates the PodDisruptionBudget
*
     * @return The generated PodDisruptionBudget
*/
public PodDisruptionBudget generatePodDisruptionBudget() {
return createPodDisruptionBudget();
}
/**
* Sets the object with Kafka listeners configuration
*
     * @param listeners Listeners configuration from the Kafka custom resource
*/
public void setListeners(KafkaListeners listeners) {
this.listeners = listeners;
}
/**
* Sets the object with Kafka authorization configuration
*
     * @param authorization Authorization configuration from the Kafka custom resource
*/
public void setAuthorization(KafkaAuthorization authorization) {
this.authorization = authorization;
}
/**
     * Sets the set with the Kafka pods' external addresses
*
* @param externalAddresses Set with external addresses
*/
public void setExternalAddresses(Set<String> externalAddresses) {
this.externalAddresses = externalAddresses;
}
/**
* Returns true when the Kafka cluster is exposed to the outside of OpenShift / Kubernetes
*
     * @return true when the Kafka cluster is exposed outside of OpenShift / Kubernetes, false otherwise
*/
public boolean isExposed() {
return listeners != null && listeners.getExternal() != null;
}
/**
* Returns true when the Kafka cluster is exposed to the outside of OpenShift using OpenShift routes
*
     * @return true when the Kafka cluster is exposed using OpenShift routes, false otherwise
*/
public boolean isExposedWithRoute() {
return isExposed() && listeners.getExternal() instanceof KafkaListenerExternalRoute;
}
/**
* Returns true when the Kafka cluster is exposed to the outside using LoadBalancers
*
     * @return true when the Kafka cluster is exposed using load balancers, false otherwise
*/
public boolean isExposedWithLoadBalancer() {
return isExposed() && listeners.getExternal() instanceof KafkaListenerExternalLoadBalancer;
}
/**
* Returns true when the Kafka cluster is exposed to the outside using NodePort type services
*
     * @return true when the Kafka cluster is exposed using NodePort type services, false otherwise
*/
public boolean isExposedWithNodePort() {
return isExposed() && listeners.getExternal() instanceof KafkaListenerExternalNodePort;
}
/**
* Returns true when the Kafka cluster is exposed to the outside of Kubernetes using Ingress
*
     * @return true when the Kafka cluster is exposed using Ingress, false otherwise
*/
public boolean isExposedWithIngress() {
return isExposed() && listeners.getExternal() instanceof KafkaListenerExternalIngress;
}
/**
     * Returns the list of broker overrides for external listeners
     *
     * @return The list of broker overrides
*/
private List<ExternalListenerBrokerOverride> getExternalListenerBrokerOverride() {
List<ExternalListenerBrokerOverride> brokerOverride = new ArrayList<>();
if (isExposedWithNodePort()) {
NodePortListenerOverride overrides = ((KafkaListenerExternalNodePort) listeners.getExternal()).getOverrides();
if (overrides != null && overrides.getBrokers() != null) {
brokerOverride.addAll(overrides.getBrokers());
}
} else if (isExposedWithLoadBalancer()) {
LoadBalancerListenerOverride overrides = ((KafkaListenerExternalLoadBalancer) listeners.getExternal()).getOverrides();
if (overrides != null && overrides.getBrokers() != null) {
brokerOverride.addAll(overrides.getBrokers());
}
} else if (isExposedWithRoute()) {
RouteListenerOverride overrides = ((KafkaListenerExternalRoute) listeners.getExternal()).getOverrides();
if (overrides != null && overrides.getBrokers() != null) {
brokerOverride.addAll(overrides.getBrokers());
}
} else if (isExposedWithIngress()) {
IngressListenerConfiguration configuration = ((KafkaListenerExternalIngress) listeners.getExternal()).getConfiguration();
if (configuration != null && configuration.getBrokers() != null) {
brokerOverride.addAll(configuration.getBrokers());
}
}
return brokerOverride;
}
/**
* Returns the bootstrap override for external listeners
*
     * @return The bootstrap override for the external listener, or null if none is configured
*/
public ExternalListenerBootstrapOverride getExternalListenerBootstrapOverride() {
ExternalListenerBootstrapOverride bootstrapOverride = null;
if (isExposedWithNodePort()) {
NodePortListenerOverride overrides = ((KafkaListenerExternalNodePort) listeners.getExternal()).getOverrides();
if (overrides != null) {
bootstrapOverride = overrides.getBootstrap();
}
} else if (isExposedWithLoadBalancer()) {
LoadBalancerListenerOverride overrides = ((KafkaListenerExternalLoadBalancer) listeners.getExternal()).getOverrides();
if (overrides != null) {
bootstrapOverride = overrides.getBootstrap();
}
} else if (isExposedWithRoute()) {
RouteListenerOverride overrides = ((KafkaListenerExternalRoute) listeners.getExternal()).getOverrides();
if (overrides != null) {
bootstrapOverride = overrides.getBootstrap();
}
} else if (isExposedWithIngress()) {
IngressListenerConfiguration configuration = ((KafkaListenerExternalIngress) listeners.getExternal()).getConfiguration();
if (configuration != null) {
bootstrapOverride = configuration.getBootstrap();
}
}
return bootstrapOverride;
}
/**
     * Returns the advertised host override of the external service for the given pod
     *
     * @param podNumber Pod index
     * @return The advertised host override, or null when no override is configured
*/
public String getExternalServiceAdvertisedHostOverride(int podNumber) {
String advertisedHost = null;
List<ExternalListenerBrokerOverride> brokerOverride = getExternalListenerBrokerOverride();
advertisedHost = brokerOverride.stream()
.filter(brokerService -> brokerService != null && brokerService.getBroker() == podNumber
&& brokerService.getAdvertisedHost() != null)
.map(ExternalListenerBrokerOverride::getAdvertisedHost)
.findAny()
.orElse(null);
if (advertisedHost != null && advertisedHost.isEmpty()) {
advertisedHost = null;
}
return advertisedHost;
}
/**
     * Returns the advertised port override of the external service for the given pod
     *
     * @param podNumber Pod index
     * @return The advertised port override, or null when no override is configured
*/
public Integer getExternalServiceAdvertisedPortOverride(int podNumber) {
Integer advertisedPort = null;
List<ExternalListenerBrokerOverride> brokerOverride = getExternalListenerBrokerOverride();
advertisedPort = brokerOverride.stream()
.filter(brokerService -> brokerService != null && brokerService.getBroker() == podNumber
&& brokerService.getAdvertisedPort() != null)
.map(ExternalListenerBrokerOverride::getAdvertisedPort)
.findAny()
.orElse(null);
if (advertisedPort != null && advertisedPort == 0) {
advertisedPort = null;
}
return advertisedPort;
}
/**
* Returns the advertised URL for given pod.
* It will take into account the overrides specified by the user.
     * If some segment is not known (e.g. the hostname for NodePort access), it should be left empty
*
* @param podNumber Pod index
* @param address The advertised hostname
* @param port The advertised port
* @return The advertised URL in format podNumber://address:port (e.g. 1://my-broker-1:9094)
*/
public String getExternalAdvertisedUrl(int podNumber, String address, String port) {
String advertisedHost = getExternalServiceAdvertisedHostOverride(podNumber);
Integer advertisedPort = getExternalServiceAdvertisedPortOverride(podNumber);
String url = String.valueOf(podNumber)
+ "://" + (advertisedHost != null ? advertisedHost : address)
+ ":" + (advertisedPort != null ? advertisedPort : port);
return url;
}
/**
* Returns true when the Kafka cluster is exposed to the outside of OpenShift with TLS enabled
*
     * @return true when the external listener has TLS encryption enabled, false otherwise
*/
public boolean isExposedWithTls() {
if (isExposed()) {
if (listeners.getExternal() instanceof KafkaListenerExternalRoute
|| listeners.getExternal() instanceof KafkaListenerExternalIngress) {
return true;
} else {
if (listeners.getExternal() instanceof KafkaListenerExternalLoadBalancer) {
return ((KafkaListenerExternalLoadBalancer) listeners.getExternal()).isTls();
} else if (listeners.getExternal() instanceof KafkaListenerExternalNodePort) {
return ((KafkaListenerExternalNodePort) listeners.getExternal()).isTls();
}
}
}
return false;
}
@Override
public KafkaConfiguration getConfiguration() {
return (KafkaConfiguration) configuration;
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
src/main/java/bot/commands/admin/Link.java | /*
* Copyright 2020 Md Rafi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bot.commands.admin;
import bot.commands.CommandContext;
import bot.commands.CommandInterface;
import net.dv8tion.jda.api.Permission;
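/**
 * Command which replies with the bot invite link (requesting administrator permission) when invoked
 * without parameters by the bot owner configured in the BOT_OWNER environment variable.
 */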
public class Link implements CommandInterface {
@Override
public void handle(CommandContext c) {
if(c.getCommandParameters().isEmpty() && c.getMember().getId().equals(System.getenv("BOT_OWNER"))) {
c.getChannel().sendTyping().queue();
c.getChannel().sendMessage(c.getEvent().getJDA().getInviteUrl(Permission.ADMINISTRATOR)).reference(c.getMessage()).mentionRepliedUser(false).queue();
}
}
@Override
public String getName() {
return "link";
}
} | [
"\"BOT_OWNER\""
]
| []
| [
"BOT_OWNER"
]
| [] | ["BOT_OWNER"] | java | 1 | 0 | |
v2/configuration_file_test.go | /*
BSD 3-Clause License
Copyright (c) 2021, Outscale SAS
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package osc_test
import (
"context"
"fmt"
"os"
osc "github.com/outscale/osc-sdk-go/v2"
)
// This example shows how load credentials from a local configuration file.
func ExampleConfigurationFile() {
// When running those examples, configuration file may not exist.
// It can be manually created or generated depending of your application.
// Here we are creating it for simplicity sake.
configPath := "/tmp/osc-sdk-go-ExampleConfigurationFile"
createConfigurationFile(configPath)
	// Load configuration from the default location
// You can also use osc.LoadDefaultConfigFile() (~/.osc/config.json by default on Linux or MacOS)
configFile, err := osc.LoadConfigFile(&configPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error while loading default configuration file: %s", err.Error())
os.Exit(1)
}
config, err := configFile.Configuration("default")
if err != nil {
fmt.Fprintf(os.Stderr, "Error while creating configuration: %s", err.Error())
os.Exit(1)
}
ctx, err := configFile.Context(context.Background(), "default")
if err != nil {
fmt.Fprintf(os.Stderr, "Error while creating context: %s", err.Error())
os.Exit(1)
}
client := osc.NewAPIClient(config)
_, httpRes, err := client.SecurityGroupApi.ReadSecurityGroups(ctx).ReadSecurityGroupsRequest(osc.ReadSecurityGroupsRequest{}).Execute()
if err != nil {
fmt.Fprintln(os.Stderr, "Error while reading security groups")
if httpRes != nil {
fmt.Fprintln(os.Stderr, httpRes.Status, httpRes.Body)
}
os.Exit(1)
}
fmt.Println("configuration file journey is over")
// Output: configuration file journey is over
}
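// createConfigurationFile writes a minimal single-profile configuration file to configPath,
// using the access key, secret key and region taken from the OSC_* environment variables.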
func createConfigurationFile(configPath string) {
ak := os.Getenv("OSC_ACCESS_KEY")
sk := os.Getenv("OSC_SECRET_KEY")
region := os.Getenv("OSC_REGION")
content := fmt.Sprintf(`{
"default": {
"access_key": "%s",
"secret_key": "%s",
"region": "%s"
}}`, ak, sk, region)
jsonFile, _ := os.Create(configPath)
defer jsonFile.Close()
jsonFile.WriteString(content)
}
| [
"\"OSC_ACCESS_KEY\"",
"\"OSC_SECRET_KEY\"",
"\"OSC_REGION\""
]
| []
| [
"OSC_ACCESS_KEY",
"OSC_SECRET_KEY",
"OSC_REGION"
]
| [] | ["OSC_ACCESS_KEY", "OSC_SECRET_KEY", "OSC_REGION"] | go | 3 | 0 | |
cmd/drone-docker/main.go | package main
import (
"os"
"github.com/joho/godotenv"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
docker "github.com/drone-plugins/drone-docker"
)
var (
version = "unknown"
)
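// main loads an optional env-file, declares the plugin CLI flags and runs the application.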
func main() {
// Load env-file if it exists first
if env := os.Getenv("PLUGIN_ENV_FILE"); env != "" {
godotenv.Load(env)
}
app := cli.NewApp()
app.Name = "docker plugin"
app.Usage = "docker plugin"
app.Action = run
app.Version = version
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "dry-run",
Usage: "dry run disables docker push",
EnvVar: "PLUGIN_DRY_RUN",
},
cli.StringFlag{
Name: "remote.url",
Usage: "git remote url",
EnvVar: "DRONE_REMOTE_URL",
},
cli.StringFlag{
Name: "commit.sha",
Usage: "git commit sha",
EnvVar: "DRONE_COMMIT_SHA",
Value: "00000000",
},
cli.StringFlag{
Name: "commit.ref",
Usage: "git commit ref",
EnvVar: "DRONE_COMMIT_REF",
},
cli.StringFlag{
Name: "daemon.mirror",
Usage: "docker daemon registry mirror",
EnvVar: "PLUGIN_MIRROR,DOCKER_PLUGIN_MIRROR",
},
cli.StringFlag{
Name: "daemon.storage-driver",
Usage: "docker daemon storage driver",
EnvVar: "PLUGIN_STORAGE_DRIVER",
},
cli.StringFlag{
Name: "daemon.storage-path",
Usage: "docker daemon storage path",
Value: "/var/lib/docker",
EnvVar: "PLUGIN_STORAGE_PATH",
},
cli.StringFlag{
Name: "daemon.bip",
			Usage:  "docker daemon bridge ip address",
EnvVar: "PLUGIN_BIP",
},
cli.StringFlag{
Name: "daemon.mtu",
Usage: "docker daemon custom mtu setting",
EnvVar: "PLUGIN_MTU",
},
cli.StringSliceFlag{
Name: "daemon.dns",
Usage: "docker daemon dns server",
EnvVar: "PLUGIN_CUSTOM_DNS",
},
cli.StringSliceFlag{
Name: "daemon.dns-search",
Usage: "docker daemon dns search domains",
EnvVar: "PLUGIN_CUSTOM_DNS_SEARCH",
},
cli.BoolFlag{
Name: "daemon.insecure",
Usage: "docker daemon allows insecure registries",
EnvVar: "PLUGIN_INSECURE",
},
cli.BoolFlag{
Name: "daemon.ipv6",
Usage: "docker daemon IPv6 networking",
EnvVar: "PLUGIN_IPV6",
},
cli.BoolFlag{
Name: "daemon.experimental",
Usage: "docker daemon Experimental mode",
EnvVar: "PLUGIN_EXPERIMENTAL",
},
cli.BoolFlag{
Name: "daemon.debug",
Usage: "docker daemon executes in debug mode",
EnvVar: "PLUGIN_DEBUG,DOCKER_LAUNCH_DEBUG",
},
cli.BoolFlag{
Name: "daemon.off",
Usage: "don't start the docker daemon",
EnvVar: "PLUGIN_DAEMON_OFF",
},
cli.StringFlag{
Name: "dockerfile",
Usage: "build dockerfile",
Value: "Dockerfile",
EnvVar: "PLUGIN_DOCKERFILE",
},
cli.StringFlag{
Name: "context",
Usage: "build context",
Value: ".",
EnvVar: "PLUGIN_CONTEXT",
},
cli.StringSliceFlag{
Name: "tags",
Usage: "build tags",
Value: &cli.StringSlice{"latest"},
EnvVar: "PLUGIN_TAG,PLUGIN_TAGS",
FilePath: ".tags",
},
cli.BoolFlag{
Name: "tags.auto",
Usage: "default build tags",
EnvVar: "PLUGIN_DEFAULT_TAGS,PLUGIN_AUTO_TAG",
},
cli.StringFlag{
Name: "tags.suffix",
Usage: "default build tags with suffix",
EnvVar: "PLUGIN_DEFAULT_SUFFIX,PLUGIN_AUTO_TAG_SUFFIX",
},
cli.StringSliceFlag{
Name: "args",
Usage: "build args",
EnvVar: "PLUGIN_BUILD_ARGS",
},
cli.StringSliceFlag{
Name: "args-from-env",
Usage: "build args",
EnvVar: "PLUGIN_BUILD_ARGS_FROM_ENV",
},
cli.BoolFlag{
Name: "quiet",
Usage: "quiet docker build",
EnvVar: "PLUGIN_QUIET",
},
cli.StringFlag{
Name: "target",
Usage: "build target",
EnvVar: "PLUGIN_TARGET",
},
cli.StringSliceFlag{
Name: "cache-from",
Usage: "images to consider as cache sources",
EnvVar: "PLUGIN_CACHE_FROM",
},
cli.BoolFlag{
Name: "squash",
Usage: "squash the layers at build time",
EnvVar: "PLUGIN_SQUASH",
},
cli.BoolTFlag{
Name: "pull-image",
Usage: "force pull base image at build time",
EnvVar: "PLUGIN_PULL_IMAGE",
},
cli.BoolFlag{
Name: "compress",
Usage: "compress the build context using gzip",
EnvVar: "PLUGIN_COMPRESS",
},
cli.StringFlag{
Name: "repo",
Usage: "docker repository",
EnvVar: "PLUGIN_REPO",
},
cli.StringSliceFlag{
Name: "custom-labels",
Usage: "additional k=v labels",
EnvVar: "PLUGIN_CUSTOM_LABELS",
},
cli.StringSliceFlag{
Name: "label-schema",
Usage: "label-schema labels",
EnvVar: "PLUGIN_LABEL_SCHEMA",
},
cli.BoolTFlag{
Name: "auto-label",
Usage: "auto-label true|false",
EnvVar: "PLUGIN_AUTO_LABEL",
},
cli.StringFlag{
Name: "link",
Usage: "link https://example.com/org/repo-name",
EnvVar: "PLUGIN_REPO_LINK,DRONE_REPO_LINK",
},
cli.StringFlag{
Name: "docker.registry",
Usage: "docker registry",
Value: "https://index.docker.io/v1/",
EnvVar: "PLUGIN_REGISTRY,DOCKER_REGISTRY",
},
cli.StringFlag{
Name: "docker.username",
Usage: "docker username",
EnvVar: "PLUGIN_USERNAME,DOCKER_USERNAME",
},
cli.StringFlag{
Name: "docker.password",
Usage: "docker password",
EnvVar: "PLUGIN_PASSWORD,DOCKER_PASSWORD",
},
cli.StringFlag{
Name: "docker.email",
Usage: "docker email",
EnvVar: "PLUGIN_EMAIL,DOCKER_EMAIL",
},
cli.StringFlag{
Name: "docker.config",
Usage: "docker json dockerconfig content",
EnvVar: "PLUGIN_CONFIG,DOCKER_PLUGIN_CONFIG",
},
cli.BoolTFlag{
Name: "docker.purge",
			Usage: "docker should clean up images",
EnvVar: "PLUGIN_PURGE",
},
cli.StringFlag{
Name: "repo.branch",
Usage: "repository default branch",
EnvVar: "DRONE_REPO_BRANCH",
},
cli.BoolFlag{
Name: "no-cache",
Usage: "do not use cached intermediate containers",
EnvVar: "PLUGIN_NO_CACHE",
},
cli.StringSliceFlag{
Name: "add-host",
Usage: "additional host:IP mapping",
EnvVar: "PLUGIN_ADD_HOST",
},
}
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}
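// run maps the CLI flags and environment variables onto the Docker plugin settings and executes the plugin.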
func run(c *cli.Context) error {
plugin := docker.Plugin{
Dryrun: c.Bool("dry-run"),
Cleanup: c.BoolT("docker.purge"),
Login: docker.Login{
Registry: c.String("docker.registry"),
Username: c.String("docker.username"),
Password: c.String("docker.password"),
Email: c.String("docker.email"),
Config: c.String("docker.config"),
},
Build: docker.Build{
Remote: c.String("remote.url"),
Name: c.String("commit.sha"),
Dockerfile: c.String("dockerfile"),
Context: c.String("context"),
Tags: c.StringSlice("tags"),
Args: c.StringSlice("args"),
ArgsEnv: c.StringSlice("args-from-env"),
Target: c.String("target"),
Squash: c.Bool("squash"),
Pull: c.BoolT("pull-image"),
CacheFrom: c.StringSlice("cache-from"),
Compress: c.Bool("compress"),
Repo: c.String("repo"),
Labels: c.StringSlice("custom-labels"),
LabelSchema: c.StringSlice("label-schema"),
AutoLabel: c.BoolT("auto-label"),
Link: c.String("link"),
NoCache: c.Bool("no-cache"),
AddHost: c.StringSlice("add-host"),
Quiet: c.Bool("quiet"),
},
Daemon: docker.Daemon{
Registry: c.String("docker.registry"),
Mirror: c.String("daemon.mirror"),
StorageDriver: c.String("daemon.storage-driver"),
StoragePath: c.String("daemon.storage-path"),
Insecure: c.Bool("daemon.insecure"),
Disabled: c.Bool("daemon.off"),
IPv6: c.Bool("daemon.ipv6"),
Debug: c.Bool("daemon.debug"),
Bip: c.String("daemon.bip"),
DNS: c.StringSlice("daemon.dns"),
DNSSearch: c.StringSlice("daemon.dns-search"),
MTU: c.String("daemon.mtu"),
Experimental: c.Bool("daemon.experimental"),
},
}
if c.Bool("tags.auto") {
if docker.UseDefaultTag( // return true if tag event or default branch
c.String("commit.ref"),
c.String("repo.branch"),
) {
tag, err := docker.DefaultTagSuffix(
c.String("commit.ref"),
c.String("tags.suffix"),
)
if err != nil {
logrus.Printf("cannot build docker image for %s, invalid semantic version", c.String("commit.ref"))
return err
}
plugin.Build.Tags = tag
} else {
logrus.Printf("skipping automated docker build for %s", c.String("commit.ref"))
return nil
}
}
return plugin.Exec()
}
| [
"\"PLUGIN_ENV_FILE\""
]
| []
| [
"PLUGIN_ENV_FILE"
]
| [] | ["PLUGIN_ENV_FILE"] | go | 1 | 0 | |
anpr_camera_stream_og.py | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import argparse
import csv
import io
import os
from datetime import datetime
from threading import Thread
import cv2
import requests
from PIL import Image
os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
def parse_arguments():
parser = argparse.ArgumentParser(
description=
        'Read license plates from an RTSP stream and save the results in a CSV file.',
epilog=
'For example: anpr_camera_stream.py --camera rtsp://192.168.1.2:5554/camera --api-key TOKEN --regions fr --output /path/to/output.csv'
)
parser.add_argument('--api-key', help='Your API key.', required=True)
parser.add_argument('--camera', help='RTSP stream url.', required=True)
parser.add_argument(
'--regions',
help='Regions http://docs.platerecognizer.com/#regions-supported.',
required=False)
parser.add_argument('--output', help='CSV output file.', required=True)
parser.add_argument(
'--show-image',
help='Show a window with the frame being sent for recognition.',
action='store_true')
parser.add_argument(
'--inference-server',
help='Server used for recognition. Default to cloud server.',
default='https://api.platerecognizer.com/v1/plate-reader/')
return parser.parse_args()
class ThreadedCamera(object):
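    """Read frames from the RTSP stream on a daemon thread so the most recent frame is always available."""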
def __init__(self, args):
print("camera")
print(args.camera)
self.capture = cv2.VideoCapture(args.camera, cv2.CAP_FFMPEG)
self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)
if not self.capture.isOpened():
print('No stream available: ' + args.camera)
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
self.thread.start()
self.frame = None
self.status = False
def update(self):
while self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
    def get_frame(self):
if self.frame is None or not self.status:
return
cv2.waitKey(1)
return self.frame
def capture(args, writer):
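    """Grab frames from the camera, send each one to the recognition API, and write detected plates to the CSV writer."""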
camera = ThreadedCamera(args)
while camera.capture.isOpened():
frame = camera.get_frame()
print("frame")
print(frame)
if frame is None:
continue
        if args.show_image:
            cv2.imshow('frame', frame)
buffer = io.BytesIO()
im = Image.fromarray(frame)
im.save(buffer, 'JPEG')
buffer.seek(0)
response = requests.post(
args.inference_server,
files=dict(upload=buffer),
data=dict(regions=args.regions or ''),
headers={'Authorization': 'Token ' + args.api_key})
res = response.json()
print(frame)
for result in res['results']:
writer.writerow(
dict(date=datetime.today().strftime('%x %X'),
license_plate=result['plate'],
score=result['score'],
dscore=result['dscore'],
vehicle_type=result['vehicle']['type']))
def main():
args = parse_arguments()
with open(args.output, 'w') as output:
fields = ['date', 'license_plate', 'score', 'dscore', 'vehicle_type']
writer = csv.DictWriter(output, fieldnames=fields)
writer.writeheader()
capture(args, writer)
if __name__ == "__main__":
main() | []
| []
| [
"OPENCV_FFMPEG_CAPTURE_OPTIONS"
]
| [] | ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] | python | 1 | 0 | |
pkg/util/kubernetes/kubernetes.go | // Copyright 2017 The nats-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"os"
"path/filepath"
"strconv"
"time"
authenticationv1 "k8s.io/api/authentication/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
k8srand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // for gcp auth
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"github.com/nats-io/nats-operator/pkg/apis/nats/v1alpha2"
natsclient "github.com/nats-io/nats-operator/pkg/client/clientset/versioned"
natsalphav2client "github.com/nats-io/nats-operator/pkg/client/clientset/versioned/typed/nats/v1alpha2"
natsconf "github.com/nats-io/nats-operator/pkg/conf"
"github.com/nats-io/nats-operator/pkg/constants"
"github.com/nats-io/nats-operator/pkg/util/retryutil"
"github.com/nats-io/nats-operator/pkg/util/versionCheck"
)
const (
TolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints"
versionAnnotationKey = "nats.version"
)
const (
LabelAppKey = "app"
LabelAppValue = "nats"
LabelClusterNameKey = "nats_cluster"
LabelClusterVersionKey = "nats_version"
)
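// GetNATSVersion returns the NATS server version recorded in the pod's annotations.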
func GetNATSVersion(pod *v1.Pod) string {
return pod.Annotations[versionAnnotationKey]
}
func SetNATSVersion(pod *v1.Pod, version string) {
pod.Annotations[versionAnnotationKey] = version
pod.Labels[LabelClusterVersionKey] = version
}
func GetPodNames(pods []*v1.Pod) []string {
if len(pods) == 0 {
return nil
}
res := []string{}
for _, p := range pods {
res = append(res, p.Name)
}
return res
}
func MakeNATSImage(version string, serverImage string) string {
return fmt.Sprintf("%s:%v", serverImage, version)
}
func PodWithNodeSelector(p *v1.Pod, ns map[string]string) *v1.Pod {
p.Spec.NodeSelector = ns
return p
}
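// createService creates a Kubernetes Service for the NATS cluster with the given ports, selectors and owner reference.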
func createService(kubecli corev1client.CoreV1Interface, svcName, clusterName, ns, clusterIP string, ports []v1.ServicePort, owner metav1.OwnerReference, selectors map[string]string, tolerateUnready bool, labels map[string]string, annotations map[string]string) error {
svc := newNatsServiceManifest(svcName, clusterName, clusterIP, ports, selectors, tolerateUnready, labels, annotations)
addOwnerRefToObject(svc.GetObjectMeta(), owner)
_, err := kubecli.Services(ns).Create(context.TODO(), svc, metav1.CreateOptions{})
return err
}
// ClientServiceName returns the name of the client service based on the specified cluster name.
func ClientServiceName(clusterName string) string {
return clusterName
}
func CreateClientService(
kubecli corev1client.CoreV1Interface,
clusterName, ns string,
owner metav1.OwnerReference,
websocketPort int,
labels map[string]string,
annotations map[string]string,
) error {
ports := []v1.ServicePort{
{
Name: "client",
Port: constants.ClientPort,
TargetPort: intstr.FromInt(constants.ClientPort),
Protocol: v1.ProtocolTCP,
},
}
if websocketPort > 0 {
ports = append(ports, v1.ServicePort{
Name: "websocket",
Port: int32(websocketPort),
TargetPort: intstr.FromInt(websocketPort),
Protocol: v1.ProtocolTCP,
})
}
selectors := LabelsForCluster(clusterName)
return createService(kubecli, ClientServiceName(clusterName), clusterName, ns, "", ports, owner, selectors, false, labels, annotations)
}
func ManagementServiceName(clusterName string) string {
return clusterName + "-mgmt"
}
// CreateMgmtService creates a headless service for NATS management purposes.
func CreateMgmtService(
kubecli corev1client.CoreV1Interface,
clusterName, clusterVersion, ns string,
owner metav1.OwnerReference,
websocketPort int,
gatewayPort int,
leafnodePort int,
labels map[string]string,
annotations map[string]string,
) error {
ports := []v1.ServicePort{
{
Name: "cluster",
Port: constants.ClusterPort,
TargetPort: intstr.FromInt(constants.ClusterPort),
Protocol: v1.ProtocolTCP,
},
{
Name: "monitoring",
Port: constants.MonitoringPort,
TargetPort: intstr.FromInt(constants.MonitoringPort),
Protocol: v1.ProtocolTCP,
},
{
Name: "metrics",
Port: constants.MetricsPort,
TargetPort: intstr.FromInt(constants.MetricsPort),
Protocol: v1.ProtocolTCP,
},
}
if websocketPort > 0 {
ports = append(ports, v1.ServicePort{
Name: "websocket",
Port: int32(websocketPort),
TargetPort: intstr.FromInt(websocketPort),
Protocol: v1.ProtocolTCP,
})
}
if gatewayPort > 0 {
ports = append(ports, v1.ServicePort{
Name: "gateways",
Port: int32(gatewayPort),
TargetPort: intstr.FromInt(gatewayPort),
Protocol: v1.ProtocolTCP,
})
}
if leafnodePort > 0 {
ports = append(ports, v1.ServicePort{
Name: "leafnodes",
Port: int32(leafnodePort),
TargetPort: intstr.FromInt(leafnodePort),
Protocol: v1.ProtocolTCP,
})
}
selectors := LabelsForCluster(clusterName)
selectors[LabelClusterVersionKey] = clusterVersion
return createService(kubecli, ManagementServiceName(clusterName), clusterName, ns, v1.ClusterIPNone, ports, owner, selectors, true, labels, annotations)
}
// addTLSConfig fills in the TLS configuration to be used in the config map.
func addTLSConfig(sconfig *natsconf.ServerConfig, cs v1alpha2.ClusterSpec) {
if cs.TLS == nil {
return
}
if cs.TLS.EnableHttps {
// Replace monitoring port with https one.
sconfig.HTTPSPort = int(constants.MonitoringPort)
sconfig.HTTPPort = 0
}
if cs.TLS.ServerSecret != "" {
sconfig.TLS = &natsconf.TLSConfig{
CAFile: constants.ServerCertsMountPath + "/" + cs.TLS.ServerSecretCAFileName,
CertFile: constants.ServerCertsMountPath + "/" + cs.TLS.ServerSecretCertFileName,
KeyFile: constants.ServerCertsMountPath + "/" + cs.TLS.ServerSecretKeyFileName,
}
if cs.TLS.ClientsTLSTimeout > 0 {
sconfig.TLS.Timeout = cs.TLS.ClientsTLSTimeout
}
		// Verifying client certs is disabled by default.
sconfig.TLS.Verify = cs.TLS.Verify
// Customize cipher suites and curve preferences.
sconfig.TLS.CipherSuites = cs.TLS.CipherSuites
sconfig.TLS.CurvePreferences = cs.TLS.CurvePreferences
}
if cs.TLS.RoutesSecret != "" {
sconfig.Cluster.TLS = &natsconf.TLSConfig{
CAFile: resolvePath(constants.RoutesCertsMountPath, cs.TLS.RoutesSecretCAFileName),
CertFile: resolvePath(constants.RoutesCertsMountPath, cs.TLS.RoutesSecretCertFileName),
KeyFile: resolvePath(constants.RoutesCertsMountPath, cs.TLS.RoutesSecretKeyFileName),
}
if cs.TLS.RoutesTLSTimeout > 0 {
sconfig.Cluster.TLS.Timeout = cs.TLS.RoutesTLSTimeout
}
}
if cs.TLS.GatewaySecret != "" {
sconfig.Gateway.TLS = &natsconf.TLSConfig{
CAFile: resolvePath(constants.GatewayCertsMountPath, cs.TLS.GatewaySecretCAFileName),
CertFile: resolvePath(constants.GatewayCertsMountPath, cs.TLS.GatewaySecretCertFileName),
KeyFile: resolvePath(constants.GatewayCertsMountPath, cs.TLS.GatewaySecretKeyFileName),
}
if cs.TLS.GatewaysTLSTimeout > 0 {
sconfig.Gateway.TLS.Timeout = cs.TLS.GatewaysTLSTimeout
}
}
if cs.TLS.LeafnodeSecret != "" {
sconfig.LeafNode.TLS = &natsconf.TLSConfig{
CAFile: resolvePath(constants.LeafnodeCertsMountPath, cs.TLS.LeafnodeSecretCAFileName),
CertFile: resolvePath(constants.LeafnodeCertsMountPath, cs.TLS.LeafnodeSecretCertFileName),
KeyFile: resolvePath(constants.LeafnodeCertsMountPath, cs.TLS.LeafnodeSecretKeyFileName),
}
timeout := cs.TLS.LeafnodesTLSTimeout
if timeout > 0 {
sconfig.LeafNode.TLS.Timeout = timeout
}
}
if cs.TLS.WebsocketSecret != "" {
sconfig.Websocket.TLS = &natsconf.TLSConfig{
CAFile: resolvePath(constants.WebsocketCertsMountPath, cs.TLS.WebsocketSecretCAFileName),
CertFile: resolvePath(constants.WebsocketCertsMountPath, cs.TLS.WebsocketSecretCertFileName),
KeyFile: resolvePath(constants.WebsocketCertsMountPath, cs.TLS.WebsocketSecretKeyFileName),
}
timeout := cs.TLS.WebsocketTLSTimeout
if timeout > 0 {
sconfig.Websocket.TLS.Timeout = timeout
}
}
if cs.Auth != nil && cs.Auth.TLSVerifyAndMap {
sconfig.TLS.VerifyAndMap = true
}
}
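// addGatewayAuthConfig loads gateway authorization settings from the referenced secret into the server config.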
func addGatewayAuthConfig(
kubecli corev1client.CoreV1Interface,
ns string,
sconfig *natsconf.ServerConfig,
cs v1alpha2.ClusterSpec,
) error {
if cs.Auth == nil || cs.GatewayConfig == nil {
return nil
}
if cs.Auth.GatewayAuthSecret != "" {
result, err := kubecli.Secrets(ns).Get(context.TODO(), cs.Auth.GatewayAuthSecret, metav1.GetOptions{})
if err != nil {
return err
}
var gatewayAuth *natsconf.AuthorizationConfig
for _, v := range result.Data {
err := json.Unmarshal(v, &gatewayAuth)
if err != nil {
return err
}
if cs.Auth.GatewayAuthTimeout > 0 {
gatewayAuth.Timeout = cs.Auth.GatewayAuthTimeout
}
sconfig.Gateway.Authorization = gatewayAuth
break
}
return nil
}
return nil
}
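// addClusterAuthConfig loads route (cluster) authorization settings from the referenced secret into the server config.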
func addClusterAuthConfig(
kubecli corev1client.CoreV1Interface,
ns string,
sconfig *natsconf.ServerConfig,
cs v1alpha2.ClusterSpec,
) error {
if cs.Auth == nil {
return nil
}
if cs.Auth.ClusterAuthSecret != "" {
result, err := kubecli.Secrets(ns).Get(context.TODO(), cs.Auth.ClusterAuthSecret, metav1.GetOptions{})
if err != nil {
return err
}
var clusterAuth *natsconf.AuthorizationConfig
for _, v := range result.Data {
err := json.Unmarshal(v, &clusterAuth)
if err != nil {
return err
}
if cs.Auth.ClusterAuthTimeout > 0 {
clusterAuth.Timeout = cs.Auth.ClusterAuthTimeout
}
sconfig.Cluster.Authorization = clusterAuth
break
}
return nil
}
return nil
}
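// addAuthConfig fills in client authorization, either from NatsServiceRole service account tokens, an explicit clients auth secret, or an included auth file.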
func addAuthConfig(
kubecli corev1client.CoreV1Interface,
operatorcli natsalphav2client.NatsV1alpha2Interface,
ns string,
clusterName string,
sconfig *natsconf.ServerConfig,
cs v1alpha2.ClusterSpec,
owner metav1.OwnerReference,
) error {
if cs.Auth == nil {
return nil
}
if cs.Auth.EnableServiceAccounts {
roleSelector := map[string]string{
LabelClusterNameKey: clusterName,
}
users := make([]*natsconf.User, 0)
roles, err := operatorcli.NatsServiceRoles(ns).List(context.TODO(), metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(roleSelector).String(),
})
if err != nil {
return err
}
namespaces, err := kubecli.Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
for _, nsObj := range namespaces.Items {
ns = nsObj.Name
for _, role := range roles.Items {
// Lookup for a ServiceAccount with the same name as the NatsServiceRole.
sa, err := kubecli.ServiceAccounts(ns).Get(context.TODO(), role.Name, metav1.GetOptions{})
if err != nil {
// TODO: Collect created secrets when the service account no
// longer exists, currently only deleted when the NatsServiceRole
// is deleted since it is the owner of the object.
// Skip since cannot map unless valid service account is found.
continue
}
// TODO: Add support for expiration of the issued tokens.
tokenSecretName := fmt.Sprintf("%s-%s-bound-token", role.Name, clusterName)
cs, err := kubecli.Secrets(ns).Get(context.TODO(), tokenSecretName, metav1.GetOptions{})
if err == nil {
					// We always regenerate the full set of users and apply it; if there
					// is a diff, the reloader will apply the changes.
user := &natsconf.User{
User: role.Name,
Password: string(cs.Data["token"]),
Permissions: &natsconf.Permissions{
Publish: role.Spec.Permissions.Publish,
Subscribe: role.Spec.Permissions.Subscribe,
},
}
users = append(users, user)
continue
}
// Create the secret, then make a service token request, and finally
// update the secret with the token mapped to the service account.
tokenSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: tokenSecretName,
Labels: LabelsForCluster(clusterName),
},
}
// When the role that was mapped is deleted, then also delete the secret.
addOwnerRefToObject(tokenSecret.GetObjectMeta(), role.AsOwner())
tokenSecret, err = kubecli.Secrets(ns).Create(context.TODO(), tokenSecret, metav1.CreateOptions{})
if err != nil {
return err
}
// Issue token with audience set for the NATS cluster in this namespace only,
// this will prevent the token from being usable against the API Server.
ar := &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{fmt.Sprintf("nats://%s.%s.svc", clusterName, ns)},
// Service Token will be valid for as long as the created secret exists.
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "Secret",
APIVersion: "v1",
Name: tokenSecret.Name,
UID: tokenSecret.UID,
},
},
}
tr, err := kubecli.ServiceAccounts(ns).CreateToken(context.TODO(), sa.Name, ar, metav1.CreateOptions{})
if err != nil {
return err
}
if err == nil {
// Update secret with issued token, then save the user in the NATS Config.
token := tr.Status.Token
tokenSecret.Data = map[string][]byte{
"token": []byte(token),
}
tokenSecret, err = kubecli.Secrets(ns).Update(context.TODO(), tokenSecret, metav1.UpdateOptions{})
if err != nil {
return err
}
user := &natsconf.User{
User: role.Name,
Password: string(token),
Permissions: &natsconf.Permissions{
Publish: role.Spec.Permissions.Publish,
Subscribe: role.Spec.Permissions.Subscribe,
},
}
users = append(users, user)
}
}
}
// Expand authorization rules from the service account tokens.
sconfig.Authorization = &natsconf.AuthorizationConfig{
Users: users,
}
return nil
} else if cs.Auth.ClientsAuthSecret != "" {
// Authorization implementation using a secret with the explicit
// configuration of all the accounts from a cluster, cannot be
// used together with service accounts.
result, err := kubecli.Secrets(ns).Get(context.TODO(), cs.Auth.ClientsAuthSecret, metav1.GetOptions{})
if err != nil {
return err
}
var clientAuth *natsconf.AuthorizationConfig
for _, v := range result.Data {
err := json.Unmarshal(v, &clientAuth)
if err != nil {
return err
}
if cs.Auth.ClientsAuthTimeout > 0 {
clientAuth.Timeout = cs.Auth.ClientsAuthTimeout
}
sconfig.Authorization = clientAuth
break
}
return nil
} else if cs.Auth.ClientsAuthFile != "" {
sconfig.Authorization = &natsconf.AuthorizationConfig{
Include: cs.Auth.ClientsAuthFile,
}
return nil
}
return nil
}
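// addGatewayConfig fills in the gateway name, port and remote gateways from the cluster spec.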
func addGatewayConfig(sconfig *natsconf.ServerConfig, cluster v1alpha2.ClusterSpec) {
gateways := make([]*natsconf.RemoteGatewayOpts, 0)
for _, gw := range cluster.GatewayConfig.Gateways {
sgw := &natsconf.RemoteGatewayOpts{
Name: gw.Name,
URL: gw.URL,
}
gateways = append(gateways, sgw)
}
sconfig.Gateway = &natsconf.GatewayConfig{
Name: cluster.GatewayConfig.Name,
Port: cluster.GatewayConfig.Port,
Gateways: gateways,
RejectUnknown: cluster.GatewayConfig.RejectUnknown,
}
if cluster.Pod != nil && cluster.Pod.AdvertiseExternalIP {
sconfig.Gateway.Include = filepath.Join(".", constants.BootConfigGatewayFilePath)
}
return
}
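// addLeafnodeConfig fills in the leafnode port and remote leafnode connections from the cluster spec.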
func addLeafnodeConfig(sconfig *natsconf.ServerConfig, cluster v1alpha2.ClusterSpec) {
sconfig.LeafNode = &natsconf.LeafNodeServerConfig{
Port: cluster.LeafNodeConfig.Port,
}
for _, r := range cluster.LeafNodeConfig.Remotes {
var urls []string
if r.URL != "" {
urls = append(urls, r.URL)
}
if len(r.URLs) > 0 {
urls = append(urls, r.URLs...)
}
sconfig.LeafNode.Remotes = append(sconfig.LeafNode.Remotes, natsconf.LeafNodeRemote{
URLs: urls,
Credentials: r.Credentials,
})
}
if cluster.Pod != nil && cluster.Pod.AdvertiseExternalIP {
sconfig.LeafNode.Include = filepath.Join(".", constants.BootConfigGatewayFilePath)
}
return
}
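// addWebsocketConfig fills in the websocket listener settings from the cluster spec.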
func addWebsocketConfig(sconfig *natsconf.ServerConfig, cluster v1alpha2.ClusterSpec) {
if cluster.WebsocketConfig == nil {
return
}
sconfig.Websocket = &natsconf.WebsocketConfig{
Listen: fmt.Sprintf(":%d", cluster.WebsocketConfig.Port),
HandshakeTimeout: cluster.WebsocketConfig.HandshakeTimeout,
Compression: cluster.WebsocketConfig.Compression,
}
// WebsocketConfig.TLS added in addTLSConfig later.
}
// addOperatorConfig fills in the operator configuration to be used in the config map.
func addOperatorConfig(sconfig *natsconf.ServerConfig, cs v1alpha2.ClusterSpec) {
if cs.OperatorConfig == nil {
return
}
sconfig.JWT = filepath.Join(constants.OperatorJWTMountPath, constants.DefaultOperatorJWTFileName)
sconfig.SystemAccount = cs.OperatorConfig.SystemAccount
sconfig.Resolver = cs.OperatorConfig.Resolver
}
// CreateAndWaitPod is a util for testing.
// We should eventually get rid of this in the critical code path and move it to a test util.
func CreateAndWaitPod(kubecli corev1client.CoreV1Interface, ns string, pod *v1.Pod, timeout time.Duration) (*v1.Pod, error) {
_, err := kubecli.Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
if err != nil {
return nil, err
}
interval := 5 * time.Second
var retPod *v1.Pod
err = retryutil.Retry(interval, int(timeout/(interval)), func() (bool, error) {
retPod, err = kubecli.Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
switch retPod.Status.Phase {
case v1.PodRunning:
return true, nil
case v1.PodPending:
return false, nil
default:
return false, fmt.Errorf("unexpected pod status.phase: %v", retPod.Status.Phase)
}
})
if err != nil {
if retryutil.IsRetryFailure(err) {
return nil, fmt.Errorf("failed to wait pod running, it is still pending: %v", err)
}
return nil, fmt.Errorf("failed to wait pod running: %v", err)
}
return retPod, nil
}
// ConfigSecret returns the name of the secret that contains the configuration for the NATS cluster with the specified name.
func ConfigSecret(clusterName string) string {
return clusterName
}
// CreateConfigSecret creates the secret that contains the configuration file for a given NATS cluster.
func CreateConfigSecret(kubecli corev1client.CoreV1Interface, operatorcli natsalphav2client.NatsV1alpha2Interface, clusterName, ns string, cluster v1alpha2.ClusterSpec, owner metav1.OwnerReference) error {
sconfig := &natsconf.ServerConfig{
Port: int(constants.ClientPort),
HTTPPort: int(constants.MonitoringPort),
Cluster: &natsconf.ClusterConfig{
Port: int(constants.ClusterPort),
},
}
if cluster.ExtraRoutes != nil {
routes := make([]string, 0)
for _, extraCluster := range cluster.ExtraRoutes {
switch {
case extraCluster.Route != "":
// If route is explicit just include as is.
routes = append(routes, extraCluster.Route)
case extraCluster.Cluster != "":
route := fmt.Sprintf("nats://%s:%d",
ManagementServiceName(extraCluster.Cluster),
constants.ClusterPort)
routes = append(routes, route)
}
}
sconfig.Cluster.Routes = routes
}
if cluster.UseServerName {
sconfig.ServerName = "$SERVER_NAME"
}
addConfig(sconfig, cluster)
err := addAuthConfig(kubecli, operatorcli, ns, clusterName, sconfig, cluster, owner)
if err != nil {
return err
}
err = addClusterAuthConfig(kubecli, ns, sconfig, cluster)
if err != nil {
return err
}
err = addGatewayAuthConfig(kubecli, ns, sconfig, cluster)
if err != nil {
return err
}
rawConfig, err := natsconf.Marshal(sconfig)
if err != nil {
return err
}
if cluster.UseServerName {
rawConfig = bytes.Replace(rawConfig, []byte(`"$SERVER_NAME"`), []byte("$SERVER_NAME"), -1)
}
// FIXME: Quoted "include" causes include to be ignored.
rawConfig = bytes.Replace(rawConfig, []byte(`"include":`), []byte("include "), -1)
labels := LabelsForCluster(clusterName)
cm := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: ConfigSecret(clusterName),
Labels: labels,
},
Data: map[string][]byte{
constants.ConfigFileName: rawConfig,
},
}
addOwnerRefToObject(cm.GetObjectMeta(), owner)
_, err = kubecli.Secrets(ns).Create(context.TODO(), cm, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
// Skip in case it was created already and update instead
// with the latest configuration.
_, err = kubecli.Secrets(ns).Update(context.TODO(), cm, metav1.UpdateOptions{})
return err
}
return nil
}
// UpdateConfigSecret applies the new configuration of the cluster,
// such as modifying the routes available in the cluster.
func UpdateConfigSecret(
kubecli corev1client.CoreV1Interface,
operatorcli natsalphav2client.NatsV1alpha2Interface,
clusterName, ns string,
cluster v1alpha2.ClusterSpec,
owner metav1.OwnerReference,
) error {
sconfig := &natsconf.ServerConfig{
Port: int(constants.ClientPort),
HTTPPort: int(constants.MonitoringPort),
Cluster: &natsconf.ClusterConfig{
Port: int(constants.ClusterPort),
},
}
// We need to add cluster auth config before generating routes, since we need auth info there
err := addClusterAuthConfig(kubecli, ns, sconfig, cluster)
if err != nil {
return err
}
// List all available pods then generate the routes
// for the NATS cluster.
routes := make([]string, 0)
podList, err := kubecli.Pods(ns).List(context.TODO(), ClusterListOpt(clusterName))
if err != nil {
return err
}
for _, pod := range podList.Items {
var route string
// Skip pods that have failed
switch pod.Status.Phase {
case "Failed":
continue
}
if sconfig.Cluster.Authorization != nil {
route = fmt.Sprintf("nats://%s:%s@%s.%s.%s.svc:%d",
sconfig.Cluster.Authorization.Username, sconfig.Cluster.Authorization.Password, pod.Name, ManagementServiceName(clusterName), ns, constants.ClusterPort)
} else {
route = fmt.Sprintf("nats://%s.%s.%s.svc:%d",
pod.Name, ManagementServiceName(clusterName), ns, constants.ClusterPort)
}
routes = append(routes, route)
}
if cluster.ExtraRoutes != nil {
for _, extraCluster := range cluster.ExtraRoutes {
switch {
case extraCluster.Route != "":
// If route is explicit just include as is.
routes = append(routes, extraCluster.Route)
case extraCluster.Cluster != "":
route := fmt.Sprintf("nats://%s:%d",
ManagementServiceName(extraCluster.Cluster),
constants.ClusterPort)
routes = append(routes, route)
}
}
}
sconfig.Cluster.Routes = routes
if cluster.UseServerName {
sconfig.ServerName = "$SERVER_NAME"
}
addConfig(sconfig, cluster)
err = addAuthConfig(kubecli, operatorcli, ns, clusterName, sconfig, cluster, owner)
if err != nil {
return err
}
err = addGatewayAuthConfig(kubecli, ns, sconfig, cluster)
if err != nil {
return err
}
rawConfig, err := natsconf.Marshal(sconfig)
if err != nil {
return err
}
// FIXME: Quoted "include" causes include to be ignored.
rawConfig = bytes.Replace(rawConfig, []byte(`"include":`), []byte("include "), -1)
// Replace server name so that it is unquoted and evaled as an env var.
if cluster.UseServerName {
rawConfig = bytes.Replace(rawConfig, []byte(`"$SERVER_NAME"`), []byte("$SERVER_NAME"), -1)
}
cm, err := kubecli.Secrets(ns).Get(context.TODO(), clusterName, metav1.GetOptions{})
if err != nil {
return err
}
// Make sure that the secret has the required labels.
if cm.Labels == nil {
cm.Labels = make(map[string]string)
}
for key, val := range LabelsForCluster(clusterName) {
cm.Labels[key] = val
}
// Update the configuration.
cm.Data[constants.ConfigFileName] = rawConfig
_, err = kubecli.Secrets(ns).Update(context.TODO(), cm, metav1.UpdateOptions{})
return err
}
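// addConfig applies general server settings (logging, limits, lame duck duration) plus the gateway, leafnode, websocket, TLS and operator sections.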
func addConfig(sconfig *natsconf.ServerConfig, cluster v1alpha2.ClusterSpec) {
if cluster.ServerConfig != nil {
sconfig.Debug = cluster.ServerConfig.Debug
sconfig.Trace = cluster.ServerConfig.Trace
sconfig.WriteDeadline = cluster.ServerConfig.WriteDeadline
sconfig.MaxConnections = cluster.ServerConfig.MaxConnections
sconfig.MaxPayload = cluster.ServerConfig.MaxPayload
sconfig.MaxPending = cluster.ServerConfig.MaxPending
sconfig.MaxSubscriptions = cluster.ServerConfig.MaxSubscriptions
sconfig.MaxControlLine = cluster.ServerConfig.MaxControlLine
sconfig.Logtime = !cluster.ServerConfig.DisableLogtime
} else {
sconfig.Logtime = true
}
// Observe .spec.lameDuckDurationSeconds if specified.
if cluster.LameDuckDurationSeconds != nil {
sconfig.LameDuckDuration = fmt.Sprintf("%ds", *cluster.LameDuckDurationSeconds)
}
if cluster.Pod != nil && cluster.Pod.AdvertiseExternalIP {
sconfig.Include = filepath.Join(".", constants.BootConfigFilePath)
}
if cluster.GatewayConfig != nil {
addGatewayConfig(sconfig, cluster)
}
if cluster.LeafNodeConfig != nil {
addLeafnodeConfig(sconfig, cluster)
}
if cluster.WebsocketConfig != nil {
addWebsocketConfig(sconfig, cluster)
}
addTLSConfig(sconfig, cluster)
addOperatorConfig(sconfig, cluster)
}
func newNatsConfigMapVolume(clusterName string) v1.Volume {
return v1.Volume{
Name: constants.ConfigMapVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: clusterName,
},
},
}
}
func newNatsConfigMapVolumeMount() v1.VolumeMount {
return v1.VolumeMount{
Name: constants.ConfigMapVolumeName,
MountPath: constants.ConfigMapMountPath,
}
}
func newNatsPidFileVolume() v1.Volume {
return v1.Volume{
Name: constants.PidFileVolumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}
}
func newNatsPidFileVolumeMount() v1.VolumeMount {
return v1.VolumeMount{
Name: constants.PidFileVolumeName,
MountPath: constants.PidFileMountPath,
}
}
func newNatsServiceManifest(svcName, clusterName, clusterIP string, ports []v1.ServicePort, selectors map[string]string, tolerateUnready bool, labels map[string]string, annotations map[string]string) *v1.Service {
if labels == nil {
labels = make(map[string]string)
}
labels[LabelAppKey] = LabelAppValue
labels[LabelClusterNameKey] = clusterName
if annotations == nil {
annotations = make(map[string]string)
}
	if tolerateUnready {
annotations[TolerateUnreadyEndpointsAnnotation] = "true"
}
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: svcName,
Labels: labels,
Annotations: annotations,
},
Spec: v1.ServiceSpec{
Ports: ports,
Selector: selectors,
ClusterIP: clusterIP,
},
}
return svc
}
func newNatsServerSecretVolume(secretName string) v1.Volume {
return v1.Volume{
Name: constants.ServerSecretVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
}
}
func newNatsServerSecretVolumeMount() v1.VolumeMount {
return v1.VolumeMount{
Name: constants.ServerSecretVolumeName,
MountPath: constants.ServerCertsMountPath,
}
}
func newNatsRoutesSecretVolume(secretName string) v1.Volume {
return v1.Volume{
Name: constants.RoutesSecretVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
}
}
func newNatsRoutesSecretVolumeMount() v1.VolumeMount {
return v1.VolumeMount{
Name: constants.RoutesSecretVolumeName,
MountPath: constants.RoutesCertsMountPath,
}
}
func newNatsGatewaySecretVolume(secretName string) v1.Volume {
return v1.Volume{
Name: constants.GatewaySecretVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
}
}
func newNatsGatewaySecretVolumeMount() v1.VolumeMount {
return v1.VolumeMount{
Name: constants.GatewaySecretVolumeName,
MountPath: constants.GatewayCertsMountPath,
}
}
func newNatsLeafnodeSecretVolume(secretName string) v1.Volume {
return v1.Volume{
Name: constants.LeafnodeSecretVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
}
}
func newNatsLeafnodeSecretVolumeMount() v1.VolumeMount {
return v1.VolumeMount{
Name: constants.LeafnodeSecretVolumeName,
MountPath: constants.LeafnodeCertsMountPath,
}
}
func newNatsWebsocketSecretVolume(secretName string) v1.Volume {
return v1.Volume{
Name: constants.WebsocketSecretVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
}
}
func newNatsWebsocketSecretVolumeMount() v1.VolumeMount {
return v1.VolumeMount{
Name: constants.WebsocketSecretVolumeName,
MountPath: constants.WebsocketCertsMountPath,
}
}
func newNatsOperatorJWTSecretVolume(secretName string) v1.Volume {
return v1.Volume{
Name: constants.OperatorJWTVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
}
}
func newNatsOperatorJWTSecretVolumeMount() v1.VolumeMount {
return v1.VolumeMount{
Name: constants.OperatorJWTVolumeName,
MountPath: constants.OperatorJWTMountPath,
}
}
func addOwnerRefToObject(o metav1.Object, r metav1.OwnerReference) {
o.SetOwnerReferences(append(o.GetOwnerReferences(), r))
}
// NewNatsPodSpec returns a NATS peer pod specification, based on the cluster specification.
func NewNatsPodSpec(namespace, name, clusterName string, cs v1alpha2.ClusterSpec, owner metav1.OwnerReference) *v1.Pod {
var (
annotations = map[string]string{}
containers = make([]v1.Container, 0)
volumes = make([]v1.Volume, 0)
volumeMounts = make([]v1.VolumeMount, 0)
labels = map[string]string{
LabelAppKey: "nats",
LabelClusterNameKey: clusterName,
LabelClusterVersionKey: cs.Version,
}
)
	// hostPortsEnabled defaults preserve the current behaviour (clients disabled, gateways/leafnodes/websockets enabled)
hostPortsEnabled := hostPorts{clients: false, gateways: true, leafnodes: true, websockets: true}
// ConfigMap: Volume declaration for the Pod and Container.
volume := newNatsConfigMapVolume(clusterName)
volumes = append(volumes, volume)
volumeMount := newNatsConfigMapVolumeMount()
volumeMounts = append(volumeMounts, volumeMount)
// Extra mount to share the pid file from server
volume = newNatsPidFileVolume()
volumes = append(volumes, volume)
volumeMount = newNatsPidFileVolumeMount()
volumeMounts = append(volumeMounts, volumeMount)
if cs.Pod != nil {
// User supplied volumes and mounts
volumeMounts = append(volumeMounts, cs.Pod.VolumeMounts...)
hostPortsEnabled.clients = cs.Pod.EnableClientsHostPort
// Disabling HostPorts makes no sense when AdvertiseExternalIP is true
if !cs.Pod.AdvertiseExternalIP {
if cs.Pod.DisableGatewaysHostPort {
hostPortsEnabled.gateways = false
}
if cs.Pod.DisableLeafnodesHostPort {
hostPortsEnabled.leafnodes = false
}
if cs.Pod.DisableWebsocketsHostPort {
hostPortsEnabled.websockets = false
}
}
}
var gatewayPort int
if cs.GatewayConfig != nil {
gatewayPort = cs.GatewayConfig.Port
}
var leafnodePort int
if cs.LeafNodeConfig != nil {
leafnodePort = cs.LeafNodeConfig.Port
}
var websocketPort int
if cs.WebsocketConfig != nil {
websocketPort = cs.WebsocketConfig.Port
}
// Initialize the pod spec with a template in case it is present.
spec := &v1.PodSpec{}
if cs.PodTemplate != nil {
spec = cs.PodTemplate.Spec.DeepCopy()
		if len(spec.Containers) > 0 {
containers = spec.Containers
}
}
// First container has to be the NATS container
var container v1.Container
if len(spec.Containers) > 0 {
container = spec.Containers[0]
} else {
container = v1.Container{}
}
container = natsPodContainer(
container,
clusterName,
cs.Version,
cs.ServerImage,
hostPortsEnabled,
gatewayPort,
leafnodePort,
websocketPort,
)
container = containerWithLivenessProbe(container, natsLivenessProbe(cs))
// In case TLS was enabled as part of the NATS cluster
// configuration then should include the configuration here.
if cs.TLS != nil {
if cs.TLS.ServerSecret != "" {
volume = newNatsServerSecretVolume(cs.TLS.ServerSecret)
volumes = append(volumes, volume)
volumeMount := newNatsServerSecretVolumeMount()
volumeMounts = append(volumeMounts, volumeMount)
}
if cs.TLS.RoutesSecret != "" {
volume = newNatsRoutesSecretVolume(cs.TLS.RoutesSecret)
volumes = append(volumes, volume)
volumeMount := newNatsRoutesSecretVolumeMount()
volumeMounts = append(volumeMounts, volumeMount)
}
if cs.TLS.GatewaySecret != "" {
volume = newNatsGatewaySecretVolume(cs.TLS.GatewaySecret)
volumes = append(volumes, volume)
volumeMount := newNatsGatewaySecretVolumeMount()
volumeMounts = append(volumeMounts, volumeMount)
}
if cs.TLS.LeafnodeSecret != "" {
volume = newNatsLeafnodeSecretVolume(cs.TLS.LeafnodeSecret)
volumes = append(volumes, volume)
volumeMount := newNatsLeafnodeSecretVolumeMount()
volumeMounts = append(volumeMounts, volumeMount)
}
if cs.TLS.WebsocketSecret != "" {
volume = newNatsWebsocketSecretVolume(cs.TLS.WebsocketSecret)
volumes = append(volumes, volume)
volumeMount := newNatsWebsocketSecretVolumeMount()
volumeMounts = append(volumeMounts, volumeMount)
}
}
if cs.OperatorConfig != nil {
volume = newNatsOperatorJWTSecretVolume(cs.OperatorConfig.Secret)
volumes = append(volumes, volume)
volumeMount := newNatsOperatorJWTSecretVolumeMount()
volumeMounts = append(volumeMounts, volumeMount)
}
// Configure initializer container to resolve the external ip
// from the pod.
var (
advertiseExternalIP bool = cs.Pod != nil && cs.Pod.AdvertiseExternalIP
bootconfig v1.Container
)
if advertiseExternalIP {
var (
bootconfigImage = constants.DefaultBootconfigImage
bootconfigImageTag = constants.DefaultBootconfigImageTag
)
if cs.Pod.BootConfigContainerImage != "" {
bootconfigImage = cs.Pod.BootConfigContainerImage
}
if cs.Pod.BootConfigContainerImageTag != "" {
bootconfigImageTag = cs.Pod.BootConfigContainerImageTag
}
image := fmt.Sprintf("%s:%s", bootconfigImage, bootconfigImageTag)
bootconfig = v1.Container{
Name: "bootconfig",
Image: image,
}
bootconfig.Env = []v1.EnvVar{
{
Name: "KUBERNETES_NODE_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
}
// Add the empty directory mount for the pod, nats
// container and init container
mount := v1.VolumeMount{
Name: "advertiseconfig",
MountPath: "/etc/nats-config/advertise",
SubPath: "advertise",
}
bootconfig.VolumeMounts = []v1.VolumeMount{mount}
volumeMounts = append(volumeMounts, mount)
volume := v1.Volume{
Name: "advertiseconfig",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}
volumes = append(volumes, volume)
bootconfig.Command = []string{
"nats-pod-bootconfig",
"-f", filepath.Join(constants.ConfigMapMountPath, constants.BootConfigFilePath),
"-gf", filepath.Join(constants.ConfigMapMountPath, constants.BootConfigGatewayFilePath),
}
}
if container.VolumeMounts == nil {
container.VolumeMounts = volumeMounts
} else {
container.VolumeMounts = append(container.VolumeMounts, volumeMounts...)
}
if cs.Pod != nil {
container = containerWithRequirements(container, cs.Pod.Resources)
}
// Grab the A record that will correspond to the current pod
// so we can use it as the cluster advertise host.
// This helps with avoiding route connection errors in TLS-enabled clusters.
advertiseHost := fmt.Sprintf("%s.%s.%s.svc", name, ManagementServiceName(clusterName), namespace)
// Rely on the shared configuration map for configuring the cluster.
retries := strconv.Itoa(constants.ConnectRetries)
cmd := []string{
versionCheck.ServerBinaryPath(cs.Version),
"-c",
constants.ConfigFilePath,
"-P",
constants.PidFilePath,
"--cluster_advertise",
advertiseHost,
"--connect_retries",
retries,
}
if cs.NoAdvertise {
cmd = append(cmd, "--no_advertise")
}
container.Command = cmd
// If there were containers defined already, then replace the NATS container.
if len(containers) > 0 {
containers[0] = container
} else {
containers = append(containers, container)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
Annotations: annotations,
},
}
pod.Spec = *spec
// Required overrides.
pod.Spec.Hostname = name
pod.Spec.Subdomain = ManagementServiceName(clusterName)
// Set default restart policy
if pod.Spec.RestartPolicy == "" {
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
}
if advertiseExternalIP {
pod.Spec.InitContainers = append(pod.Spec.InitContainers, bootconfig)
}
// Enable PID namespace sharing and attach sidecar that
// reloads the server whenever the config file is updated.
if cs.Pod != nil && cs.Pod.EnableConfigReload {
pod.Spec.ShareProcessNamespace = &[]bool{true}[0]
// Allow customizing reloader image
image := constants.DefaultReloaderImage
imageTag := constants.DefaultReloaderImageTag
imagePullPolicy := constants.DefaultReloaderImagePullPolicy
if cs.Pod.ReloaderImage != "" {
image = cs.Pod.ReloaderImage
}
if cs.Pod.ReloaderImageTag != "" {
imageTag = cs.Pod.ReloaderImageTag
}
if cs.Pod.ReloaderImagePullPolicy != "" {
imagePullPolicy = cs.Pod.ReloaderImagePullPolicy
}
var reloadTarget []string
if cs.Auth != nil {
if cs.Auth.ClientsAuthFile != "" {
reloadTarget = append(reloadTarget, resolvePath(constants.ConfigMapMountPath, cs.Auth.ClientsAuthFile))
}
}
if cs.TLS != nil {
if cs.TLS.ServerSecret != "" {
reloadTarget = append(reloadTarget, resolvePath(constants.ServerCertsMountPath, cs.TLS.ServerSecretCAFileName))
reloadTarget = append(reloadTarget, resolvePath(constants.ServerCertsMountPath, cs.TLS.ServerSecretCertFileName))
reloadTarget = append(reloadTarget, resolvePath(constants.ServerCertsMountPath, cs.TLS.ServerSecretKeyFileName))
}
if cs.TLS.RoutesSecret != "" {
reloadTarget = append(reloadTarget, resolvePath(constants.RoutesCertsMountPath, cs.TLS.RoutesSecretCAFileName))
reloadTarget = append(reloadTarget, resolvePath(constants.RoutesCertsMountPath, cs.TLS.RoutesSecretCertFileName))
reloadTarget = append(reloadTarget, resolvePath(constants.RoutesCertsMountPath, cs.TLS.RoutesSecretKeyFileName))
}
if cs.TLS.GatewaySecret != "" {
reloadTarget = append(reloadTarget, resolvePath(constants.GatewayCertsMountPath, cs.TLS.GatewaySecretCAFileName))
reloadTarget = append(reloadTarget, resolvePath(constants.GatewayCertsMountPath, cs.TLS.GatewaySecretCertFileName))
reloadTarget = append(reloadTarget, resolvePath(constants.GatewayCertsMountPath, cs.TLS.GatewaySecretKeyFileName))
}
if cs.TLS.LeafnodeSecret != "" {
reloadTarget = append(reloadTarget, resolvePath(constants.LeafnodeCertsMountPath, cs.TLS.LeafnodeSecretCAFileName))
reloadTarget = append(reloadTarget, resolvePath(constants.LeafnodeCertsMountPath, cs.TLS.LeafnodeSecretCertFileName))
reloadTarget = append(reloadTarget, resolvePath(constants.LeafnodeCertsMountPath, cs.TLS.LeafnodeSecretKeyFileName))
}
if cs.TLS.WebsocketSecret != "" {
reloadTarget = append(reloadTarget, resolvePath(constants.WebsocketCertsMountPath, cs.TLS.WebsocketSecretCAFileName))
reloadTarget = append(reloadTarget, resolvePath(constants.WebsocketCertsMountPath, cs.TLS.WebsocketSecretCertFileName))
reloadTarget = append(reloadTarget, resolvePath(constants.WebsocketCertsMountPath, cs.TLS.WebsocketSecretKeyFileName))
}
}
reloaderContainer := natsPodReloaderContainer(image, imageTag, imagePullPolicy, cs.Pod.ReloaderResources, reloadTarget...)
reloaderContainer.VolumeMounts = volumeMounts
containers = append(containers, reloaderContainer)
}
if cs.Pod != nil && cs.Pod.EnableMetrics {
// Add pod annotations for prometheus metrics
pod.ObjectMeta.Annotations["prometheus.io/scrape"] = "true"
pod.ObjectMeta.Annotations["prometheus.io/port"] = strconv.Itoa(constants.MetricsPort)
		// Allow customizing the Prometheus metrics exporter image
image := constants.DefaultMetricsImage
imageTag := constants.DefaultMetricsImageTag
imagePullPolicy := constants.DefaultMetricsImagePullPolicy
if cs.Pod.MetricsImage != "" {
image = cs.Pod.MetricsImage
}
if cs.Pod.MetricsImageTag != "" {
imageTag = cs.Pod.MetricsImageTag
}
if cs.Pod.MetricsImagePullPolicy != "" {
imagePullPolicy = cs.Pod.MetricsImagePullPolicy
}
metricsContainer := natsPodMetricsContainer(image, imageTag, imagePullPolicy)
containers = append(containers, metricsContainer)
}
// pod.Spec.Containers = append(pod.Spec.Containers, containers...)
pod.Spec.Containers = containers
pod.Spec.Volumes = append(pod.Spec.Volumes, volumes...)
applyPodPolicy(clusterName, pod, cs.Pod)
SetNATSVersion(pod, cs.Version)
addOwnerRefToObject(pod.GetObjectMeta(), owner)
return pod
}
// MustNewKubeConfig builds a configuration object by either reading from the specified kubeconfig file or by using an in-cluster config.
func MustNewKubeConfig(kubeconfig string) *rest.Config {
var (
cfg *rest.Config
err error
)
if len(kubeconfig) == 0 {
cfg, err = InClusterConfig()
} else {
cfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
}
if err != nil {
panic(err)
}
return cfg
}
// MustNewKubeClientFromConfig builds a Kubernetes client based on the specified configuration object.
func MustNewKubeClientFromConfig(cfg *rest.Config) kubernetes.Interface {
return kubernetes.NewForConfigOrDie(cfg)
}
// MustNewNatsClientFromConfig builds a client for our API based on the specified configuration object.
func MustNewNatsClientFromConfig(cfg *rest.Config) natsclient.Interface {
return natsclient.NewForConfigOrDie(cfg)
}
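// InClusterConfig builds an in-cluster REST config, resolving the API server host and port when the expected environment variables are not set.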
func InClusterConfig() (*rest.Config, error) {
// Work around https://github.com/kubernetes/kubernetes/issues/40973
if len(os.Getenv("KUBERNETES_SERVICE_HOST")) == 0 {
addrs, err := net.LookupHost("kubernetes.default.svc")
if err != nil {
panic(err)
}
os.Setenv("KUBERNETES_SERVICE_HOST", addrs[0])
}
if len(os.Getenv("KUBERNETES_SERVICE_PORT")) == 0 {
os.Setenv("KUBERNETES_SERVICE_PORT", "443")
}
return rest.InClusterConfig()
}
func IsKubernetesResourceAlreadyExistError(err error) bool {
return apierrors.IsAlreadyExists(err)
}
func IsKubernetesResourceNotFoundError(err error) bool {
return apierrors.IsNotFound(err)
}
// We are using internal API types for cluster-related resources.
func ClusterListOpt(clusterName string) metav1.ListOptions {
return metav1.ListOptions{
LabelSelector: LabelSelectorForCluster(clusterName).String(),
}
}
// LabelSelectorForCluster returns a label selector that matches resources belonging to the NATS cluster with the specified name.
func LabelSelectorForCluster(clusterName string) labels.Selector {
return labels.SelectorFromSet(LabelsForCluster(clusterName))
}
// NatsServiceRoleLabelSelectorForCluster returns a label selector that matches NatsServiceRole resources referencing the NATS cluster with the specified name.
func NatsServiceRoleLabelSelectorForCluster(clusterName string) labels.Selector {
return labels.SelectorFromSet(map[string]string{
LabelClusterNameKey: clusterName,
})
}
func LabelsForCluster(clusterName string) map[string]string {
return map[string]string{
LabelAppKey: LabelAppValue,
LabelClusterNameKey: clusterName,
}
}
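// CreatePatch returns a strategic merge patch that transforms object o into object n using the given data structure.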
func CreatePatch(o, n, datastruct interface{}) ([]byte, error) {
oldData, err := json.Marshal(o)
if err != nil {
return nil, err
}
newData, err := json.Marshal(n)
if err != nil {
return nil, err
}
return strategicpatch.CreateTwoWayMergePatch(oldData, newData, datastruct)
}
// mergeMaps merges l2 into l1. Conflicting keys will be skipped.
func mergeMaps(l1, l2 map[string]string) {
for k, v := range l2 {
if _, ok := l1[k]; ok {
continue
}
l1[k] = v
}
}
// UniquePodName generates a unique name for the Pod.
func UniquePodName() string {
return fmt.Sprintf("nats-%s", k8srand.String(10))
}
// ResourceKey returns the "namespace/name" key that represents the specified resource.
func ResourceKey(obj interface{}) string {
res, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
return "(unknown)"
}
return res
}
// resolvePath resolves a file name that may also be given as an absolute path;
// the directory passed as the first argument is only used as a default location.
func resolvePath(dirpath, fileOrPath string) string {
if filepath.IsAbs(fileOrPath) {
return fileOrPath
}
return filepath.Join(dirpath, fileOrPath)
}
| [
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
]
| []
| [
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
]
| [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | go | 2 | 0 | |
internal/logs/logs.go | // Package logs provides logging utilities.
package logs
import (
"os"
"github.com/apex/log"
)
// Fields returns the global log fields.
func Fields() log.Fields {
f := log.Fields{
"app": os.Getenv("AWS_LAMBDA_FUNCTION_NAME"),
"region": os.Getenv("AWS_REGION"),
"version": os.Getenv("AWS_LAMBDA_FUNCTION_VERSION"),
"stage": os.Getenv("UP_STAGE"),
}
if s := os.Getenv("UP_COMMIT"); s != "" {
f["commit"] = s
}
return f
}
// Plugin returns a log context for the given plugin name.
func Plugin(name string) log.Interface {
f := Fields()
f["plugin"] = name
return log.WithFields(f)
}
| [
"\"AWS_LAMBDA_FUNCTION_NAME\"",
"\"AWS_REGION\"",
"\"AWS_LAMBDA_FUNCTION_VERSION\"",
"\"UP_STAGE\"",
"\"UP_COMMIT\""
]
| []
| [
"UP_STAGE",
"AWS_REGION",
"AWS_LAMBDA_FUNCTION_NAME",
"UP_COMMIT",
"AWS_LAMBDA_FUNCTION_VERSION"
]
| [] | ["UP_STAGE", "AWS_REGION", "AWS_LAMBDA_FUNCTION_NAME", "UP_COMMIT", "AWS_LAMBDA_FUNCTION_VERSION"] | go | 5 | 0 | |
src/main/java/com/bs/test/PublicGistTest/IP_Geo_Location.java | package com.bs.test.PublicGistTest;
import java.io.File;
import java.net.URL;
import org.openqa.selenium.By;
import org.openqa.selenium.OutputType;
import org.openqa.selenium.TakesScreenshot;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.openqa.selenium.remote.RemoteWebDriver;
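// Sample BrowserStack Automate test: sets the "browserstack.geoLocation" capability and reads the detected location from mylocation.org.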
public class IP_Geo_Location {
public static void main(String[] args) {
String USERNAME = System.getenv("BROWSERSTACK_USERNAME");
String AUTOMATE_KEY = System.getenv("BROWSERSTACK_ACCESS_KEY");
String HUB_URL = "http://" + USERNAME + ":" + AUTOMATE_KEY + "@hub-cloud.browserstack.com/wd/hub";
String AUT_URL = "https://mylocation.org/";
URL URLObj = null;
WebDriver webDriver = null;
DesiredCapabilities caps = null;
try {
URLObj = new URL(HUB_URL);
caps = new DesiredCapabilities();
caps.setCapability("os", "Windows");
caps.setCapability("os_version", "10");
caps.setCapability("browserName", "Chrome");
caps.setCapability("browser_version", "68.0");
caps.setCapability("browserstack.geoLocation", "AR"); // VISIT - https://www.browserstack.com/automate/ip-geolocation
caps.setCapability("project", "Test Run");
caps.setCapability("build", "Support Automate");
caps.setCapability("name", "IP GeoLocation Capability");
caps.setCapability("browserstack.console", "verbose");
caps.setCapability("browserstack.debug", "true");
webDriver = new RemoteWebDriver(URLObj, caps);
webDriver.get(AUT_URL);
webDriver.manage().window().maximize();
// SIGN IN
Thread.sleep(10000);
System.out.println("Country :: " + webDriver.findElement(By.xpath(".//tr[contains(.,'Country')]/td[2]")).getText());
System.out.println("Region :: " + webDriver.findElement(By.xpath(".//tr[contains(.,'Region')]/td[2]")).getText());
System.out.println("City :: " + webDriver.findElement(By.xpath(".//tr[contains(.,'City')]/td[2]")).getText());
System.out.println("Country :: " + webDriver.findElement(By.xpath(".//tr[contains(.,'Country')]/td[2]")).getText());
take_screenshot(webDriver);
} catch (Exception e) {
e.printStackTrace();
} finally {
if(webDriver != null){
webDriver.quit();
}
}
} // MAIN END
public static void take_screenshot(WebDriver webDriver){
try {
TakesScreenshot scrShot =((TakesScreenshot)webDriver);
File SrcFile=scrShot.getScreenshotAs(OutputType.FILE);
// File DestFile=new File("/Users/test.png");
// FileUtils.copyFile(SrcFile, DestFile);
} catch (Exception e) {
e.printStackTrace();
}
} // FUNC END
} // CLASS END | [
"\"BROWSERSTACK_USERNAME\"",
"\"BROWSERSTACK_ACCESS_KEY\""
]
| []
| [
"BROWSERSTACK_ACCESS_KEY",
"BROWSERSTACK_USERNAME"
]
| [] | ["BROWSERSTACK_ACCESS_KEY", "BROWSERSTACK_USERNAME"] | java | 2 | 0 | |
baselines/common/tf_util.py | import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
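    """Initializer that samples from a standard normal and rescales along `axis` so the result has norm `std`."""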
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
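    """2D convolution with uniformly initialized weights and an optional image summary of the filters."""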
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_images=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
updates: [tf.Operation] or tf.Operation
list of update functions or single update function that will be run whenever
the function is called. The return is ignored.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
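# The dict branch above is not covered by the docstring example; a hedged sketch of its
# behaviour (names are illustrative):
#   x = tf.placeholder(tf.int32, (), name="x")
#   stats = function([x], {"double": 2 * x, "square": x * x})
#   with single_threaded_session():
#       initialize()
#       stats(3)            # -> {"double": 6, "square": 9}, preserving the dict type of `outputs`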
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
self.input_names = {inp.name.split("/")[-1].split(":")[0]: inp for inp in inputs}
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = adjust_shape(inpt, value)
def __call__(self, *args, **kwargs):
assert len(args) + len(kwargs) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
for inpt_name, value in kwargs.items():
self._feed_input(feed_dict, self.input_names[inpt_name], value)
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
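# Usage sketch (illustrative): flatten the gradient of a scalar loss w.r.t. a variable list,
# e.g. for optimizers that expect the gradient as a single vector (conjugate gradient, L-BFGS).
#   var_list = tf.trainable_variables()
#   g = flatgrad(loss, var_list, clip_norm=10.0)    # 1-D tensor of length sum(numel(v) for v in var_list)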
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
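# Round-trip sketch (illustrative): read all parameters as one flat vector and write them back.
#   get_flat = GetFlat(var_list)
#   set_from_flat = SetFromFlat(var_list)
#   theta = get_flat()              # np.ndarray of length sum(numel(v) for v in var_list)
#   set_from_flat(theta * 0.0)      # zeroes every variable in var_list
# Both calls assume a default session in which the variables are already initialized.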
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# =============================================================
# TF placeholders management
# ============================================================
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
if out.graph == tf.get_default_graph():
assert dtype1 == dtype and shape1 == shape, \
'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
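# Usage sketch (illustrative): placeholders are cached by name so separate modules can share
# the same input tensor.
#   ob = get_placeholder("ob", tf.float32, [None, 4])
#   same_ob = get_placeholder_cached("ob")   # the identical placeholder object
# Requesting an already-registered name with a different dtype/shape on the same graph
# triggers the assertion above.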
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if "/b:" in name or "/bias" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape)))
logger.info("Total model parameters: %0.2f million" % (count_params*1e-6))
def get_available_gpus(session_config=None):
# based on recipe from https://stackoverflow.com/a/38580201
# Unless we allocate a session here, subsequent attempts to create one
# will ignore our custom config (in particular, allow_growth=True will have
# no effect).
if session_config is None:
session_config = get_session()._config
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices(session_config)
return [x.name for x in local_device_protos if x.device_type == 'GPU']
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None):
from baselines import logger
logger.warn('load_state method is deprecated, please use load_variables instead')
sess = sess or get_session()
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname, sess=None):
from baselines import logger
logger.warn('save_state method is deprecated, please use save_variables instead')
sess = sess or get_session()
dirname = os.path.dirname(fname)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# The methods above and below are clearly doing the same thing, and in a rather similar way
# TODO: ensure there is no subtle differences and remove one
def save_variables(save_path, variables=None, sess=None):
import joblib
sess = sess or get_session()
variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
def save_trpo_variables(save_path, variables=None, sess=None):
import joblib
sess = sess or get_session()
variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
ps = sess.run(variables)
save_dict = {v.name[3:]: value for v, value in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
import joblib
sess = sess or get_session()
variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
if isinstance(loaded_params, list):
        assert len(loaded_params) == len(variables), 'number of loaded variables does not match len(variables)'
for d, v in zip(loaded_params, variables):
restores.append(v.assign(d))
else:
for v in variables:
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
'''
adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape)
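# Example (illustrative): a flat list fed to a batched placeholder is promoted to a batch of one.
#   ph = tf.placeholder(tf.float32, [None, 3])
#   adjust_shape(ph, [1.0, 2.0, 3.0])   # -> np.ndarray of shape (1, 3)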
def _check_shape(placeholder_shape, data_shape):
    ''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
    # NOTE: the early return below disables the stricter check that follows; every shape is
    # treated as compatible, and adjust_shape relies on np.reshape to fail if it truly is not.
    return True
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
'''
To log the Tensorflow graph when using rl-algs
algorithms, you can run the following code
in your main script:
import threading, time
def start_tensorboard(session):
time.sleep(10) # Wait until graph is setup
tb_path = osp.join(logger.get_dir(), 'tb')
summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)
summary_op = tf.summary.merge_all()
launch_tensorboard_in_background(tb_path)
session = tf.get_default_session()
t = threading.Thread(target=start_tensorboard, args=([session]))
t.start()
'''
import subprocess
subprocess.Popen(['tensorboard', '--logdir', log_dir])
| []
| []
| [
"RCALL_NUM_CPU"
]
| [] | ["RCALL_NUM_CPU"] | python | 1 | 0 | |
examples/FasterRCNN/predict.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import itertools
import numpy as np
import os
import shutil
import tensorflow as tf
import cv2
import six
import tqdm
assert six.PY3, "This example requires Python 3!"
import tensorpack.utils.viz as tpviz
from tensorpack.predict import MultiTowerOfflinePredictor, OfflinePredictor, PredictConfig
from tensorpack.tfutils import get_model_loader, get_tf_version_tuple
from tensorpack.utils import fs, logger
from dataset import DatasetRegistry, register_coco
from config import config as cfg
from config import finalize_configs
from data import get_eval_dataflow, get_train_dataflow
from eval import DetectionResult, multithread_predict_dataflow, predict_image
from modeling.generalized_rcnn import ResNetC4Model, ResNetFPNModel
from viz import draw_annotation, draw_final_outputs, draw_predictions, draw_proposal_recall
def do_visualize(model, model_path, nr_visualize=100, output_dir='output'):
"""
Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
"""
df = get_train_dataflow()
df.reset_state()
pred = OfflinePredictor(PredictConfig(
model=model,
session_init=get_model_loader(model_path),
input_names=['image', 'gt_boxes', 'gt_labels'],
output_names=[
'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'fastrcnn_all_scores',
'output/boxes',
'output/scores',
'output/labels',
]))
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
fs.mkdir_p(output_dir)
with tqdm.tqdm(total=nr_visualize) as pbar:
for idx, dp in itertools.islice(enumerate(df), nr_visualize):
img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']
rpn_boxes, rpn_scores, all_scores, \
final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)
# draw groundtruth boxes
gt_viz = draw_annotation(img, gt_boxes, gt_labels)
# draw best proposals for each groundtruth, to show recall
proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
# draw the scores for the above proposals
score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])
results = [DetectionResult(*args) for args in
zip(final_boxes, final_scores, final_labels,
[None] * len(final_labels))]
final_viz = draw_final_outputs(img, results)
viz = tpviz.stack_patches([
gt_viz, proposal_viz,
score_viz, final_viz], 2, 2)
if os.environ.get('DISPLAY', None):
tpviz.interactive_imshow(viz)
cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
pbar.update()
def do_evaluate(pred_config, output_file):
num_tower = max(cfg.TRAIN.NUM_GPUS, 1)
graph_funcs = MultiTowerOfflinePredictor(
pred_config, list(range(num_tower))).get_predictors()
for dataset in cfg.DATA.VAL:
logger.info("Evaluating {} ...".format(dataset))
dataflows = [
get_eval_dataflow(dataset, shard=k, num_shards=num_tower)
for k in range(num_tower)]
all_results = multithread_predict_dataflow(dataflows, graph_funcs)
output = output_file + '-' + dataset
DatasetRegistry.get(dataset).eval_inference_results(all_results, output)
def do_predict(pred_func, input_file):
img = cv2.imread(input_file, cv2.IMREAD_COLOR)
results = predict_image(img, pred_func)
final = draw_final_outputs(img, results)
viz = np.concatenate((img, final), axis=1)
cv2.imwrite("output.png", viz)
logger.info("Inference output for {} written to output.png".format(input_file))
tpviz.interactive_imshow(viz)
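# Example invocations (paths are placeholders; the flags match the argparse definitions below):
#   ./predict.py --load /path/to/checkpoint --predict input1.jpg input2.jpg
#   ./predict.py --load /path/to/checkpoint --evaluate results.json --config MODE_FPN=True
#   ./predict.py --load /path/to/checkpoint --benchmark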
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', help='load a model for evaluation.', required=True)
parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')
parser.add_argument('--evaluate', help="Run evaluation. "
"This argument is the path to the output json evaluation file")
parser.add_argument('--predict', help="Run prediction on a given image. "
"This argument is the path to the input image file", nargs='+')
parser.add_argument('--benchmark', action='store_true', help="Benchmark the speed of the model + postprocessing")
parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py",
nargs='+')
args = parser.parse_args()
if args.config:
cfg.update_args(args.config)
register_coco(cfg.DATA.BASEDIR) # add COCO datasets to the registry
MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
if not tf.test.is_gpu_available():
from tensorflow.python.framework import test_util
assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \
"Inference requires either GPU support or MKL support!"
assert args.load
finalize_configs(is_training=False)
if args.predict or args.visualize:
cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
if args.visualize:
do_visualize(MODEL, args.load)
else:
predcfg = PredictConfig(
model=MODEL,
session_init=get_model_loader(args.load),
input_names=MODEL.get_inference_tensor_names()[0],
output_names=MODEL.get_inference_tensor_names()[1])
if args.predict:
predictor = OfflinePredictor(predcfg)
for image_file in args.predict:
do_predict(predictor, image_file)
elif args.evaluate:
assert args.evaluate.endswith('.json'), args.evaluate
do_evaluate(predcfg, args.evaluate)
elif args.benchmark:
df = get_eval_dataflow(cfg.DATA.VAL[0])
df.reset_state()
predictor = OfflinePredictor(predcfg)
for img in tqdm.tqdm(df, total=len(df)):
                # This includes post-processing time, which is done on the CPU and not optimized.
                # To exclude it, modify `predict_image`.
predict_image(img[0], predictor)
| []
| []
| [
"DISPLAY"
]
| [] | ["DISPLAY"] | python | 1 | 0 | |
fuse/ipns/ipns_unix.go | // +build !nofuse
// package fuse/ipns implements a fuse filesystem that interfaces
// with ipns, the naming system for ipfs.
package ipns
import (
"context"
"errors"
"fmt"
"io"
"os"
core "github.com/ipfs/go-ipfs/core"
namesys "github.com/ipfs/go-ipfs/namesys"
ft "gx/ipfs/QmPL8bYtbACcSFFiSr4s2du7Na382NxRADR8hC7D9FkEA2/go-unixfs"
path "gx/ipfs/QmX7uSbkNz76yNwBhuwYwRbhihLnJqM73VTCjS3UMJud9A/go-path"
dag "gx/ipfs/QmXv5mwmQ74r4aiHcNeQ4GAmfB3aWJuqaE4WyDfDfvkgLM/go-merkledag"
cid "gx/ipfs/QmPSQnBKM9g7BaUcZCvswUJVscQ1ipjmwxN5PXCjkp9EQ7/go-cid"
ci "gx/ipfs/QmPvyPwuCgJ7pDmrKDxRtsScJgBaM5h4EpRL2qQJsmXf4n/go-libp2p-crypto"
peer "gx/ipfs/QmQsErDt8Qgw1XrsXf2BpEzDgGWtB1YLsTAARBup5b6B9W/go-libp2p-peer"
logging "gx/ipfs/QmRREK2CAZ5Re2Bd9zZFG6FeYDppUWt5cMgsoUEp3ktgSr/go-log"
mfs "gx/ipfs/QmRkrpnhZqDxTxwGCsDbuZMr7uCFZHH6SGfrcjgEQwxF3t/go-mfs"
fuse "gx/ipfs/QmSJBsmLP1XMjv8hxYg2rUMdPDB7YUpyBo9idjrJ6Cmq6F/fuse"
fs "gx/ipfs/QmSJBsmLP1XMjv8hxYg2rUMdPDB7YUpyBo9idjrJ6Cmq6F/fuse/fs"
)
func init() {
if os.Getenv("IPFS_FUSE_DEBUG") != "" {
fuse.Debug = func(msg interface{}) {
fmt.Println(msg)
}
}
}
var log = logging.Logger("fuse/ipns")
// FileSystem is the readwrite IPNS Fuse Filesystem.
type FileSystem struct {
Ipfs *core.IpfsNode
RootNode *Root
}
// NewFileSystem constructs new fs using given core.IpfsNode instance.
func NewFileSystem(ipfs *core.IpfsNode, sk ci.PrivKey, ipfspath, ipnspath string) (*FileSystem, error) {
kmap := map[string]ci.PrivKey{
"local": sk,
}
root, err := CreateRoot(ipfs, kmap, ipfspath, ipnspath)
if err != nil {
return nil, err
}
return &FileSystem{Ipfs: ipfs, RootNode: root}, nil
}
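// NewFileSystem usage sketch (illustrative, not from the original source; it assumes the
// node exposes its private key and that the fuse mountpoints already exist):
//
//	fsys, err := NewFileSystem(node, node.PrivateKey, "/ipfs", "/ipns")
//	if err != nil {
//		// handle error
//	}
//	_ = fsys // served on a fuse mountpoint by the mount helpers elsewhere in go-ipfs
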
// Root constructs the Root of the filesystem, a Root object.
func (f *FileSystem) Root() (fs.Node, error) {
log.Debug("filesystem, get root")
return f.RootNode, nil
}
func (f *FileSystem) Destroy() {
err := f.RootNode.Close()
if err != nil {
log.Errorf("Error Shutting Down Filesystem: %s\n", err)
}
}
// Root is the root object of the filesystem tree.
type Root struct {
Ipfs *core.IpfsNode
Keys map[string]ci.PrivKey
// Used for symlinking into ipfs
IpfsRoot string
IpnsRoot string
LocalDirs map[string]fs.Node
Roots map[string]*keyRoot
LocalLinks map[string]*Link
}
func ipnsPubFunc(ipfs *core.IpfsNode, k ci.PrivKey) mfs.PubFunc {
return func(ctx context.Context, c cid.Cid) error {
return ipfs.Namesys.Publish(ctx, k, path.FromCid(c))
}
}
func loadRoot(ctx context.Context, rt *keyRoot, ipfs *core.IpfsNode, name string) (fs.Node, error) {
p, err := path.ParsePath("/ipns/" + name)
if err != nil {
log.Errorf("mkpath %s: %s", name, err)
return nil, err
}
node, err := core.Resolve(ctx, ipfs.Namesys, ipfs.Resolver, p)
switch err {
case nil:
case namesys.ErrResolveFailed:
node = ft.EmptyDirNode()
default:
log.Errorf("looking up %s: %s", p, err)
return nil, err
}
pbnode, ok := node.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
root, err := mfs.NewRoot(ctx, ipfs.DAG, pbnode, ipnsPubFunc(ipfs, rt.k))
if err != nil {
return nil, err
}
rt.root = root
return &Directory{dir: root.GetDirectory()}, nil
}
type keyRoot struct {
k ci.PrivKey
alias string
root *mfs.Root
}
func CreateRoot(ipfs *core.IpfsNode, keys map[string]ci.PrivKey, ipfspath, ipnspath string) (*Root, error) {
ldirs := make(map[string]fs.Node)
roots := make(map[string]*keyRoot)
links := make(map[string]*Link)
for alias, k := range keys {
pid, err := peer.IDFromPrivateKey(k)
if err != nil {
return nil, err
}
name := pid.Pretty()
kr := &keyRoot{k: k, alias: alias}
fsn, err := loadRoot(ipfs.Context(), kr, ipfs, name)
if err != nil {
return nil, err
}
roots[name] = kr
ldirs[name] = fsn
// set up alias symlink
links[alias] = &Link{
Target: name,
}
}
return &Root{
Ipfs: ipfs,
IpfsRoot: ipfspath,
IpnsRoot: ipnspath,
Keys: keys,
LocalDirs: ldirs,
LocalLinks: links,
Roots: roots,
}, nil
}
// Attr returns file attributes.
func (*Root) Attr(ctx context.Context, a *fuse.Attr) error {
log.Debug("Root Attr")
	a.Mode = os.ModeDir | 0111 // execute/search-only directory (--x--x--x)
return nil
}
// Lookup performs a lookup under this node.
func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
switch name {
case "mach_kernel", ".hidden", "._.":
// Just quiet some log noise on OS X.
return nil, fuse.ENOENT
}
if lnk, ok := s.LocalLinks[name]; ok {
return lnk, nil
}
nd, ok := s.LocalDirs[name]
if ok {
switch nd := nd.(type) {
case *Directory:
return nd, nil
case *FileNode:
return nd, nil
default:
return nil, fuse.EIO
}
}
// other links go through ipns resolution and are symlinked into the ipfs mountpoint
ipnsName := "/ipns/" + name
resolved, err := s.Ipfs.Namesys.Resolve(s.Ipfs.Context(), ipnsName)
if err != nil {
log.Warningf("ipns: namesys resolve error: %s", err)
return nil, fuse.ENOENT
}
segments := resolved.Segments()
if segments[0] == "ipfs" {
p := path.Join(resolved.Segments()[1:])
return &Link{s.IpfsRoot + "/" + p}, nil
}
log.Error("Invalid path.Path: ", resolved)
return nil, errors.New("invalid path from ipns record")
}
func (r *Root) Close() error {
for _, mr := range r.Roots {
err := mr.root.Close()
if err != nil {
return err
}
}
return nil
}
// Forget is called when the filesystem is unmounted. probably.
// see comments here: http://godoc.org/bazil.org/fuse/fs#FSDestroyer
func (r *Root) Forget() {
err := r.Close()
if err != nil {
log.Error(err)
}
}
// ReadDirAll reads a particular directory. Will show locally available keys
// as well as a symlink to the peerID key
func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
log.Debug("Root ReadDirAll")
var listing []fuse.Dirent
for alias, k := range r.Keys {
pid, err := peer.IDFromPrivateKey(k)
if err != nil {
continue
}
ent := fuse.Dirent{
Name: pid.Pretty(),
Type: fuse.DT_Dir,
}
link := fuse.Dirent{
Name: alias,
Type: fuse.DT_Link,
}
listing = append(listing, ent, link)
}
return listing, nil
}
// Directory is wrapper over an mfs directory to satisfy the fuse fs interface
type Directory struct {
dir *mfs.Directory
}
type FileNode struct {
fi *mfs.File
}
// File is wrapper over an mfs file to satisfy the fuse fs interface
type File struct {
fi mfs.FileDescriptor
}
// Attr returns the attributes of a given node.
func (d *Directory) Attr(ctx context.Context, a *fuse.Attr) error {
log.Debug("Directory Attr")
a.Mode = os.ModeDir | 0555
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getgid())
return nil
}
// Attr returns the attributes of a given node.
func (fi *FileNode) Attr(ctx context.Context, a *fuse.Attr) error {
log.Debug("File Attr")
size, err := fi.fi.Size()
if err != nil {
// In this case, the dag node in question may not be unixfs
return fmt.Errorf("fuse/ipns: failed to get file.Size(): %s", err)
}
a.Mode = os.FileMode(0666)
a.Size = uint64(size)
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getgid())
return nil
}
// Lookup performs a lookup under this node.
func (s *Directory) Lookup(ctx context.Context, name string) (fs.Node, error) {
child, err := s.dir.Child(name)
if err != nil {
// todo: make this error more versatile.
return nil, fuse.ENOENT
}
switch child := child.(type) {
case *mfs.Directory:
return &Directory{dir: child}, nil
case *mfs.File:
return &FileNode{fi: child}, nil
default:
// NB: if this happens, we do not want to continue, unpredictable behaviour
// may occur.
panic("invalid type found under directory. programmer error.")
}
}
// ReadDirAll reads the link structure as directory entries
func (dir *Directory) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
var entries []fuse.Dirent
listing, err := dir.dir.List(ctx)
if err != nil {
return nil, err
}
for _, entry := range listing {
dirent := fuse.Dirent{Name: entry.Name}
switch mfs.NodeType(entry.Type) {
case mfs.TDir:
dirent.Type = fuse.DT_Dir
case mfs.TFile:
dirent.Type = fuse.DT_File
}
entries = append(entries, dirent)
}
if len(entries) > 0 {
return entries, nil
}
return nil, fuse.ENOENT
}
func (fi *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
_, err := fi.fi.Seek(req.Offset, io.SeekStart)
if err != nil {
return err
}
fisize, err := fi.fi.Size()
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
default:
}
readsize := min(req.Size, int(fisize-req.Offset))
n, err := fi.fi.CtxReadFull(ctx, resp.Data[:readsize])
resp.Data = resp.Data[:n]
return err
}
func (fi *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
// TODO: at some point, ensure that WriteAt here respects the context
wrote, err := fi.fi.WriteAt(req.Data, req.Offset)
if err != nil {
return err
}
resp.Size = wrote
return nil
}
func (fi *File) Flush(ctx context.Context, req *fuse.FlushRequest) error {
errs := make(chan error, 1)
go func() {
errs <- fi.fi.Flush()
}()
select {
case err := <-errs:
return err
case <-ctx.Done():
return ctx.Err()
}
}
func (fi *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
if req.Valid.Size() {
cursize, err := fi.fi.Size()
if err != nil {
return err
}
if cursize != int64(req.Size) {
err := fi.fi.Truncate(int64(req.Size))
if err != nil {
return err
}
}
}
return nil
}
// Fsync flushes the content in the file to disk, but does not
// update the dag tree internally
func (fi *FileNode) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
errs := make(chan error, 1)
go func() {
errs <- fi.fi.Sync()
}()
select {
case err := <-errs:
return err
case <-ctx.Done():
return ctx.Err()
}
}
func (fi *File) Forget() {
err := fi.fi.Sync()
if err != nil {
log.Debug("forget file error: ", err)
}
}
func (dir *Directory) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
child, err := dir.dir.Mkdir(req.Name)
if err != nil {
return nil, err
}
return &Directory{dir: child}, nil
}
func (fi *FileNode) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
var mfsflag int
switch {
case req.Flags.IsReadOnly():
mfsflag = mfs.OpenReadOnly
case req.Flags.IsWriteOnly():
mfsflag = mfs.OpenWriteOnly
case req.Flags.IsReadWrite():
mfsflag = mfs.OpenReadWrite
default:
return nil, errors.New("unsupported flag type")
}
fd, err := fi.fi.Open(mfsflag, true)
if err != nil {
return nil, err
}
if req.Flags&fuse.OpenTruncate != 0 {
if req.Flags.IsReadOnly() {
log.Error("tried to open a readonly file with truncate")
return nil, fuse.ENOTSUP
}
log.Info("Need to truncate file!")
err := fd.Truncate(0)
if err != nil {
return nil, err
}
} else if req.Flags&fuse.OpenAppend != 0 {
log.Info("Need to append to file!")
if req.Flags.IsReadOnly() {
log.Error("tried to open a readonly file with append")
return nil, fuse.ENOTSUP
}
_, err := fd.Seek(0, io.SeekEnd)
if err != nil {
log.Error("seek reset failed: ", err)
return nil, err
}
}
return &File{fi: fd}, nil
}
func (fi *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
return fi.fi.Close()
}
func (dir *Directory) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
// New 'empty' file
nd := dag.NodeWithData(ft.FilePBData(nil, 0))
err := dir.dir.AddChild(req.Name, nd)
if err != nil {
return nil, nil, err
}
child, err := dir.dir.Child(req.Name)
if err != nil {
return nil, nil, err
}
fi, ok := child.(*mfs.File)
if !ok {
return nil, nil, errors.New("child creation failed")
}
nodechild := &FileNode{fi: fi}
var openflag int
switch {
case req.Flags.IsReadOnly():
openflag = mfs.OpenReadOnly
case req.Flags.IsWriteOnly():
openflag = mfs.OpenWriteOnly
case req.Flags.IsReadWrite():
openflag = mfs.OpenReadWrite
default:
return nil, nil, errors.New("unsupported open mode")
}
fd, err := fi.Open(openflag, true)
if err != nil {
return nil, nil, err
}
return nodechild, &File{fi: fd}, nil
}
func (dir *Directory) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
err := dir.dir.Unlink(req.Name)
if err != nil {
return fuse.ENOENT
}
return nil
}
// Rename implements NodeRenamer
func (dir *Directory) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error {
cur, err := dir.dir.Child(req.OldName)
if err != nil {
return err
}
err = dir.dir.Unlink(req.OldName)
if err != nil {
return err
}
switch newDir := newDir.(type) {
case *Directory:
nd, err := cur.GetNode()
if err != nil {
return err
}
err = newDir.dir.AddChild(req.NewName, nd)
if err != nil {
return err
}
case *FileNode:
log.Error("Cannot move node into a file!")
return fuse.EPERM
default:
log.Error("Unknown node type for rename target dir!")
return errors.New("unknown fs node type")
}
return nil
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
// to check that our Node types implement all the interfaces we want
type ipnsRoot interface {
fs.Node
fs.HandleReadDirAller
fs.NodeStringLookuper
}
var _ ipnsRoot = (*Root)(nil)
type ipnsDirectory interface {
fs.HandleReadDirAller
fs.Node
fs.NodeCreater
fs.NodeMkdirer
fs.NodeRemover
fs.NodeRenamer
fs.NodeStringLookuper
}
var _ ipnsDirectory = (*Directory)(nil)
type ipnsFile interface {
fs.HandleFlusher
fs.HandleReader
fs.HandleWriter
fs.HandleReleaser
}
type ipnsFileNode interface {
fs.Node
fs.NodeFsyncer
fs.NodeOpener
}
var _ ipnsFileNode = (*FileNode)(nil)
var _ ipnsFile = (*File)(nil)
| [
"\"IPFS_FUSE_DEBUG\""
]
| []
| [
"IPFS_FUSE_DEBUG"
]
| [] | ["IPFS_FUSE_DEBUG"] | go | 1 | 0 | |
python/helpers/pycharm/behave_runner.py | # coding=utf-8
"""
Behave BDD runner.
See _bdd_utils#get_path_by_env for information on how to pass the list of features here.
Each feature could be a file, a folder with feature files, or a folder with a "features" subfolder.
Other args are tag expressions in the format (--tags=.. --tags=..).
See https://pythonhosted.org/behave/behave.html#tag-expression
"""
import functools
import glob
import sys
import os
import traceback
from behave.formatter.base import Formatter
from behave.model import Step, ScenarioOutline, Feature, Scenario
from behave.tag_expression import TagExpression
import re
import _bdd_utils
from distutils import version
from behave import __version__ as behave_version
from _jb_utils import VersionAgnosticUtils
_MAX_STEPS_SEARCH_FEATURES = 5000 # Do not look for features in folder that has more that this number of children
_FEATURES_FOLDER = 'features' # "features" folder name.
__author__ = 'Ilya.Kazakevich'
from behave import configuration, runner
def _get_dirs_to_run(base_dir_to_search):
"""
Searches for "features" dirs in some base_dir
:return: list of feature dirs to run
:rtype: list
:param base_dir_to_search root directory to search (should not have too many children!)
:type base_dir_to_search str
"""
result = set()
for (step, (folder, sub_folders, files)) in enumerate(os.walk(base_dir_to_search)):
if os.path.basename(folder) == _FEATURES_FOLDER and os.path.isdir(folder):
result.add(os.path.abspath(folder))
if step == _MAX_STEPS_SEARCH_FEATURES: # Guard
err = "Folder {0} is too deep to find any features folder. Please provider concrete folder".format(
base_dir_to_search)
raise Exception(err)
return list(result)
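# Illustrative example of the result: given a layout such as
#   project/
#       suite_a/features/foo.feature
#       suite_b/features/bar.feature
# _get_dirs_to_run("project") returns the absolute paths of both "features" folders.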
def _merge_hooks_wrapper(*hooks):
"""
    Creates a wrapper that runs the provided behave hooks sequentially
:param hooks: hooks to run
:return: wrapper
"""
# TODO: Wheel reinvented!!!!
def wrapper(*args, **kwargs):
for hook in hooks:
hook(*args, **kwargs)
return wrapper
class _RunnerWrapper(runner.Runner):
"""
    Wrapper around the native behave runner. It has nothing to do with BddRunner!
    We need it to support dry runs (to fetch data from scenarios) and the hooks API.
"""
def __init__(self, config, hooks):
"""
:type config configuration.Configuration
:param config behave configuration
:type hooks dict
:param hooks hooks in format "before_scenario" => f(context, scenario) to load after/before hooks, provided by user
"""
super(_RunnerWrapper, self).__init__(config)
self.dry_run = False
"""
Does not run tests (only fetches "self.features") if true. Runs tests otherwise.
"""
self.__hooks = hooks
def load_hooks(self, filename='environment.py'):
"""
Overrides parent "load_hooks" to add "self.__hooks"
:param filename: env. file name
"""
super(_RunnerWrapper, self).load_hooks(filename)
for (hook_name, hook) in self.__hooks.items():
hook_to_add = hook
if hook_name in self.hooks:
user_hook = self.hooks[hook_name]
if hook_name.startswith("before"):
user_and_custom_hook = [user_hook, hook]
else:
user_and_custom_hook = [hook, user_hook]
hook_to_add = _merge_hooks_wrapper(*user_and_custom_hook)
self.hooks[hook_name] = hook_to_add
def run_model(self, features=None):
"""
Overrides parent method to stop (do nothing) in case of "dry_run"
:param features: features to run
:return:
"""
if self.dry_run: # To stop further execution
return
return super(_RunnerWrapper, self).run_model(features)
def clean(self):
"""
Cleans runner after dry run (clears hooks, features etc). To be called before real run!
"""
self.dry_run = False
self.hooks.clear()
self.features = []
class _BehaveRunner(_bdd_utils.BddRunner):
"""
BddRunner for behave
"""
def __process_hook(self, is_started, context, element):
"""
Hook to be installed. Reports steps, features etc.
:param is_started true if test/feature/scenario is started
:type is_started bool
:param context behave context
:type context behave.runner.Context
:param element feature/suite/step
"""
element.location.file = element.location.filename # To preserve _bdd_utils contract
utils = VersionAgnosticUtils()
if isinstance(element, Step):
# Process step
step_name = u"{0} {1}".format(utils.to_unicode(element.keyword), utils.to_unicode(element.name))
if is_started:
self._test_started(step_name, element.location)
elif element.status == 'passed':
self._test_passed(step_name, element.duration)
elif element.status == 'failed':
trace = utils.to_unicode("".join(traceback.format_tb(element.exc_traceback)))
error_message = u"{0}: ".format(type(element.exception).__name__) + utils.to_unicode(element.exception)
self._test_failed(step_name, error_message, trace, duration=element.duration)
elif element.status == 'undefined':
self._test_undefined(step_name, element.location)
else:
self._test_skipped(step_name, element.status, element.location)
elif not is_started and isinstance(element, Scenario) and element.status == 'failed':
# To process scenarios with undefined/skipped tests
for step in element.steps:
assert isinstance(step, Step), step
if step.status not in ['passed', 'failed']: # Something strange, probably skipped or undefined
self.__process_hook(False, context, step)
self._feature_or_scenario(is_started, element.name, element.location)
elif isinstance(element, ScenarioOutline):
self._feature_or_scenario(is_started, str(element.examples), element.location)
else:
self._feature_or_scenario(is_started, element.name, element.location)
def __init__(self, config, base_dir):
"""
:type config configuration.Configuration
"""
super(_BehaveRunner, self).__init__(base_dir)
self.__config = config
# Install hooks
self.__real_runner = _RunnerWrapper(config, {
"before_feature": functools.partial(self.__process_hook, True),
"after_feature": functools.partial(self.__process_hook, False),
"before_scenario": functools.partial(self.__process_hook, True),
"after_scenario": functools.partial(self.__process_hook, False),
"before_step": functools.partial(self.__process_hook, True),
"after_step": functools.partial(self.__process_hook, False)
})
def _run_tests(self):
self.__real_runner.run()
def __filter_scenarios_by_args(self, scenario):
"""
Filters out scenarios that should be skipped by tags or scenario names
:param scenario scenario to check
:return true if should pass
"""
assert isinstance(scenario, Scenario), scenario
# TODO: share with lettuce_runner.py#_get_features_to_run
expected_tags = self.__config.tags
scenario_name_re = self.__config.name_re
if scenario_name_re and not scenario_name_re.match(scenario.name):
return False
if not expected_tags:
return True # No tags nor names are required
return isinstance(expected_tags, TagExpression) and expected_tags.check(scenario.tags)
def _get_features_to_run(self):
self.__real_runner.dry_run = True
self.__real_runner.run()
features_to_run = self.__real_runner.features
self.__real_runner.clean() # To make sure nothing left after dry run
# Change outline scenario skeletons with real scenarios
for feature in features_to_run:
assert isinstance(feature, Feature), feature
scenarios = []
for scenario in feature.scenarios:
if isinstance(scenario, ScenarioOutline):
scenarios.extend(scenario.scenarios)
else:
scenarios.append(scenario)
feature.scenarios = filter(self.__filter_scenarios_by_args, scenarios)
return features_to_run
if __name__ == "__main__":
# TODO: support all other params instead
class _Null(Formatter):
"""
        Null formatter to prevent stdout output
"""
pass
command_args = list(filter(None, sys.argv[1:]))
if command_args:
if "--junit" in command_args:
raise Exception("--junit report type for Behave is unsupported in PyCharm. \n "
"See: https://youtrack.jetbrains.com/issue/PY-14219")
_bdd_utils.fix_win_drive(command_args[0])
(base_dir, scenario_names, what_to_run) = _bdd_utils.get_what_to_run_by_env(os.environ)
for scenario_name in scenario_names:
command_args += ["-n", re.escape(scenario_name)] # TODO : rewite pythonic
my_config = configuration.Configuration(command_args=command_args)
# Temporary workaround to support API changes in 1.2.5
if version.LooseVersion(behave_version) >= version.LooseVersion("1.2.5"):
from behave.formatter import _registry
_registry.register_as("com.intellij.python.null",_Null)
else:
from behave.formatter import formatters
formatters.register_as(_Null, "com.intellij.python.null")
my_config.format = ["com.intellij.python.null"] # To prevent output to stdout
my_config.reporters = [] # To prevent summary to stdout
my_config.stdout_capture = False # For test output
my_config.stderr_capture = False # For test output
features = set()
for feature in what_to_run:
if os.path.isfile(feature) or glob.glob(
os.path.join(feature, "*.feature")): # File of folder with "features" provided, load it
features.add(feature)
elif os.path.isdir(feature):
features |= set(_get_dirs_to_run(feature)) # Find "features" subfolder
my_config.paths = list(features)
if what_to_run and not my_config.paths:
raise Exception("Nothing to run in {0}".format(what_to_run))
_BehaveRunner(my_config, base_dir).run()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.packaging.util;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
* Represents an installation of Elasticsearch
*/
public class Installation {
// in the future we'll run as a role user on Windows
public static final String ARCHIVE_OWNER = Platforms.WINDOWS
? System.getenv("username")
: "elasticsearch";
private final Shell sh;
public final Distribution distribution;
public final Path home;
public final Path bin; // this isn't a first-class installation feature but we include it for convenience
public final Path lib; // same
public final Path bundledJdk;
public final Path config;
public final Path data;
public final Path logs;
public final Path plugins;
public final Path modules;
public final Path pidDir;
public final Path envFile;
private Installation(Shell sh, Distribution distribution, Path home, Path config, Path data, Path logs,
Path plugins, Path modules, Path pidDir, Path envFile) {
this.sh = sh;
this.distribution = distribution;
this.home = home;
this.bin = home.resolve("bin");
this.lib = home.resolve("lib");
this.bundledJdk = home.resolve("jdk");
this.config = config;
this.data = data;
this.logs = logs;
this.plugins = plugins;
this.modules = modules;
this.pidDir = pidDir;
this.envFile = envFile;
}
public static Installation ofArchive(Shell sh, Distribution distribution, Path home) {
return new Installation(
sh,
distribution,
home,
home.resolve("config"),
home.resolve("data"),
home.resolve("logs"),
home.resolve("plugins"),
home.resolve("modules"),
null,
null
);
}
public static Installation ofPackage(Shell sh, Distribution distribution) {
final Path envFile = (distribution.packaging == Distribution.Packaging.RPM)
? Paths.get("/etc/sysconfig/elasticsearch")
: Paths.get("/etc/default/elasticsearch");
return new Installation(
sh,
distribution,
Paths.get("/usr/share/elasticsearch"),
Paths.get("/etc/elasticsearch"),
Paths.get("/var/lib/elasticsearch"),
Paths.get("/var/log/elasticsearch"),
Paths.get("/usr/share/elasticsearch/plugins"),
Paths.get("/usr/share/elasticsearch/modules"),
Paths.get("/var/run/elasticsearch"),
envFile
);
}
public static Installation ofContainer(Shell sh, Distribution distribution) {
String root = "/usr/share/elasticsearch";
return new Installation(
sh,
distribution,
Paths.get(root),
Paths.get(root + "/config"),
Paths.get(root + "/data"),
Paths.get(root + "/logs"),
Paths.get(root + "/plugins"),
Paths.get(root + "/modules"),
null,
null
);
}
/**
* Returns the user that owns this installation.
*
* For packages this is root, and for archives it is the user doing the installation.
*/
public String getOwner() {
if (Platforms.WINDOWS) {
// windows is always administrator, since there is no sudo
return "BUILTIN\\Administrators";
}
return distribution.isArchive() ? ARCHIVE_OWNER : "root";
}
public Path bin(String executableName) {
return bin.resolve(executableName);
}
public Path config(String configFileName) {
return config.resolve(configFileName);
}
public Executables executables() {
return new Executables();
}
public class Executable {
public final Path path;
private Executable(String name) {
final String platformExecutableName = Platforms.WINDOWS
? name + ".bat"
: name;
this.path = bin(platformExecutableName);
}
@Override
public String toString() {
return path.toString();
}
public Shell.Result run(String args) {
return run(args, null);
}
public Shell.Result run(String args, String input) {
String command = path + " " + args;
if (distribution.isArchive() && Platforms.WINDOWS == false) {
command = "sudo -E -u " + ARCHIVE_OWNER + " " + command;
}
if (input != null) {
command = "echo \"" + input + "\" | " + command;
}
return sh.run(command);
}
}
public class Executables {
public final Executable elasticsearch = new Executable("elasticsearch");
public final Executable pluginTool = new Executable("elasticsearch-plugin");
public final Executable keystoreTool = new Executable("elasticsearch-keystore");
public final Executable certutilTool = new Executable("elasticsearch-certutil");
public final Executable certgenTool = new Executable("elasticsearch-certgen");
public final Executable cronevalTool = new Executable("elasticsearch-croneval");
public final Executable shardTool = new Executable("elasticsearch-shard");
public final Executable nodeTool = new Executable("elasticsearch-node");
public final Executable setupPasswordsTool = new Executable("elasticsearch-setup-passwords");
public final Executable sqlCli = new Executable("elasticsearch-sql-cli");
public final Executable syskeygenTool = new Executable("elasticsearch-syskeygen");
public final Executable usersTool = new Executable("elasticsearch-users");
}
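    // Usage sketch (illustrative, names assumed): run a bundled CLI tool through the wrapper,
    // which prepends "sudo -E -u elasticsearch" for archive installs on non-Windows platforms.
    //
    //   Installation installation = Installation.ofArchive(sh, distribution, home);
    //   Shell.Result result = installation.executables().elasticsearch.run("--version");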
}
| [
"\"username\""
]
| []
| [
"username"
]
| [] | ["username"] | java | 1 | 0 | |
support/scripts/build_release_artifacts/main.go | package main
// See README.md for a description of this script
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/actionorg/go-action-sdk/support/errors"
"github.com/actionorg/go-action-sdk/support/log"
)
var extractBinName = regexp.MustCompile(`^(?P<bin>[a-z-]+)-(?P<tag>.+)$`)
var builds = []struct {
OS string
Arch string
}{
{"darwin", "amd64"},
{"linux", "amd64"},
{"linux", "arm"},
{"windows", "amd64"},
}
func main() {
log.SetLevel(log.InfoLevel)
run("rm", "-rf", "dist/*")
if os.Getenv("TRAVIS_EVENT_TYPE") == "cron" {
buildNightlies()
os.Exit(0)
} else if os.Getenv("TRAVIS_TAG") != "" {
buildByTag()
os.Exit(0)
}
log.Info("nothing to do")
}
// binPkgNames searches the `tools` and `services` directories of this repo and
// returns the packages that produce binaries, i.e. the packages that should be
// built and packaged by this script.
func binPkgNames() []string {
result := []string{}
result = append(result, binNamesForDir("services")...)
result = append(result, binNamesForDir("tools")...)
return result
}
func binNamesForDir(dir string) []string {
files, err := ioutil.ReadDir(dir)
if err != nil {
panic(errors.Wrap(err, "read-dir failed"))
}
result := []string{}
for _, file := range files {
if file.IsDir() {
result = append(result, filepath.Join(dir, file.Name()))
}
}
return result
}
func build(pkg, dest, version, buildOS, buildArch string) {
buildTime := time.Now().Format(time.RFC3339)
timeFlag := fmt.Sprintf("-X github.com/stellar/go/support/app.buildTime=%s", buildTime)
versionFlag := fmt.Sprintf("-X github.com/stellar/go/support/app.version=%s", version)
if buildOS == "windows" {
dest = dest + ".exe"
}
cmd := exec.Command("go", "build",
"-o", dest,
"-ldflags", fmt.Sprintf("%s %s", timeFlag, versionFlag),
pkg,
)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
cmd.Env = append(
os.Environ(),
fmt.Sprintf("GOOS=%s", buildOS),
fmt.Sprintf("GOARCH=%s", buildArch),
)
log.Infof("building %s", pkg)
log.Infof("running: %s", strings.Join(cmd.Args, " "))
err := cmd.Run()
if err != nil {
panic(err)
}
}
func buildNightlies() {
version := runOutput("git", "describe", "--always", "--dirty", "--tags")
repo := repoName()
for _, pkg := range binPkgNames() {
bin := filepath.Base(pkg)
for _, cfg := range builds {
dest := prepareDest(pkg, bin, "nightly", cfg.OS, cfg.Arch)
build(
fmt.Sprintf("%s/%s", repo, pkg),
filepath.Join(dest, bin),
version,
cfg.OS,
cfg.Arch,
)
packageArchive(dest, cfg.OS)
}
}
}
func buildByTag() {
bin, version := extractFromTag(os.Getenv("TRAVIS_TAG"))
pkg := packageName(bin)
repo := repoName()
if bin == "" {
log.Info("could not extract info from TRAVIS_TAG: skipping artifact packaging")
os.Exit(0)
}
for _, cfg := range builds {
dest := prepareDest(pkg, bin, version, cfg.OS, cfg.Arch)
// rebuild the binary with the version variable set
build(
fmt.Sprintf("%s/%s", repo, pkg),
filepath.Join(dest, bin),
version,
cfg.OS,
cfg.Arch,
)
packageArchive(dest, cfg.OS)
}
}
// extractFromTag extracts the name of the binary that should be packaged in the
// course of execution this script as well as the version it should be packaged
// as, based on the name of the tag in the TRAVIS_TAG environment variable.
// Tags must be of the form `NAME-vSEMVER`, such as `horizon-v1.0.0` to be
// matched by this function.
//
// In the event that the TRAVIS_TAG is missing or the match fails, an empty
// string will be returned.
func extractFromTag(tag string) (string, string) {
match := extractBinName.FindStringSubmatch(tag)
if match == nil {
return "", ""
}
return match[1], match[2]
}
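// For example (illustrative): extractFromTag("horizon-v1.0.0") returns ("horizon", "v1.0.0"),
// while a tag that does not match the NAME-vSEMVER pattern yields two empty strings.
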
// packageArchive tars or zips `dest`, depending upon the OS, then removes
// `dest`, in preparation of travis uploading all artifacts to github releases.
func packageArchive(dest, buildOS string) {
release := filepath.Base(dest)
dir := filepath.Dir(dest)
if buildOS == "windows" {
pop := pushdir(dir)
// zip $RELEASE.zip $RELEASE/*
run("zip", "-r", release+".zip", release)
pop()
} else {
// tar -czf $dest.tar.gz -C $DIST $RELEASE
run("tar", "-czf", dest+".tar.gz", "-C", dir, release)
}
run("rm", "-rf", dest)
}
// packageName searches the `tools` and `services` packages of this repo to find
// the source directory. This is used within the script to find the README and
// other files that should be packaged with the binary.
func packageName(binName string) string {
targets := []string{
filepath.Join("services", binName),
filepath.Join("tools", binName),
}
var result string
// Note: we do not short circuit this search when we find a valid result so
// that we can panic when multiple results are found. The children of
// /services and /tools should not have name overlap.
for _, t := range targets {
_, err := os.Stat(t)
if os.IsNotExist(err) {
continue
}
if err != nil {
panic(errors.Wrap(err, "stat failed"))
}
if result != "" {
panic("sourceDir() found multiple results!")
}
result = t
}
return result
}
func prepareDest(pkg, bin, version, os, arch string) string {
name := fmt.Sprintf("%s-%s-%s-%s", bin, version, os, arch)
dest := filepath.Join("dist", name)
// make destination directories
run("mkdir", "-p", dest)
run("cp", "LICENSE-APACHE.txt", dest)
run("cp", "COPYING", dest)
run("cp", filepath.Join(pkg, "README.md"), dest)
run("cp", filepath.Join(pkg, "CHANGELOG.md"), dest)
return dest
}
// pushdir is a utility function to temporarily change directories. It returns
// a func that can be called to restore the current working directory to the
// state it was in when first calling pushdir.
func pushdir(dir string) func() {
cwd, err := os.Getwd()
if err != nil {
panic(errors.Wrap(err, "getwd failed"))
}
err = os.Chdir(dir)
if err != nil {
panic(errors.Wrap(err, "chdir failed"))
}
return func() {
err := os.Chdir(cwd)
if err != nil {
panic(errors.Wrap(err, "revert dir failed"))
}
}
}
func repoName() string {
if os.Getenv("REPO") != "" {
return os.Getenv("REPO")
}
return "github.com/stellar/go"
}
// utility command to run the provided command that echoes any output. A failed
// command will trigger a panic.
func run(name string, args ...string) {
cmd := exec.Command(name, args...)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
log.Infof("running: %s %s", name, strings.Join(args, " "))
err := cmd.Run()
if err != nil {
panic(err)
}
}
// utility command to run the provided command that returns the output. A
// failed command will trigger a panic.
func runOutput(name string, args ...string) string {
cmd := exec.Command(name, args...)
cmd.Stderr = os.Stderr
log.Infof("running: %s %s", name, strings.Join(args, " "))
out, err := cmd.Output()
if err != nil {
panic(err)
}
return strings.TrimSpace(string(out))
}
| [
"\"TRAVIS_EVENT_TYPE\"",
"\"TRAVIS_TAG\"",
"\"TRAVIS_TAG\"",
"\"REPO\"",
"\"REPO\""
]
| []
| [
"TRAVIS_EVENT_TYPE",
"REPO",
"TRAVIS_TAG"
]
| [] | ["TRAVIS_EVENT_TYPE", "REPO", "TRAVIS_TAG"] | go | 3 | 0 | |
test/e2e/subnet/normal.go | package subnet
import (
"context"
"fmt"
"os"
"time"
kubeovn "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
"github.com/kubeovn/kube-ovn/pkg/util"
"github.com/kubeovn/kube-ovn/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
)
var _ = Describe("[Subnet]", func() {
f := framework.NewFramework("subnet", fmt.Sprintf("%s/.kube/config", os.Getenv("HOME")))
BeforeEach(func() {
if err := f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}); err != nil {
if !k8serrors.IsNotFound(err) {
klog.Fatalf("failed to delete subnet %s, %v", f.GetName(), err)
}
}
if err := f.KubeClientSet.CoreV1().Namespaces().Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}); err != nil {
if !k8serrors.IsNotFound(err) {
klog.Fatalf("failed to delete ns %s, %v", f.GetName(), err)
}
}
})
AfterEach(func() {
if err := f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}); err != nil {
if !k8serrors.IsNotFound(err) {
klog.Fatalf("failed to delete subnet %s, %v", f.GetName(), err)
}
}
if err := f.KubeClientSet.CoreV1().Namespaces().Delete(context.Background(), f.GetName(), metav1.DeleteOptions{}); err != nil {
if !k8serrors.IsNotFound(err) {
klog.Fatalf("failed to delete ns %s, %v", f.GetName(), err)
}
}
})
Describe("Create", func() {
It("only cidr", func() {
name := f.GetName()
By("create subnet")
s := kubeovn.Subnet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.SubnetSpec{
CIDRBlock: "11.10.0.0/16",
},
}
_, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("validate subnet")
err = f.WaitSubnetReady(name)
Expect(err).NotTo(HaveOccurred())
subnet, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(subnet.Spec.Default).To(BeFalse())
Expect(subnet.Spec.Protocol).To(Equal(kubeovn.ProtocolIPv4))
Expect(subnet.Spec.Namespaces).To(BeEmpty())
Expect(subnet.Spec.ExcludeIps).To(ContainElement("11.10.0.1"))
Expect(subnet.Spec.Gateway).To(Equal("11.10.0.1"))
Expect(subnet.Spec.GatewayType).To(Equal(kubeovn.GWDistributedType))
Expect(subnet.Spec.GatewayNode).To(BeEmpty())
Expect(subnet.Spec.NatOutgoing).To(BeFalse())
Expect(subnet.Spec.Private).To(BeFalse())
Expect(subnet.Spec.AllowSubnets).To(BeEmpty())
Expect(subnet.ObjectMeta.Finalizers).To(ContainElement(util.ControllerName))
By("validate status")
Expect(subnet.Status.ActivateGateway).To(BeEmpty())
Expect(subnet.Status.V4AvailableIPs).To(Equal(float64(65533)))
Expect(subnet.Status.V4UsingIPs).To(BeZero())
pods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovs"})
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
stdout, _, err := f.ExecToPodThroughAPI(fmt.Sprintf("ip route list root %s", subnet.Spec.CIDRBlock), "openvswitch", pod.Name, pod.Namespace, nil)
Expect(err).NotTo(HaveOccurred())
Expect(stdout).To(ContainSubstring("ovn0"))
}
})
It("centralized gateway", func() {
name := f.GetName()
By("create subnet")
s := kubeovn.Subnet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.SubnetSpec{
CIDRBlock: "11.11.0.0/16",
GatewayType: kubeovn.GWCentralizedType,
GatewayNode: "kube-ovn-control-plane,kube-ovn-worker,kube-ovn-worker2",
},
}
_, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("validate subnet")
err = f.WaitSubnetReady(name)
Expect(err).NotTo(HaveOccurred())
time.Sleep(5 * time.Second)
subnet, err := f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(subnet.Spec.GatewayType).To(Equal(kubeovn.GWCentralizedType))
Expect(subnet.Status.ActivateGateway).To(Equal("kube-ovn-control-plane"))
})
})
Describe("Update", func() {
It("distributed to centralized", func() {
name := f.GetName()
By("create subnet")
s := &kubeovn.Subnet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.SubnetSpec{
CIDRBlock: "11.12.0.0/16",
},
}
_, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), s, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
err = f.WaitSubnetReady(name)
Expect(err).NotTo(HaveOccurred())
s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
s.Spec.GatewayType = kubeovn.GWCentralizedType
s.Spec.GatewayNode = "kube-ovn-control-plane,kube-ovn-worker,kube-ovn-worker2"
_, err = f.OvnClientSet.KubeovnV1().Subnets().Update(context.Background(), s, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
time.Sleep(5 * time.Second)
s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(s.Spec.GatewayType).To(Equal(kubeovn.GWCentralizedType))
Expect(s.Status.ActivateGateway).To(Equal("kube-ovn-control-plane"))
})
})
Describe("Delete", func() {
It("normal deletion", func() {
name := f.GetName()
By("create subnet")
s := kubeovn.Subnet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.SubnetSpec{
CIDRBlock: "11.13.0.0/16",
},
}
_, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), &s, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
time.Sleep(5 * time.Second)
err = f.OvnClientSet.KubeovnV1().Subnets().Delete(context.Background(), name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
time.Sleep(5 * time.Second)
pods, err := f.KubeClientSet.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{LabelSelector: "app=ovs"})
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
stdout, _, err := f.ExecToPodThroughAPI("ip route", "openvswitch", pod.Name, pod.Namespace, nil)
Expect(err).NotTo(HaveOccurred())
Expect(stdout).NotTo(ContainSubstring(s.Spec.CIDRBlock))
}
})
})
Describe("cidr with nonstandard style", func() {
It("cidr ends with nonzero", func() {
name := f.GetName()
By("create subnet")
s := &kubeovn.Subnet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.SubnetSpec{
CIDRBlock: "11.14.0.1/16",
},
}
_, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), s, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
err = f.WaitSubnetReady(name)
Expect(err).NotTo(HaveOccurred())
s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(s.Spec.CIDRBlock).To(Equal("11.14.0.0/16"))
})
})
Describe("available ip calculation", func() {
It("no available cidr", func() {
name := f.GetName()
s := &kubeovn.Subnet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.SubnetSpec{
CIDRBlock: "19.0.0.0/31",
ExcludeIps: []string{"179.17.0.0..179.17.0.10"},
},
}
_, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), s, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
err = f.WaitSubnetReady(name)
Expect(err).NotTo(HaveOccurred())
s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(s.Status.V4AvailableIPs).To(Equal(float64(0)))
})
It("small cidr", func() {
name := f.GetName()
s := &kubeovn.Subnet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.SubnetSpec{
CIDRBlock: "29.0.0.0/30",
ExcludeIps: []string{"179.17.0.0..179.17.0.10"},
},
}
_, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), s, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
err = f.WaitSubnetReady(name)
Expect(err).NotTo(HaveOccurred())
s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(s.Status.V4AvailableIPs).To(Equal(float64(1)))
})
It("with excludeips", func() {
name := f.GetName()
s := &kubeovn.Subnet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"e2e": "true"},
},
Spec: kubeovn.SubnetSpec{
CIDRBlock: "179.17.0.0/24",
ExcludeIps: []string{"179.17.0.0..179.17.0.10"},
},
}
_, err := f.OvnClientSet.KubeovnV1().Subnets().Create(context.Background(), s, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
err = f.WaitSubnetReady(name)
Expect(err).NotTo(HaveOccurred())
s, err = f.OvnClientSet.KubeovnV1().Subnets().Get(context.Background(), name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(s.Status.V4AvailableIPs).To(Equal(float64(244)))
})
})
})
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
native_client_sdk/src/build_tools/nacl-mono-buildbot.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import json
import os
import sys
import buildbot_common
import build_utils
GS_MANIFEST_PATH = 'gs://nativeclient-mirror/nacl/nacl_sdk/'
SDK_MANIFEST = 'naclsdk_manifest2.json'
MONO_MANIFEST = 'naclmono_manifest.json'
def build_and_upload_mono(sdk_revision, pepper_revision, sdk_url,
upload_path, args):
install_dir = 'naclmono'
buildbot_common.RemoveDir(install_dir)
revision_opt = ['--sdk-revision', sdk_revision] if sdk_revision else []
url_opt = ['--sdk-url', sdk_url] if sdk_url else []
buildbot_common.Run([sys.executable, 'nacl-mono-builder.py',
'--arch', 'x86-32', '--install-dir', install_dir] +
revision_opt + url_opt + args)
buildbot_common.Run([sys.executable, 'nacl-mono-builder.py',
'--arch', 'x86-64', '--install-dir', install_dir] +
revision_opt + url_opt + args)
buildbot_common.Run([sys.executable, 'nacl-mono-archive.py',
'--upload-path', upload_path,
'--pepper-revision', pepper_revision,
'--install-dir', install_dir] + args)
def get_sdk_build_info():
'''Returns a list of dictionaries for versions of NaCl Mono to build which are
out of date compared to the SDKs available to naclsdk'''
# Get a copy of the naclsdk manifest file
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp',
GS_MANIFEST_PATH + SDK_MANIFEST, '.'])
manifest_file = open(SDK_MANIFEST, 'r')
sdk_manifest = json.loads(manifest_file.read())
manifest_file.close()
pepper_infos = []
for key, value in sdk_manifest.items():
if key == 'bundles':
stabilities = ['stable', 'beta', 'dev', 'post_stable']
# Pick pepper_* bundles, need pepper_19 or greater to build Mono
bundles = filter(lambda b: (b['stability'] in stabilities
and 'pepper_' in b['name'])
and b['version'] >= 19, value)
for b in bundles:
newdict = {}
newdict['pepper_revision'] = str(b['version'])
linux_arch = filter(lambda u: u['host_os'] == 'linux', b['archives'])
newdict['sdk_url'] = linux_arch[0]['url']
newdict['sdk_revision'] = b['revision']
newdict['stability'] = b['stability']
newdict['naclmono_name'] = 'naclmono_' + newdict['pepper_revision']
pepper_infos.append(newdict)
# Get a copy of the naclmono manifest file
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp',
GS_MANIFEST_PATH + MONO_MANIFEST, '.'])
manifest_file = open(MONO_MANIFEST, 'r')
mono_manifest = json.loads(manifest_file.read())
manifest_file.close()
ret = []
mono_manifest_dirty = False
# Check to see if we need to rebuild mono based on sdk revision
for key, value in mono_manifest.items():
if key == 'bundles':
for info in pepper_infos:
bundle = filter(lambda b: b['name'] == info['naclmono_name'], value)
if len(bundle) == 0:
info['naclmono_rev'] = '1'
ret.append(info)
else:
if info['sdk_revision'] != bundle[0]['sdk_revision']:
# This bundle exists in the mono manifest, bump the revision
# for the new build we're about to make.
info['naclmono_rev'] = str(bundle[0]['revision'] + 1)
ret.append(info)
elif info['stability'] != bundle[0]['stability']:
# If all that happened was the SDK bundle was promoted in stability,
# change only that and re-write the manifest
mono_manifest_dirty = True
bundle[0]['stability'] = info['stability']
# re-write the manifest here because there are no bundles to build but
# the manifest has changed
if mono_manifest_dirty and len(ret) == 0:
manifest_file = open(MONO_MANIFEST, 'w')
manifest_file.write(json.dumps(mono_manifest, sort_keys=False, indent=2))
manifest_file.close()
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp', '-a', 'public-read',
MONO_MANIFEST, GS_MANIFEST_PATH + MONO_MANIFEST])
return ret
def update_mono_sdk_json(infos):
'''Update the naclmono manifest with the newly built packages'''
if len(infos) == 0:
return
manifest_file = open(MONO_MANIFEST, 'r')
mono_manifest = json.loads(manifest_file.read())
manifest_file.close()
for info in infos:
bundle = {}
bundle['name'] = info['naclmono_name']
bundle['description'] = 'Mono for Native Client'
bundle['stability'] = info['stability']
bundle['recommended'] = 'no'
bundle['version'] = 'experimental'
archive = {}
sha1_hash = hashlib.sha1()
f = open(info['naclmono_name'] + '.bz2', 'rb')
sha1_hash.update(f.read())
archive['size'] = f.tell()
f.close()
archive['checksum'] = { 'sha1': sha1_hash.hexdigest() }
archive['host_os'] = 'all'
archive['url'] = ('https://commondatastorage.googleapis.com/'
'nativeclient-mirror/nacl/nacl_sdk/%s/%s/%s.bz2'
% (info['naclmono_name'], info['naclmono_rev'],
info['naclmono_name']))
bundle['archives'] = [archive]
bundle['revision'] = int(info['naclmono_rev'])
bundle['sdk_revision'] = int(info['sdk_revision'])
# Insert this new bundle into the manifest,
# probably overwriting an existing bundle.
for key, value in mono_manifest.items():
if key == 'bundles':
existing = filter(lambda b: b['name'] == info['naclmono_name'], value)
if len(existing) > 0:
loc = value.index(existing[0])
value[loc] = bundle
else:
value.append(bundle)
# Write out the file locally, then upload to its known location.
manifest_file = open(MONO_MANIFEST, 'w')
manifest_file.write(json.dumps(mono_manifest, sort_keys=False, indent=2))
manifest_file.close()
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp', '-a', 'public-read',
MONO_MANIFEST, GS_MANIFEST_PATH + MONO_MANIFEST])
def main(args):
args = args[1:]
buildbot_revision = os.environ.get('BUILDBOT_REVISION', '')
buildername = os.environ.get('BUILDBOT_BUILDERNAME', '')
os.chdir(buildbot_common.SCRIPT_DIR)
if buildername == 'linux-sdk-mono32':
assert buildbot_revision
sdk_revision = buildbot_revision.split(':')[0]
pepper_revision = build_utils.ChromeMajorVersion()
build_and_upload_mono(sdk_revision, pepper_revision, None,
'trunk.' + sdk_revision, args)
elif buildername == 'linux-sdk-mono64':
infos = get_sdk_build_info()
for info in infos:
# This will put the file in naclmono_19/1/naclmono_19.bz2 for example.
upload_path = info['naclmono_name'] + '/' + info['naclmono_rev']
build_and_upload_mono(None, info['pepper_revision'], info['sdk_url'],
upload_path, args)
update_mono_sdk_json(infos)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| []
| []
| [
"BUILDBOT_REVISION",
"BUILDBOT_BUILDERNAME"
]
| [] | ["BUILDBOT_REVISION", "BUILDBOT_BUILDERNAME"] | python | 2 | 0 | |
share/we-lang/we-lang.go | package main
import (
"bytes"
_ "crypto/sha512"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"net/url"
"os"
"os/user"
"path"
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/klauspost/lctime"
"github.com/mattn/go-colorable"
"github.com/mattn/go-runewidth"
)
type configuration struct {
APIKey string
City string
Numdays int
Imperial bool
WindUnit bool
Inverse bool
Lang string
Narrow bool
LocationName string
WindMS bool
RightToLeft bool
}
type cond struct {
ChanceOfRain string `json:"chanceofrain"`
FeelsLikeC int `json:",string"`
PrecipMM float32 `json:"precipMM,string"`
TempC int `json:"tempC,string"`
TempC2 int `json:"temp_C,string"`
Time int `json:"time,string"`
VisibleDistKM int `json:"visibility,string"`
WeatherCode int `json:"weatherCode,string"`
WeatherDesc []struct{ Value string }
WindGustKmph int `json:",string"`
Winddir16Point string
WindspeedKmph int `json:"windspeedKmph,string"`
}
type astro struct {
Moonrise string
Moonset string
Sunrise string
Sunset string
}
type weather struct {
Astronomy []astro
Date string
Hourly []cond
MaxtempC int `json:"maxtempC,string"`
MintempC int `json:"mintempC,string"`
}
type loc struct {
Query string `json:"query"`
Type string `json:"type"`
}
type resp struct {
Data struct {
Cur []cond `json:"current_condition"`
Err []struct{ Msg string } `json:"error"`
Req []loc `json:"request"`
Weather []weather `json:"weather"`
} `json:"data"`
}
var (
ansiEsc *regexp.Regexp
config configuration
configpath string
debug bool
windDir = map[string]string{
"N": "\033[1m↓\033[0m",
"NNE": "\033[1m↓\033[0m",
"NE": "\033[1m↙\033[0m",
"ENE": "\033[1m↙\033[0m",
"E": "\033[1m←\033[0m",
"ESE": "\033[1m←\033[0m",
"SE": "\033[1m↖\033[0m",
"SSE": "\033[1m↖\033[0m",
"S": "\033[1m↑\033[0m",
"SSW": "\033[1m↑\033[0m",
"SW": "\033[1m↗\033[0m",
"WSW": "\033[1m↗\033[0m",
"W": "\033[1m→\033[0m",
"WNW": "\033[1m→\033[0m",
"NW": "\033[1m↘\033[0m",
"NNW": "\033[1m↘\033[0m",
}
unitRain = map[bool]string{
false: "mm",
true: "in",
}
unitTemp = map[bool]string{
false: "C",
true: "F",
}
unitVis = map[bool]string{
false: "km",
true: "mi",
}
unitWind = map[int]string{
0: "km/h",
1: "mph",
2: "m/s",
}
slotTimes = [slotcount]int{9 * 60, 12 * 60, 18 * 60, 22 * 60}
codes = map[int][]string{
113: iconSunny,
116: iconPartlyCloudy,
119: iconCloudy,
122: iconVeryCloudy,
143: iconFog,
176: iconLightShowers,
179: iconLightSleetShowers,
182: iconLightSleet,
185: iconLightSleet,
200: iconThunderyShowers,
227: iconLightSnow,
230: iconHeavySnow,
248: iconFog,
260: iconFog,
263: iconLightShowers,
266: iconLightRain,
281: iconLightSleet,
284: iconLightSleet,
293: iconLightRain,
296: iconLightRain,
299: iconHeavyShowers,
302: iconHeavyRain,
305: iconHeavyShowers,
308: iconHeavyRain,
311: iconLightSleet,
314: iconLightSleet,
317: iconLightSleet,
320: iconLightSnow,
323: iconLightSnowShowers,
326: iconLightSnowShowers,
329: iconHeavySnow,
332: iconHeavySnow,
335: iconHeavySnowShowers,
338: iconHeavySnow,
350: iconLightSleet,
353: iconLightShowers,
356: iconHeavyShowers,
359: iconHeavyRain,
362: iconLightSleetShowers,
365: iconLightSleetShowers,
368: iconLightSnowShowers,
371: iconHeavySnowShowers,
374: iconLightSleetShowers,
377: iconLightSleet,
386: iconThunderyShowers,
389: iconThunderyHeavyRain,
392: iconThunderySnowShowers,
395: iconHeavySnowShowers,
}
iconUnknown = []string{
" .-. ",
" __) ",
" ( ",
" `-’ ",
" • "}
iconSunny = []string{
"\033[38;5;226m \\ / \033[0m",
"\033[38;5;226m .-. \033[0m",
"\033[38;5;226m ― ( ) ― \033[0m",
"\033[38;5;226m `-’ \033[0m",
"\033[38;5;226m / \\ \033[0m"}
iconPartlyCloudy = []string{
"\033[38;5;226m \\ /\033[0m ",
"\033[38;5;226m _ /\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m \\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
" "}
iconCloudy = []string{
" ",
"\033[38;5;250m .--. \033[0m",
"\033[38;5;250m .-( ). \033[0m",
"\033[38;5;250m (___.__)__) \033[0m",
" "}
iconVeryCloudy = []string{
" ",
"\033[38;5;240;1m .--. \033[0m",
"\033[38;5;240;1m .-( ). \033[0m",
"\033[38;5;240;1m (___.__)__) \033[0m",
" "}
iconLightShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
"\033[38;5;111m ‘ ‘ ‘ ‘ \033[0m",
"\033[38;5;111m ‘ ‘ ‘ ‘ \033[0m"}
iconHeavyShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;240;1m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;240;1m( ). \033[0m",
"\033[38;5;226m /\033[38;5;240;1m(___(__) \033[0m",
"\033[38;5;21;1m ‚‘‚‘‚‘‚‘ \033[0m",
"\033[38;5;21;1m ‚’‚’‚’‚’ \033[0m"}
iconLightSnowShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
"\033[38;5;255m * * * \033[0m",
"\033[38;5;255m * * * \033[0m"}
iconHeavySnowShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;240;1m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;240;1m( ). \033[0m",
"\033[38;5;226m /\033[38;5;240;1m(___(__) \033[0m",
"\033[38;5;255;1m * * * * \033[0m",
"\033[38;5;255;1m * * * * \033[0m"}
iconLightSleetShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
"\033[38;5;111m ‘ \033[38;5;255m*\033[38;5;111m ‘ \033[38;5;255m* \033[0m",
"\033[38;5;255m *\033[38;5;111m ‘ \033[38;5;255m*\033[38;5;111m ‘ \033[0m"}
iconThunderyShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
"\033[38;5;228;5m ⚡\033[38;5;111;25m‘‘\033[38;5;228;5m⚡\033[38;5;111;25m‘‘ \033[0m",
"\033[38;5;111m ‘ ‘ ‘ ‘ \033[0m"}
iconThunderyHeavyRain = []string{
"\033[38;5;240;1m .-. \033[0m",
"\033[38;5;240;1m ( ). \033[0m",
"\033[38;5;240;1m (___(__) \033[0m",
"\033[38;5;21;1m ‚‘\033[38;5;228;5m⚡\033[38;5;21;25m‘‚\033[38;5;228;5m⚡\033[38;5;21;25m‚‘ \033[0m",
"\033[38;5;21;1m ‚’‚’\033[38;5;228;5m⚡\033[38;5;21;25m’‚’ \033[0m"}
iconThunderySnowShowers = []string{
"\033[38;5;226m _`/\"\"\033[38;5;250m.-. \033[0m",
"\033[38;5;226m ,\\_\033[38;5;250m( ). \033[0m",
"\033[38;5;226m /\033[38;5;250m(___(__) \033[0m",
"\033[38;5;255m *\033[38;5;228;5m⚡\033[38;5;255;25m*\033[38;5;228;5m⚡\033[38;5;255;25m* \033[0m",
"\033[38;5;255m * * * \033[0m"}
iconLightRain = []string{
"\033[38;5;250m .-. \033[0m",
"\033[38;5;250m ( ). \033[0m",
"\033[38;5;250m (___(__) \033[0m",
"\033[38;5;111m ‘ ‘ ‘ ‘ \033[0m",
"\033[38;5;111m ‘ ‘ ‘ ‘ \033[0m"}
iconHeavyRain = []string{
"\033[38;5;240;1m .-. \033[0m",
"\033[38;5;240;1m ( ). \033[0m",
"\033[38;5;240;1m (___(__) \033[0m",
"\033[38;5;21;1m ‚‘‚‘‚‘‚‘ \033[0m",
"\033[38;5;21;1m ‚’‚’‚’‚’ \033[0m"}
iconLightSnow = []string{
"\033[38;5;250m .-. \033[0m",
"\033[38;5;250m ( ). \033[0m",
"\033[38;5;250m (___(__) \033[0m",
"\033[38;5;255m * * * \033[0m",
"\033[38;5;255m * * * \033[0m"}
iconHeavySnow = []string{
"\033[38;5;240;1m .-. \033[0m",
"\033[38;5;240;1m ( ). \033[0m",
"\033[38;5;240;1m (___(__) \033[0m",
"\033[38;5;255;1m * * * * \033[0m",
"\033[38;5;255;1m * * * * \033[0m"}
iconLightSleet = []string{
"\033[38;5;250m .-. \033[0m",
"\033[38;5;250m ( ). \033[0m",
"\033[38;5;250m (___(__) \033[0m",
"\033[38;5;111m ‘ \033[38;5;255m*\033[38;5;111m ‘ \033[38;5;255m* \033[0m",
"\033[38;5;255m *\033[38;5;111m ‘ \033[38;5;255m*\033[38;5;111m ‘ \033[0m"}
iconFog = []string{
" ",
"\033[38;5;251m _ - _ - _ - \033[0m",
"\033[38;5;251m _ - _ - _ \033[0m",
"\033[38;5;251m _ - _ - _ - \033[0m",
" "}
locale = map[string]string{
"af": "af_ZA",
"am": "am_ET",
"ar": "ar_TN",
"az": "az_AZ",
"be": "be_BY",
"bg": "bg_BG",
"bn": "bn_IN",
"bs": "bs_BA",
"ca": "ca_ES",
"cs": "cs_CZ",
"cy": "cy_GB",
"da": "da_DK",
"de": "de_DE",
"el": "el_GR",
"eo": "eo",
"es": "es_ES",
"et": "et_EE",
"eu": "eu_ES",
"fa": "fa_IR",
"fi": "fi_FI",
"fr": "fr_FR",
"fy": "fy_NL",
"ga": "ga_IE",
"he": "he_IL",
"hi": "hi_IN",
"hr": "hr_HR",
"hu": "hu_HU",
"hy": "hy_AM",
"ia": "ia",
"id": "id_ID",
"is": "is_IS",
"it": "it_IT",
"ja": "ja_JP",
"jv": "en_US",
"ka": "ka_GE",
"kk": "kk_KZ",
"ko": "ko_KR",
"ky": "ky_KG",
"lt": "lt_LT",
"lv": "lv_LV",
"mg": "mg_MG",
"mk": "mk_MK",
"ml": "ml_IN",
"nb": "nb_NO",
"nl": "nl_NL",
"nn": "nn_NO",
"oc": "oc_FR",
"pl": "pl_PL",
"pt-br": "pt_BR",
"pt": "pt_PT",
"ro": "ro_RO",
"ru": "ru_RU",
"sk": "sk_SK",
"sl": "sl_SI",
"sr-lat": "sr_RS@latin",
"sr": "sr_RS",
"sv": "sv_SE",
"sw": "sw_KE",
"ta": "ta_IN",
"th": "th_TH",
"tr": "tr_TR",
"uk": "uk_UA",
"uz": "uz_UZ",
"vi": "vi_VN",
"zh-cn": "zh_CN",
"zh-tw": "zh_TW",
"zh": "zh_CN",
"zu": "zu_ZA",
}
localizedCaption = map[string]string{
"af": "Weer verslag vir:",
"am": "የአየር ሁኔታ ዘገባ ለ ፥",
"ar": "تقرير حالة ألطقس",
"az": "Hava proqnozu:",
"be": "Прагноз надвор'я для:",
"bg": "Прогноза за времето в:",
"bn": "আবহাওয়া সঙ্ক্রান্ত তথ্য",
"bs": "Vremenske prognoze za:",
"ca": "Informe del temps per a:",
"cs": "Předpověď počasí pro:",
"cy": "Adroddiad tywydd ar gyfer:",
"da": "Vejret i:",
"de": "Wetterbericht für:",
"el": "Πρόγνωση καιρού για:",
"eo": "Veterprognozo por:",
"es": "El tiempo en:",
"et": "Ilmaprognoos:",
"eu": "Eguraldia:",
"fa": "اوه و بآ تیعضو شرازگ",
"fi": "Säätiedotus:",
"fr": "Prévisions météo pour:",
"fy": "Waarberjocht foar:",
"ga": "Réamhaisnéis na haimsire do:",
"he": ":ריוואה גזמ תיזחת",
"hi": "मौसम की जानकारी",
"hr": "Vremenska prognoza za:",
"hu": "Időjárás előrejelzés:",
"hy": "Եղանակի տեսություն:",
"ia": "Le tempore a:",
"id": "Prakiraan cuaca:",
"it": "Previsioni meteo:",
"is": "Veðurskýrsla fyrir:",
"ja": "天気予報:",
"jv": "Weather forecast for:",
"ka": "ამინდის პროგნოზი:",
"kk": "Ауа райы:",
"ko": "일기 예보:",
"ky": "Аба ырайы:",
"lt": "Orų prognozė:",
"lv": "Laika ziņas:",
"mk": "Прогноза за времето во:",
"ml": "കാലാവസ്ഥ റിപ്പോർട്ട്:",
"mr": "हवामानाचा अंदाज:",
"nb": "Værmelding for:",
"nl": "Weerbericht voor:",
"nn": "Vêrmelding for:",
"oc": "Previsions metèo per:",
"pl": "Pogoda w:",
"pt": "Previsão do tempo para:",
"pt-br": "Previsão do tempo para:",
"ro": "Prognoza meteo pentru:",
"ru": "Прогноз погоды:",
"sk": "Predpoveď počasia pre:",
"sl": "Vremenska napoved za",
"sr": "Временска прогноза за:",
"sr-lat": "Vremenska prognoza za:",
"sv": "Väderleksprognos för:",
"sw": "Ripoti ya hali ya hewa, jiji la:",
"ta": "வானிலை அறிக்கை",
"te": "వాతావరణ సమాచారము:",
"th": "รายงานสภาพอากาศ:",
"tr": "Hava beklentisi:",
"uk": "Прогноз погоди для:",
"uz": "Ob-havo bashorati:",
"vi": "Báo cáo thời tiết:",
"zu": "Isimo sezulu:",
"zh": "天气预报:",
"zh-cn": "天气预报:",
"zh-tw": "天氣預報:",
"mg": "Vinavina toetr'andro hoan'ny:",
}
daytimeTranslation = map[string][]string{
"af": {"Oggend", "Middag", "Vroegaand", "Laatnag"},
"am": {"ጠዋት", "ከሰዓት በኋላ", "ምሽት", "ሌሊት"},
"ar": {"ﺎﻠﻠﻴﻟ", "ﺎﻠﻤﺳﺍﺀ", "ﺎﻠﻈﻫﺭ", "ﺎﻠﺼﺑﺎﺣ"},
"az": {"Səhər", "Gün", "Axşam", "Gecə"},
"be": {"Раніца", "Дзень", "Вечар", "Ноч"},
"bg": {"Сутрин", "Обяд", "Вечер", "Нощ"},
"bn": {"সকাল", "দুপুর", "সন্ধ্যা", "রাত্রি"},
"bs": {"Ujutro", "Dan", "Večer", "Noć"},
"cs": {"Ráno", "Ve dne", "Večer", "V noci"},
"ca": {"Matí", "Dia", "Tarda", "Nit"},
"cy": {"Bore", "Dydd", "Hwyr", "Nos"},
"da": {"Morgen", "Middag", "Aften", "Nat"},
"de": {"Früh", "Mittag", "Abend", "Nacht"},
"el": {"Πρωί", "Μεσημέρι", "Απόγευμα", "Βράδυ"},
"en": {"Morning", "Noon", "Evening", "Night"},
"eo": {"Mateno", "Tago", "Vespero", "Nokto"},
"es": {"Mañana", "Mediodía", "Tarde", "Noche"},
"et": {"Hommik", "Päev", "Õhtu", "Öösel"},
"eu": {"Goiza", "Eguerdia", "Arratsaldea", "Gaua"},
"fa": {"حبص", "رهظ", "رصع", "بش"},
"fi": {"Aamu", "Keskipäivä", "Ilta", "Yö"},
"fr": {"Matin", "Après-midi", "Soir", "Nuit"},
"fy": {"Moarns", "Middeis", "Jûns", "Nachts"},
"ga": {"Maidin", "Nóin", "Tráthnóna", "Oíche"},
"he": {"רקוב", "םוֹיְ", "ברֶעֶ", "הלָיְלַ"},
"hi": {"प्रातःकाल", "दोपहर", "सायंकाल", "रात"},
"hr": {"Jutro", "Dan", "Večer", "Noć"},
"hu": {"Reggel", "Dél", "Este", "Éjszaka"},
"hy": {"Առավոտ", "Կեսօր", "Երեկո", "Գիշեր"},
"ia": {"Matino", "Mediedie", "Vespere", "Nocte"},
"id": {"Pagi", "Hari", "Petang", "Malam"},
"it": {"Mattina", "Pomeriggio", "Sera", "Notte"},
"is": {"Morgunn", "Dagur", "Kvöld", "Nótt"},
"ja": {"朝", "昼", "夕", "夜"},
"jv": {"Morning", "Noon", "Evening", "Night"},
"ka": {"დილა", "დღე", "საღამო", "ღამე"},
"kk": {"Таң", "Күндіз", "Кеш", "Түн"},
"ko": {"아침", "낮", "저녁", "밤"},
"ky": {"Эртең", "Күн", "Кеч", "Түн"},
"lt": {"Rytas", "Diena", "Vakaras", "Naktis"},
"lv": {"Rīts", "Diena", "Vakars", "Nakts"},
"mk": {"Утро", "Пладне", "Вечер", "Ноќ"},
"ml": {"രാവിലെ", "മധ്യാഹ്നം", "വൈകുന്നേരം", "രാത്രി"},
"mr": {"सकाळ", "दुपार", "संध्याकाळ", "रात्र"},
"nl": {"'s Ochtends", "'s Middags", "'s Avonds", "'s Nachts"},
"nb": {"Morgen", "Middag", "Kveld", "Natt"},
"nn": {"Morgon", "Middag", "Kveld", "Natt"},
"oc": {"Matin", "Jorn", "Vèspre", "Nuèch"},
"pl": {"Ranek", "Dzień", "Wieczór", "Noc"},
"pt": {"Manhã", "Meio-dia", "Tarde", "Noite"},
"pt-br": {"Manhã", "Meio-dia", "Tarde", "Noite"},
"ro": {"Dimineaţă", "Amiază", "Seară", "Noapte"},
"ru": {"Утро", "День", "Вечер", "Ночь"},
"sk": {"Ráno", "Cez deň", "Večer", "V noci"},
"sl": {"Jutro", "Dan", "Večer", "Noč"},
"sr": {"Јутро", "Подне", "Вече", "Ноћ"},
"sr-lat": {"Jutro", "Podne", "Veče", "Noć"},
"sv": {"Morgon", "Eftermiddag", "Kväll", "Natt"},
"sw": {"Asubuhi", "Adhuhuri", "Jioni", "Usiku"},
"ta": {"காலை", "நண்பகல்", "சாயங்காலம்", "இரவு"},
"te": {"ఉదయం", "రోజు", "సాయంత్రం", "రాత్రి"},
"th": {"เช้า", "วัน", "เย็น", "คืน"},
"tr": {"Sabah", "Öğle", "Akşam", "Gece"},
"uk": {"Ранок", "День", "Вечір", "Ніч"},
"uz": {"Ertalab", "Kunduzi", "Kechqurun", "Kecha"},
"vi": {"Sáng", "Trưa", "Chiều", "Tối"},
"zh": {"早上", "中午", "傍晚", "夜间"},
"zh-cn": {"早上", "中午", "傍晚", "夜间"},
"zh-tw": {"早上", "中午", "傍晚", "夜間"},
"zu": {"Morning", "Noon", "Evening", "Night"},
"mg": {"Maraina", "Tolakandro", "Ariva", "Alina"},
}
)
// Add these languages:
// da tr hu sr jv zu
// More languages: https://developer.worldweatheronline.com/api/multilingual.aspx
// const (
// wuri = "https://api.worldweatheronline.com/premium/v1/weather.ashx?"
// suri = "https://api.worldweatheronline.com/premium/v1/search.ashx?"
// slotcount = 4
// )
const (
wuri = "http://127.0.0.1:5001/premium/v1/weather.ashx?"
suri = "http://127.0.0.1:5001/premium/v1/search.ashx?"
slotcount = 4
)
func configload() error {
b, err := ioutil.ReadFile(configpath)
if err == nil {
return json.Unmarshal(b, &config)
}
return err
}
func configsave() error {
j, err := json.MarshalIndent(config, "", "\t")
if err == nil {
return ioutil.WriteFile(configpath, j, 0600)
}
return err
}
func pad(s string, mustLen int) (ret string) {
ret = s
realLen := utf8.RuneCountInString(ansiEsc.ReplaceAllLiteralString(s, ""))
delta := mustLen - realLen
if delta > 0 {
if config.RightToLeft {
ret = strings.Repeat(" ", delta) + ret + "\033[0m"
} else {
ret += "\033[0m" + strings.Repeat(" ", delta)
}
} else if delta < 0 {
toks := ansiEsc.Split(s, 2)
tokLen := utf8.RuneCountInString(toks[0])
esc := ansiEsc.FindString(s)
if tokLen > mustLen {
ret = fmt.Sprintf("%.*s\033[0m", mustLen, toks[0])
} else {
ret = fmt.Sprintf("%s%s%s", toks[0], esc, pad(toks[1], mustLen-tokLen))
}
}
return
}
func formatTemp(c cond) string {
color := func(temp int, explicitPlus bool) string {
var col = 0
if !config.Inverse {
			// Extremely cold temperature must be shown with violet
// because dark blue is too dark
col = 165
switch temp {
case -15, -14, -13:
col = 171
case -12, -11, -10:
col = 33
case -9, -8, -7:
col = 39
case -6, -5, -4:
col = 45
case -3, -2, -1:
col = 51
case 0, 1:
col = 50
case 2, 3:
col = 49
case 4, 5:
col = 48
case 6, 7:
col = 47
case 8, 9:
col = 46
case 10, 11, 12:
col = 82
case 13, 14, 15:
col = 118
case 16, 17, 18:
col = 154
case 19, 20, 21:
col = 190
case 22, 23, 24:
col = 226
case 25, 26, 27:
col = 220
case 28, 29, 30:
col = 214
case 31, 32, 33:
col = 208
case 34, 35, 36:
col = 202
default:
if temp > 0 {
col = 196
}
}
} else {
col = 16
switch temp {
case -15, -14, -13:
col = 17
case -12, -11, -10:
col = 18
case -9, -8, -7:
col = 19
case -6, -5, -4:
col = 20
case -3, -2, -1:
col = 21
case 0, 1:
col = 30
case 2, 3:
col = 28
case 4, 5:
col = 29
case 6, 7:
col = 30
case 8, 9:
col = 34
case 10, 11, 12:
col = 35
case 13, 14, 15:
col = 36
case 16, 17, 18:
col = 40
case 19, 20, 21:
col = 59
case 22, 23, 24:
col = 100
case 25, 26, 27:
col = 101
case 28, 29, 30:
col = 94
case 31, 32, 33:
col = 166
case 34, 35, 36:
col = 52
default:
if temp > 0 {
col = 196
}
}
}
if config.Imperial {
temp = (temp*18 + 320) / 10
}
if explicitPlus {
return fmt.Sprintf("\033[38;5;%03dm+%d\033[0m", col, temp)
}
return fmt.Sprintf("\033[38;5;%03dm%d\033[0m", col, temp)
}
t := c.TempC
if t == 0 {
t = c.TempC2
}
// hyphen := " - "
// if (config.Lang == "sl") {
// hyphen = "-"
// }
// hyphen = ".."
explicitPlus1 := false
explicitPlus2 := false
if c.FeelsLikeC != t {
if t > 0 {
explicitPlus1 = true
}
if c.FeelsLikeC > 0 {
explicitPlus2 = true
}
if explicitPlus1 {
explicitPlus2 = false
}
return pad(
fmt.Sprintf("%s(%s) °%s",
color(t, explicitPlus1),
color(c.FeelsLikeC, explicitPlus2),
unitTemp[config.Imperial]),
15)
}
// if c.FeelsLikeC < t {
// if c.FeelsLikeC < 0 && t > 0 {
// explicitPlus = true
// }
// return pad(fmt.Sprintf("%s%s%s °%s", color(c.FeelsLikeC, false), hyphen, color(t, explicitPlus), unitTemp[config.Imperial]), 15)
// } else if c.FeelsLikeC > t {
// if t < 0 && c.FeelsLikeC > 0 {
// explicitPlus = true
// }
// return pad(fmt.Sprintf("%s%s%s °%s", color(t, false), hyphen, color(c.FeelsLikeC, explicitPlus), unitTemp[config.Imperial]), 15)
// }
return pad(fmt.Sprintf("%s °%s", color(c.FeelsLikeC, false), unitTemp[config.Imperial]), 15)
}
func formatWind(c cond) string {
windInRightUnits := func(spd int) int {
if config.WindMS {
spd = (spd * 1000) / 3600
} else {
if config.Imperial {
spd = (spd * 1000) / 1609
}
}
return spd
}
color := func(spd int) string {
var col = 46
switch spd {
case 1, 2, 3:
col = 82
case 4, 5, 6:
col = 118
case 7, 8, 9:
col = 154
case 10, 11, 12:
col = 190
case 13, 14, 15:
col = 226
case 16, 17, 18, 19:
col = 220
case 20, 21, 22, 23:
col = 214
case 24, 25, 26, 27:
col = 208
case 28, 29, 30, 31:
col = 202
default:
if spd > 0 {
col = 196
}
}
spd = windInRightUnits(spd)
return fmt.Sprintf("\033[38;5;%03dm%d\033[0m", col, spd)
}
unitWindString := unitWind[0]
if config.WindMS {
unitWindString = unitWind[2]
} else {
if config.Imperial {
unitWindString = unitWind[1]
}
}
hyphen := " - "
// if (config.Lang == "sl") {
// hyphen = "-"
// }
hyphen = "-"
cWindGustKmph := color(c.WindGustKmph)
cWindspeedKmph := color(c.WindspeedKmph)
if windInRightUnits(c.WindGustKmph) > windInRightUnits(c.WindspeedKmph) {
return pad(fmt.Sprintf("%s %s%s%s %s", windDir[c.Winddir16Point], cWindspeedKmph, hyphen, cWindGustKmph, unitWindString), 15)
}
return pad(fmt.Sprintf("%s %s %s", windDir[c.Winddir16Point], cWindspeedKmph, unitWindString), 15)
}
func formatVisibility(c cond) string {
if config.Imperial {
c.VisibleDistKM = (c.VisibleDistKM * 621) / 1000
}
return pad(fmt.Sprintf("%d %s", c.VisibleDistKM, unitVis[config.Imperial]), 15)
}
func formatRain(c cond) string {
rainUnit := float32(c.PrecipMM)
if config.Imperial {
rainUnit = float32(c.PrecipMM) * 0.039
}
if c.ChanceOfRain != "" {
return pad(fmt.Sprintf("%.1f %s | %s%%", rainUnit, unitRain[config.Imperial], c.ChanceOfRain), 15)
}
return pad(fmt.Sprintf("%.1f %s", rainUnit, unitRain[config.Imperial]), 15)
}
func formatCond(cur []string, c cond, current bool) (ret []string) {
var icon []string
if i, ok := codes[c.WeatherCode]; !ok {
icon = iconUnknown
} else {
icon = i
}
if config.Inverse {
// inverting colors
for i := range icon {
icon[i] = strings.Replace(icon[i], "38;5;226", "38;5;94", -1)
icon[i] = strings.Replace(icon[i], "38;5;250", "38;5;243", -1)
icon[i] = strings.Replace(icon[i], "38;5;21", "38;5;18", -1)
icon[i] = strings.Replace(icon[i], "38;5;255", "38;5;245", -1)
icon[i] = strings.Replace(icon[i], "38;5;111", "38;5;63", -1)
icon[i] = strings.Replace(icon[i], "38;5;251", "38;5;238", -1)
}
}
//desc := fmt.Sprintf("%-15.15v", c.WeatherDesc[0].Value)
desc := c.WeatherDesc[0].Value
if config.RightToLeft {
for runewidth.StringWidth(desc) < 15 {
desc = " " + desc
}
for runewidth.StringWidth(desc) > 15 {
_, size := utf8.DecodeLastRuneInString(desc)
desc = desc[size:]
}
} else {
for runewidth.StringWidth(desc) < 15 {
desc += " "
}
for runewidth.StringWidth(desc) > 15 {
_, size := utf8.DecodeLastRuneInString(desc)
desc = desc[:len(desc)-size]
}
}
if current {
if config.RightToLeft {
desc = c.WeatherDesc[0].Value
if runewidth.StringWidth(desc) < 15 {
desc = strings.Repeat(" ", 15-runewidth.StringWidth(desc)) + desc
}
} else {
desc = c.WeatherDesc[0].Value
}
} else {
if config.RightToLeft {
if frstRune, size := utf8.DecodeRuneInString(desc); frstRune != ' ' {
desc = "…" + desc[size:]
for runewidth.StringWidth(desc) < 15 {
desc = " " + desc
}
}
} else {
if lastRune, size := utf8.DecodeLastRuneInString(desc); lastRune != ' ' {
desc = desc[:len(desc)-size] + "…"
//for numberOfSpaces < runewidth.StringWidth(fmt.Sprintf("%c", lastRune)) - 1 {
for runewidth.StringWidth(desc) < 15 {
desc = desc + " "
}
}
}
}
if config.RightToLeft {
ret = append(ret, fmt.Sprintf("%v %v %v", cur[0], desc, icon[0]))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[1], formatTemp(c), icon[1]))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[2], formatWind(c), icon[2]))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[3], formatVisibility(c), icon[3]))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[4], formatRain(c), icon[4]))
} else {
ret = append(ret, fmt.Sprintf("%v %v %v", cur[0], icon[0], desc))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[1], icon[1], formatTemp(c)))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[2], icon[2], formatWind(c)))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[3], icon[3], formatVisibility(c)))
ret = append(ret, fmt.Sprintf("%v %v %v", cur[4], icon[4], formatRain(c)))
}
return
}
func justifyCenter(s string, width int) string {
appendSide := 0
for runewidth.StringWidth(s) <= width {
if appendSide == 1 {
s = s + " "
appendSide = 0
} else {
s = " " + s
appendSide = 1
}
}
return s
}
func reverse(s string) string {
r := []rune(s)
for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
r[i], r[j] = r[j], r[i]
}
return string(r)
}
func printDay(w weather) (ret []string) {
hourly := w.Hourly
ret = make([]string, 5)
for i := range ret {
ret[i] = "│"
}
// find hourly data which fits the desired times of day best
var slots [slotcount]cond
for _, h := range hourly {
c := int(math.Mod(float64(h.Time), 100)) + 60*(h.Time/100)
for i, s := range slots {
if math.Abs(float64(c-slotTimes[i])) < math.Abs(float64(s.Time-slotTimes[i])) {
h.Time = c
slots[i] = h
}
}
}
if config.RightToLeft {
slots[0], slots[3] = slots[3], slots[0]
slots[1], slots[2] = slots[2], slots[1]
}
for i, s := range slots {
if config.Narrow {
if i == 0 || i == 2 {
continue
}
}
ret = formatCond(ret, s, false)
for i := range ret {
ret[i] = ret[i] + "│"
}
}
d, _ := time.Parse("2006-01-02", w.Date)
// dateFmt := "┤ " + d.Format("Mon 02. Jan") + " ├"
if val, ok := locale[config.Lang]; ok {
lctime.SetLocale(val)
} else {
lctime.SetLocale("en_US")
}
dateName := ""
if config.RightToLeft {
dow := lctime.Strftime("%a", d)
day := lctime.Strftime("%d", d)
month := lctime.Strftime("%b", d)
dateName = reverse(month) + " " + day + " " + reverse(dow)
} else {
dateName = lctime.Strftime("%a %d %b", d)
if config.Lang == "ko" {
dateName = lctime.Strftime("%b %d일 %a", d)
}
if config.Lang == "zh" || config.Lang == "zh-tw" || config.Lang == "zh-cn" {
dateName = lctime.Strftime("%b%d日%A", d)
}
}
// appendSide := 0
// // for utf8.RuneCountInString(dateName) <= dateWidth {
// for runewidth.StringWidth(dateName) <= dateWidth {
// if appendSide == 1 {
// dateName = dateName + " "
// appendSide = 0
// } else {
// dateName = " " + dateName
// appendSide = 1
// }
// }
dateFmt := "┤" + justifyCenter(dateName, 12) + "├"
trans := daytimeTranslation["en"]
if t, ok := daytimeTranslation[config.Lang]; ok {
trans = t
}
if config.Narrow {
names := "│ " + justifyCenter(trans[1], 16) +
"└──────┬──────┘" + justifyCenter(trans[3], 16) + " │"
ret = append([]string{
" ┌─────────────┐ ",
"┌───────────────────────" + dateFmt + "───────────────────────┐",
names,
"├──────────────────────────────┼──────────────────────────────┤"},
ret...)
return append(ret,
"└──────────────────────────────┴──────────────────────────────┘")
}
names := ""
if config.RightToLeft {
names = "│" + justifyCenter(trans[3], 29) + "│ " + justifyCenter(trans[2], 16) +
"└──────┬──────┘" + justifyCenter(trans[1], 16) + " │" + justifyCenter(trans[0], 29) + "│"
} else {
names = "│" + justifyCenter(trans[0], 29) + "│ " + justifyCenter(trans[1], 16) +
"└──────┬──────┘" + justifyCenter(trans[2], 16) + " │" + justifyCenter(trans[3], 29) + "│"
}
ret = append([]string{
" ┌─────────────┐ ",
"┌──────────────────────────────┬───────────────────────" + dateFmt + "───────────────────────┬──────────────────────────────┐",
names,
"├──────────────────────────────┼──────────────────────────────┼──────────────────────────────┼──────────────────────────────┤"},
ret...)
return append(ret,
"└──────────────────────────────┴──────────────────────────────┴──────────────────────────────┴──────────────────────────────┘")
}
func unmarshalLang(body []byte, r *resp) error {
var rv map[string]interface{}
if err := json.Unmarshal(body, &rv); err != nil {
return err
}
if data, ok := rv["data"].(map[string]interface{}); ok {
if ccs, ok := data["current_condition"].([]interface{}); ok {
for _, cci := range ccs {
cc, ok := cci.(map[string]interface{})
if !ok {
continue
}
langs, ok := cc["lang_"+config.Lang].([]interface{})
if !ok || len(langs) == 0 {
continue
}
weatherDesc, ok := cc["weatherDesc"].([]interface{})
if !ok || len(weatherDesc) == 0 {
continue
}
weatherDesc[0] = langs[0]
}
}
if ws, ok := data["weather"].([]interface{}); ok {
for _, wi := range ws {
w, ok := wi.(map[string]interface{})
if !ok {
continue
}
if hs, ok := w["hourly"].([]interface{}); ok {
for _, hi := range hs {
h, ok := hi.(map[string]interface{})
if !ok {
continue
}
langs, ok := h["lang_"+config.Lang].([]interface{})
if !ok || len(langs) == 0 {
continue
}
weatherDesc, ok := h["weatherDesc"].([]interface{})
if !ok || len(weatherDesc) == 0 {
continue
}
weatherDesc[0] = langs[0]
}
}
}
}
}
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(rv); err != nil {
return err
}
if err := json.NewDecoder(&buf).Decode(r); err != nil {
return err
}
return nil
}
func getDataFromAPI() (ret resp) {
var params []string
if len(config.APIKey) == 0 {
log.Fatal("No API key specified. Setup instructions are in the README.")
}
params = append(params, "key="+config.APIKey)
// non-flag shortcut arguments will overwrite possible flag arguments
for _, arg := range flag.Args() {
if v, err := strconv.Atoi(arg); err == nil && len(arg) == 1 {
config.Numdays = v
} else {
config.City = arg
}
}
if len(config.City) > 0 {
params = append(params, "q="+url.QueryEscape(config.City))
}
params = append(params, "format=json")
params = append(params, "num_of_days="+strconv.Itoa(config.Numdays))
params = append(params, "tp=3")
if config.Lang != "" {
params = append(params, "lang="+config.Lang)
}
if debug {
fmt.Fprintln(os.Stderr, params)
}
res, err := http.Get(wuri + strings.Join(params, "&"))
if err != nil {
log.Fatal(err)
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Fatal(err)
}
if debug {
var out bytes.Buffer
json.Indent(&out, body, "", " ")
out.WriteTo(os.Stderr)
fmt.Print("\n\n")
}
if config.Lang == "" {
if err = json.Unmarshal(body, &ret); err != nil {
log.Println(err)
}
} else {
if err = unmarshalLang(body, &ret); err != nil {
log.Println(err)
}
}
return
}
func init() {
flag.IntVar(&config.Numdays, "days", 3, "Number of days of weather forecast to be displayed")
flag.StringVar(&config.Lang, "lang", "en", "Language of the report")
flag.StringVar(&config.City, "city", "New York", "City to be queried")
flag.BoolVar(&debug, "debug", false, "Print out raw json response for debugging purposes")
flag.BoolVar(&config.Imperial, "imperial", false, "Use imperial units")
flag.BoolVar(&config.Inverse, "inverse", false, "Use inverted colors")
flag.BoolVar(&config.Narrow, "narrow", false, "Narrow output (two columns)")
flag.StringVar(&config.LocationName, "location_name", "", "Location name (used in the caption)")
flag.BoolVar(&config.WindMS, "wind_in_ms", false, "Show wind speed in m/s")
flag.BoolVar(&config.RightToLeft, "right_to_left", false, "Right to left script")
configpath = os.Getenv("WEGORC")
if configpath == "" {
usr, err := user.Current()
if err != nil {
log.Fatalf("%v\nYou can set the environment variable WEGORC to point to your config file as a workaround.", err)
}
configpath = path.Join(usr.HomeDir, ".wegorc")
}
config.APIKey = ""
config.Imperial = false
config.Lang = "en"
err := configload()
if _, ok := err.(*os.PathError); ok {
log.Printf("No config file found. Creating %s ...", configpath)
if err2 := configsave(); err2 != nil {
log.Fatal(err2)
}
} else if err != nil {
log.Fatalf("could not parse %v: %v", configpath, err)
}
ansiEsc = regexp.MustCompile("\033.*?m")
}
func main() {
flag.Parse()
r := getDataFromAPI()
if r.Data.Req == nil || len(r.Data.Req) < 1 {
if r.Data.Err != nil && len(r.Data.Err) >= 1 {
log.Fatal(r.Data.Err[0].Msg)
}
log.Fatal("Malformed response.")
}
locationName := r.Data.Req[0].Query
if config.LocationName != "" {
locationName = config.LocationName
}
if config.Lang == "he" || config.Lang == "ar" || config.Lang == "fa" {
config.RightToLeft = true
}
if caption, ok := localizedCaption[config.Lang]; !ok {
// r.Data.Req[0].Type,
fmt.Printf("Weather report: %s\n\n", locationName)
} else {
if config.RightToLeft {
caption = locationName + " " + caption
space := strings.Repeat(" ", 125-runewidth.StringWidth(caption))
fmt.Printf("%s%s\n\n", space, caption)
} else {
fmt.Printf("%s %s\n\n", caption, locationName)
}
}
stdout := colorable.NewColorableStdout()
if r.Data.Cur == nil || len(r.Data.Cur) < 1 {
log.Fatal("No weather data available.")
}
out := formatCond(make([]string, 5), r.Data.Cur[0], true)
for _, val := range out {
if config.RightToLeft {
fmt.Fprint(stdout, strings.Repeat(" ", 94))
} else {
fmt.Fprint(stdout, " ")
}
fmt.Fprintln(stdout, val)
}
if config.Numdays == 0 {
return
}
if r.Data.Weather == nil {
log.Fatal("No detailed weather forecast available.")
}
for _, d := range r.Data.Weather {
for _, val := range printDay(d) {
fmt.Fprintln(stdout, val)
}
}
}
| [
"\"WEGORC\""
]
| []
| [
"WEGORC"
]
| [] | ["WEGORC"] | go | 1 | 0 | |
testy/e2e/test017.py | # Import bibliotek
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from faker import Faker
from time import sleep
import unittest
import os
# TEST DATA
login = os.environ['LOGIN']
password = os.environ['PASSWORD']
class RegistrationTest(unittest.TestCase):
"""
    Scenario: placing items with the same name on the Pending list
"""
def setUp(self):
        # Test setup
        # Test preconditions
        # Open the browser
self.driver = webdriver.Chrome()
        # Open the page
self.driver.get(os.environ['APP_URL'])
        # Maximize the window
self.driver.maximize_window()
        # Set an implicit wait for element lookups
        # max. 10 seconds
self.driver.implicitly_wait(10)
self.faker = Faker()
def testUserRegister(self):
        # The actual test
driver = self.driver
        # Steps
        # 1. Click "Login"
        # Find the element
# self.driver.find_element_by_partial_link_text("Login") # Selenium 3
        # The find_element method returns a WebElement instance
sign_in_link = driver.find_element(
By.PARTIAL_LINK_TEXT, "Login") # Selenium 4
        # Click the element
sign_in_link.click()
        # 2. Enter a unique username
login_input = driver.find_element(By.ID, "input-username")
login_input.send_keys(login)
        # 3. Enter the password
password_input = driver.find_element(By.ID, "input-password")
password_input.send_keys(password)
        # 4. Click Login
register_btn = driver.find_element(
By.XPATH, '//button[@type="submit"]') # WebElement
register_btn.click()
sleep(1)
        # 5. Click the "Todo" tab
Todo_btn = driver.find_element(
By.XPATH, '//a[@href="/frontend-vue/todo"]')
Todo_btn.click()
        # 6. Verify the Todo App view is displayed
sleep(5)
title = driver.find_element(By.TAG_NAME, 'h1').text
expectedTitle = "Todo App"
self.assertEqual(title, expectedTitle)
name_input = driver.find_element(By.ID, "input-name")
name_input.send_keys(self.faker.name())
        # 7. Click Add
Add_btn = driver.find_element(
By.XPATH, '//form//button[@type="submit"]') # WebElement
Add_btn.click()
Add_btn.click()
sleep(5)
def tearDown(self):
        # Test teardown
        # Close the browser
self.driver.quit()
if __name__ == '__main__':
unittest.main() # (verbosity=4)
| []
| []
| [
"LOGIN",
"APP_URL",
"PASSWORD"
]
| [] | ["LOGIN", "APP_URL", "PASSWORD"] | python | 3 | 0 | |
conanfile.py | from conans import ConanFile, CMake, tools
import os, platform
named_type_version = os.getenv('NAMED_TYPE_VERSION', '1.0')
class NamedTypeConan(ConanFile):
name = "named-type"
license = "MIT"
url = "https://github.com/BentouDev/conan-named-type"
version = named_type_version
description = "NamedType can be used to declare a strong type with a typedef-like syntax"
no_copy_source = True
exports_sources = ["named-type-source/*"]
def package_id(self):
self.info.header_only()
def package(self):
self.copy(pattern="*.hpp", dst="include", src="named-type-source") | []
| []
| [
"NAMED_TYPE_VERSION"
]
| [] | ["NAMED_TYPE_VERSION"] | python | 1 | 0 | |
_scripts/make.go | package main
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"github.com/go-delve/delve/pkg/goversion"
"github.com/spf13/cobra"
)
const DelveMainPackagePath = "github.com/go-delve/delve/cmd/dlv"
var Verbose bool
var NOTimeout bool
var TestIncludePIE bool
var TestSet, TestRegex, TestBackend, TestBuildMode string
func NewMakeCommands() *cobra.Command {
RootCommand := &cobra.Command{
Use: "make.go",
Short: "make script for delve.",
}
RootCommand.AddCommand(&cobra.Command{
Use: "check-cert",
Short: "Check certificate for macOS.",
Run: checkCertCmd,
})
RootCommand.AddCommand(&cobra.Command{
Use: "build",
Short: "Build delve",
Run: func(cmd *cobra.Command, args []string) {
tagFlag := prepareMacnative()
execute("go", "build", tagFlag, buildFlags(), DelveMainPackagePath)
if runtime.GOOS == "darwin" && os.Getenv("CERT") != "" && canMacnative() {
codesign("./dlv")
}
},
})
RootCommand.AddCommand(&cobra.Command{
Use: "install",
Short: "Installs delve",
Run: func(cmd *cobra.Command, args []string) {
tagFlag := prepareMacnative()
execute("go", "install", tagFlag, buildFlags(), DelveMainPackagePath)
if runtime.GOOS == "darwin" && os.Getenv("CERT") != "" && canMacnative() {
codesign(installedExecutablePath())
}
},
})
RootCommand.AddCommand(&cobra.Command{
Use: "uninstall",
Short: "Uninstalls delve",
Run: func(cmd *cobra.Command, args []string) {
execute("go", "clean", "-i", DelveMainPackagePath)
},
})
test := &cobra.Command{
Use: "test",
Short: "Tests delve",
Long: `Tests delve.
Use the flags -s, -r and -b to specify which tests to run. Specifying nothing will run all tests relevant for the current environment (see testStandard).
`,
Run: testCmd,
}
test.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "Verbose tests")
test.PersistentFlags().BoolVarP(&NOTimeout, "timeout", "t", false, "Set infinite timeouts")
test.PersistentFlags().StringVarP(&TestSet, "test-set", "s", "", `Select the set of tests to run, one of either:
all tests all packages
basic tests proc, integration and terminal
integration tests github.com/go-delve/delve/service/test
package-name test the specified package only
`)
test.PersistentFlags().StringVarP(&TestRegex, "test-run", "r", "", `Only runs the tests matching the specified regex. This option can only be specified if testset is a single package`)
test.PersistentFlags().StringVarP(&TestBackend, "test-backend", "b", "", `Runs tests for the specified backend only, one of either:
default the default backend
lldb lldb backend
rr rr backend
This option can only be specified if testset is basic or a single package.`)
test.PersistentFlags().StringVarP(&TestBuildMode, "test-build-mode", "m", "", `Runs tests compiling with the specified build mode, one of either:
normal normal buildmode (default)
pie PIE buildmode
This option can only be specified if testset is basic or a single package.`)
test.PersistentFlags().BoolVarP(&TestIncludePIE, "pie", "", true, "Standard testing should include PIE")
RootCommand.AddCommand(test)
RootCommand.AddCommand(&cobra.Command{
Use: "vendor",
Short: "vendors dependencies",
Run: func(cmd *cobra.Command, args []string) {
execute("go", "mod", "vendor")
},
})
return RootCommand
}
func checkCert() bool {
// If we're on OSX make sure the proper CERT env var is set.
if os.Getenv("TRAVIS") == "true" || runtime.GOOS != "darwin" || os.Getenv("CERT") != "" {
return true
}
x := exec.Command("_scripts/gencert.sh")
x.Stdout = os.Stdout
x.Stderr = os.Stderr
x.Env = os.Environ()
err := x.Run()
if x.ProcessState != nil && !x.ProcessState.Success() {
fmt.Printf("An error occurred when generating and installing a new certificate\n")
return false
}
if err != nil {
fmt.Printf("An error occoured when generating and installing a new certificate: %v\n", err)
return false
}
os.Setenv("CERT", "dlv-cert")
return true
}
func checkCertCmd(cmd *cobra.Command, args []string) {
if !checkCert() {
os.Exit(1)
}
}
func strflatten(v []interface{}) []string {
r := []string{}
for _, s := range v {
switch s := s.(type) {
case []string:
r = append(r, s...)
case string:
if s != "" {
r = append(r, s)
}
}
}
return r
}
func executeq(cmd string, args ...interface{}) {
x := exec.Command(cmd, strflatten(args)...)
x.Stdout = os.Stdout
x.Stderr = os.Stderr
x.Env = os.Environ()
err := x.Run()
if x.ProcessState != nil && !x.ProcessState.Success() {
os.Exit(1)
}
if err != nil {
log.Fatal(err)
}
}
func execute(cmd string, args ...interface{}) {
fmt.Printf("%s %s\n", cmd, strings.Join(quotemaybe(strflatten(args)), " "))
executeq(cmd, args...)
}
func quotemaybe(args []string) []string {
for i := range args {
if strings.Index(args[i], " ") >= 0 {
args[i] = fmt.Sprintf("%q", args[i])
}
}
return args
}
func getoutput(cmd string, args ...interface{}) string {
x := exec.Command(cmd, strflatten(args)...)
x.Env = os.Environ()
out, err := x.Output()
if err != nil {
fmt.Fprintf(os.Stderr, "Error executing %s %v\n", cmd, args)
log.Fatal(err)
}
if !x.ProcessState.Success() {
fmt.Fprintf(os.Stderr, "Error executing %s %v\n", cmd, args)
os.Exit(1)
}
return string(out)
}
func codesign(path string) {
execute("codesign", "-s", os.Getenv("CERT"), path)
}
func installedExecutablePath() string {
if gobin := os.Getenv("GOBIN"); gobin != "" {
return filepath.Join(gobin, "dlv")
}
gopath := strings.Split(getoutput("go", "env", "GOPATH"), ":")
return filepath.Join(strings.TrimSpace(gopath[0]), "bin", "dlv")
}
// canMacnative returns true if we can build the native backend for macOS,
// i.e. cgo enabled and the legacy SDK headers:
// https://forums.developer.apple.com/thread/104296
func canMacnative() bool {
if !(runtime.GOOS == "darwin" && runtime.GOARCH == "amd64") {
return false
}
if strings.TrimSpace(getoutput("go", "env", "CGO_ENABLED")) != "1" {
return false
}
macOSVersion := strings.Split(strings.TrimSpace(getoutput("/usr/bin/sw_vers", "-productVersion")), ".")
major, err := strconv.ParseInt(macOSVersion[0], 10, 64)
if err != nil {
return false
}
minor, err := strconv.ParseInt(macOSVersion[1], 10, 64)
if err != nil {
return false
}
typesHeader := "/usr/include/sys/types.h"
if major >= 11 || (major == 10 && minor >= 15) {
typesHeader = "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/sys/types.h"
}
_, err = os.Stat(typesHeader)
if err != nil {
return false
}
return true
}
// prepareMacnative checks if we can build the native backend for macOS and
// if we can, checks the certificate and then returns the -tags flag.
func prepareMacnative() string {
if !canMacnative() {
return ""
}
if !checkCert() {
return ""
}
return "-tags=macnative"
}
func buildFlags() []string {
buildSHA, err := exec.Command("git", "rev-parse", "HEAD").CombinedOutput()
if err != nil {
log.Fatal(err)
}
ldFlags := "-X main.Build=" + strings.TrimSpace(string(buildSHA))
if runtime.GOOS == "darwin" {
ldFlags = "-s " + ldFlags
}
return []string{fmt.Sprintf("-ldflags=%s", ldFlags)}
}
func testFlags() []string {
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
testFlags := []string{"-count", "1", "-p", "1"}
if Verbose {
testFlags = append(testFlags, "-v")
}
if NOTimeout {
testFlags = append(testFlags, "-timeout", "0")
} else if os.Getenv("TRAVIS") == "true" {
// Make test timeout shorter than Travis' own timeout so that Go can report which test hangs.
testFlags = append(testFlags, "-timeout", "9m")
}
if len(os.Getenv("TEAMCITY_VERSION")) > 0 {
testFlags = append(testFlags, "-json")
}
if runtime.GOOS == "darwin" {
testFlags = append(testFlags, "-exec="+wd+"/_scripts/testsign")
}
return testFlags
}
func testCmd(cmd *cobra.Command, args []string) {
checkCertCmd(nil, nil)
if os.Getenv("TRAVIS") == "true" && runtime.GOOS == "darwin" {
fmt.Println("Building with native backend")
execute("go", "build", "-tags=macnative", buildFlags(), DelveMainPackagePath)
fmt.Println("\nBuilding without native backend")
execute("go", "build", buildFlags(), DelveMainPackagePath)
fmt.Println("\nTesting")
os.Setenv("PROCTEST", "lldb")
executeq("sudo", "-E", "go", "test", testFlags(), allPackages())
return
}
if TestSet == "" && TestBackend == "" && TestBuildMode == "" {
if TestRegex != "" {
fmt.Printf("Can not use --test-run without --test-set\n")
os.Exit(1)
}
testStandard()
return
}
if TestSet == "" {
TestSet = "all"
}
if TestBackend == "" {
TestBackend = "default"
}
if TestBuildMode == "" {
TestBuildMode = "normal"
}
testCmdIntl(TestSet, TestRegex, TestBackend, TestBuildMode)
}
func testStandard() {
fmt.Println("Testing default backend")
testCmdIntl("all", "", "default", "normal")
if inpath("lldb-server") && !goversion.VersionAfterOrEqual(runtime.Version(), 1, 14) {
fmt.Println("\nTesting LLDB backend")
testCmdIntl("basic", "", "lldb", "normal")
}
if inpath("rr") {
fmt.Println("\nTesting RR backend")
testCmdIntl("basic", "", "rr", "normal")
}
if TestIncludePIE && (runtime.GOOS == "linux" || (runtime.GOOS == "windows" && goversion.VersionAfterOrEqual(runtime.Version(), 1, 15))) {
fmt.Println("\nTesting PIE buildmode, default backend")
testCmdIntl("basic", "", "default", "pie")
testCmdIntl("core", "", "default", "pie")
}
if runtime.GOOS == "linux" && inpath("rr") {
fmt.Println("\nTesting PIE buildmode, RR backend")
testCmdIntl("basic", "", "rr", "pie")
}
}
func testCmdIntl(testSet, testRegex, testBackend, testBuildMode string) {
testPackages := testSetToPackages(testSet)
if len(testPackages) == 0 {
fmt.Printf("Unknown test set %q\n", testSet)
os.Exit(1)
}
if testRegex != "" && len(testPackages) != 1 {
fmt.Printf("Can not use test-run with test set %q\n", testSet)
os.Exit(1)
}
backendFlag := ""
if testBackend != "" && testBackend != "default" {
if testSet != "basic" && len(testPackages) != 1 {
fmt.Printf("Can not use test-backend with test set %q\n", testSet)
os.Exit(1)
}
backendFlag = "-backend=" + testBackend
}
buildModeFlag := ""
if testBuildMode != "" && testBuildMode != "normal" {
if testSet != "basic" && len(testPackages) != 1 {
fmt.Printf("Can not use test-buildmode with test set %q\n", testSet)
os.Exit(1)
}
buildModeFlag = "-test-buildmode=" + testBuildMode
}
if len(testPackages) > 3 {
executeq("go", "test", testFlags(), buildFlags(), testPackages, backendFlag, buildModeFlag)
} else if testRegex != "" {
execute("go", "test", testFlags(), buildFlags(), testPackages, "-run="+testRegex, backendFlag, buildModeFlag)
} else {
execute("go", "test", testFlags(), buildFlags(), testPackages, backendFlag, buildModeFlag)
}
}
func testSetToPackages(testSet string) []string {
switch testSet {
case "", "all":
return allPackages()
case "basic":
return []string{"github.com/go-delve/delve/pkg/proc", "github.com/go-delve/delve/service/test", "github.com/go-delve/delve/pkg/terminal"}
case "integration":
return []string{"github.com/go-delve/delve/service/test"}
default:
for _, pkg := range allPackages() {
if pkg == testSet || strings.HasSuffix(pkg, "/"+testSet) {
return []string{pkg}
}
}
return nil
}
}
func defaultBackend() string {
if runtime.GOOS == "darwin" {
return "lldb"
}
return "native"
}
func inpath(exe string) bool {
path, _ := exec.LookPath(exe)
return path != ""
}
func allPackages() []string {
r := []string{}
for _, dir := range strings.Split(getoutput("go", "list", "-mod=vendor", "./..."), "\n") {
dir = strings.TrimSpace(dir)
if dir == "" || strings.Contains(dir, "/vendor/") || strings.Contains(dir, "/_scripts") {
continue
}
r = append(r, dir)
}
sort.Strings(r)
return r
}
func main() {
allPackages() // checks that vendor directory is synced as a side effect
NewMakeCommands().Execute()
}
| [
"\"CERT\"",
"\"CERT\"",
"\"TRAVIS\"",
"\"CERT\"",
"\"CERT\"",
"\"GOBIN\"",
"\"TRAVIS\"",
"\"TEAMCITY_VERSION\"",
"\"TRAVIS\""
]
| []
| [
"GOBIN",
"TEAMCITY_VERSION",
"CERT",
"TRAVIS"
]
| [] | ["GOBIN", "TEAMCITY_VERSION", "CERT", "TRAVIS"] | go | 4 | 0 | |
driver_hsqldb_test.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package avatica
import (
"bytes"
"crypto/sha256"
"database/sql"
"io/ioutil"
"os"
"path/filepath"
"sync"
"testing"
"time"
)
func skipTestIfNotHSQLDB(t *testing.T) {
val := os.Getenv("AVATICA_FLAVOR")
if val != "HSQLDB" {
t.Skip("Skipping Apache Avatica HSQLDB test")
}
}
func TestHSQLDBConnectionMustBeOpenedWithAutoCommitTrue(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec("CREATE TABLE " + dbt.tableName + " (id BIGINT PRIMARY KEY, val VARCHAR(1))")
dbt.mustExec("INSERT INTO " + dbt.tableName + " VALUES (1,'A')")
dbt.mustExec("INSERT INTO " + dbt.tableName + " VALUES (2,'B')")
rows := dbt.mustQuery("SELECT COUNT(*) FROM " + dbt.tableName)
defer rows.Close()
for rows.Next() {
var count int
err := rows.Scan(&count)
if err != nil {
dbt.Fatal(err)
}
if count != 2 {
dbt.Fatalf("There should be 2 rows, got %d", count)
}
}
})
}
func TestHSQLDBZeroValues(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec("CREATE TABLE " + dbt.tableName + " (int INTEGER PRIMARY KEY, flt FLOAT, bool BOOLEAN, str VARCHAR(1))")
dbt.mustExec("INSERT INTO " + dbt.tableName + " VALUES (0, 0.0, false, '')")
rows := dbt.mustQuery("SELECT * FROM " + dbt.tableName)
defer rows.Close()
for rows.Next() {
var i int
var flt float64
var b bool
var s string
err := rows.Scan(&i, &flt, &b, &s)
if err != nil {
dbt.Fatal(err)
}
if i != 0 {
dbt.Fatalf("Integer should be 0, got %v", i)
}
if flt != 0.0 {
dbt.Fatalf("Float should be 0.0, got %v", flt)
}
if b != false {
dbt.Fatalf("Boolean should be false, got %v", b)
}
if s != "" {
dbt.Fatalf("String should be \"\", got %v", s)
}
}
})
}
func TestHSQLDBDataTypes(t *testing.T) {
// TODO; Test case for Time type is currently commented out due to CALCITE-1951
skipTestIfNotHSQLDB(t)
runTests(t, dsn, func(dbt *DBTest) {
// Create and seed table
/*dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY,
tint TINYINT,
sint SMALLINT,
bint BIGINT,
num NUMERIC(10,3),
dec DECIMAL(10,3),
re REAL,
flt FLOAT,
dbl DOUBLE,
bool BOOLEAN,
ch CHAR(3),
var VARCHAR(128),
bin BINARY(20),
varbin VARBINARY(128),
dt DATE,
tm TIME,
tmstmp TIMESTAMP,
)`)*/
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY,
tint TINYINT,
sint SMALLINT,
bint BIGINT,
num NUMERIC(10,3),
dec DECIMAL(10,3),
re REAL,
flt FLOAT,
dbl DOUBLE,
bool BOOLEAN,
ch CHAR(3),
var VARCHAR(128),
bin BINARY(20),
varbin VARBINARY(128),
dt DATE,
tmstmp TIMESTAMP,
)`)
var (
integerValue int = -20
tintValue int = -128
sintValue int = -32768
bintValue int = -9223372036854775807
numValue string = "1.333"
decValue string = "1.333"
reValue float64 = 3.555
fltValue float64 = -3.555
dblValue float64 = -9.555
booleanValue bool = true
chValue string = "a"
varcharValue string = "test string"
binValue []byte = make([]byte, 20, 20)
varbinValue []byte = []byte("testtesttest")
dtValue time.Time = time.Date(2100, 2, 1, 0, 0, 0, 0, time.UTC)
// tmValue time.Time = time.Date(0, 1, 1, 21, 21, 21, 222000000, time.UTC)
tmstmpValue time.Time = time.Date(2100, 2, 1, 21, 21, 21, 222000000, time.UTC)
)
copy(binValue[:], []byte("test"))
// dbt.mustExec(`INSERT INTO `+dbt.tableName+` (int, tint, sint, bint, num, dec, re, flt, dbl, bool, ch, var, bin, varbin, dt, tm, tmstmp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
dbt.mustExec(`INSERT INTO `+dbt.tableName+` (int, tint, sint, bint, num, dec, re, flt, dbl, bool, ch, var, bin, varbin, dt, tmstmp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
integerValue,
tintValue,
sintValue,
bintValue,
numValue,
decValue,
reValue,
fltValue,
dblValue,
booleanValue,
chValue,
varcharValue,
binValue,
varbinValue,
dtValue,
// tmValue,
tmstmpValue,
)
rows := dbt.mustQuery("SELECT * FROM " + dbt.tableName)
defer rows.Close()
var (
integer int
tint int
sint int
bint int
num string
dec string
re float64
flt float64
dbl float64
boolean bool
ch string
varchar string
bin []byte
varbin []byte
dt time.Time
// tm time.Time
tmstmp time.Time
)
for rows.Next() {
// err := rows.Scan(&integer, &tint, &sint, &bint, &num, &dec, &re, &flt, &dbl, &boolean, &ch, &varchar, &bin, &varbin, &dt, &tm, &tmstmp)
err := rows.Scan(&integer, &tint, &sint, &bint, &num, &dec, &re, &flt, &dbl, &boolean, &ch, &varchar, &bin, &varbin, &dt, &tmstmp)
if err != nil {
dbt.Fatal(err)
}
}
comparisons := []struct {
result interface{}
expected interface{}
}{
{integer, integerValue},
{tint, tintValue},
{sint, sintValue},
{bint, bintValue},
{num, numValue},
{dec, decValue},
{re, reValue},
{flt, fltValue},
{dbl, dblValue},
{boolean, booleanValue},
{ch, chValue + " "}, // HSQLDB pads CHAR columns if a length is specified
{varchar, varcharValue},
{bin, binValue},
{varbin, varbinValue},
{dt, dtValue},
// {tm, tmValue},
{tmstmp, tmstmpValue},
}
for _, tt := range comparisons {
if v, ok := tt.expected.(time.Time); ok {
if !v.Equal(tt.result.(time.Time)) {
dbt.Fatalf("Expected %v, got %v.", tt.expected, tt.result)
}
} else if v, ok := tt.expected.([]byte); ok {
if !bytes.Equal(v, tt.result.([]byte)) {
dbt.Fatalf("Expected %v, got %v.", tt.expected, tt.result)
}
} else if tt.expected != tt.result {
dbt.Errorf("Expected %v, got %v.", tt.expected, tt.result)
}
}
})
}
func TestHSQLDBSQLNullTypes(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
id INTEGER PRIMARY KEY,
int INTEGER,
tint TINYINT,
sint SMALLINT,
bint BIGINT,
num NUMERIC(10,3),
dec DECIMAL(10,3),
re REAL,
flt FLOAT,
dbl DOUBLE,
bool BOOLEAN,
ch CHAR(3),
var VARCHAR(128),
bin BINARY(20),
varbin VARBINARY(128),
dt DATE,
tmstmp TIMESTAMP,
)`)
var (
idValue = time.Now().Unix()
integerValue = sql.NullInt64{}
tintValue = sql.NullInt64{}
sintValue = sql.NullInt64{}
bintValue = sql.NullInt64{}
numValue = sql.NullString{}
decValue = sql.NullString{}
reValue = sql.NullFloat64{}
fltValue = sql.NullFloat64{}
dblValue = sql.NullFloat64{}
booleanValue = sql.NullBool{}
chValue = sql.NullString{}
varcharValue = sql.NullString{}
binValue *[]byte = nil
varbinValue *[]byte = nil
dtValue *time.Time = nil
tmstmpValue *time.Time = nil
)
dbt.mustExec(`INSERT INTO `+dbt.tableName+` (id, int, tint, sint, bint, num, dec, re, flt, dbl, bool, ch, var, bin, varbin, dt, tmstmp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
idValue,
integerValue,
tintValue,
sintValue,
bintValue,
numValue,
decValue,
reValue,
fltValue,
dblValue,
booleanValue,
chValue,
varcharValue,
binValue,
varbinValue,
dtValue,
tmstmpValue,
)
rows := dbt.mustQuery("SELECT * FROM "+dbt.tableName+" WHERE id = ?", idValue)
defer rows.Close()
var (
id int64
integer sql.NullInt64
tint sql.NullInt64
sint sql.NullInt64
bint sql.NullInt64
num sql.NullString
dec sql.NullString
re sql.NullFloat64
flt sql.NullFloat64
dbl sql.NullFloat64
boolean sql.NullBool
ch sql.NullString
varchar sql.NullString
bin *[]byte
varbin *[]byte
dt *time.Time
tmstmp *time.Time
)
for rows.Next() {
err := rows.Scan(&id, &integer, &tint, &sint, &bint, &num, &dec, &re, &flt, &dbl, &boolean, &ch, &varchar, &bin, &varbin, &dt, &tmstmp)
if err != nil {
dbt.Fatal(err)
}
}
comparisons := []struct {
result interface{}
expected interface{}
}{
{integer, integerValue},
{tint, tintValue},
{sint, sintValue},
{bint, bintValue},
{num, numValue},
{dec, decValue},
{re, reValue},
{flt, fltValue},
{dbl, dblValue},
{boolean, booleanValue},
{ch, chValue},
{varchar, varcharValue},
{bin, binValue},
{varbin, varbinValue},
{dt, dtValue},
{tmstmp, tmstmpValue},
}
for i, tt := range comparisons {
if v, ok := tt.expected.(time.Time); ok {
if !v.Equal(tt.result.(time.Time)) {
dbt.Fatalf("Expected %v for case %d, got %v.", tt.expected, i, tt.result)
}
} else if v, ok := tt.expected.([]byte); ok {
if !bytes.Equal(v, tt.result.([]byte)) {
dbt.Fatalf("Expected %v for case %d, got %v.", tt.expected, i, tt.result)
}
} else if tt.expected != tt.result {
dbt.Errorf("Expected %v for case %d, got %v.", tt.expected, i, tt.result)
}
}
})
}
func TestHSQLDBNulls(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
id INTEGER PRIMARY KEY,
int INTEGER,
tint TINYINT,
sint SMALLINT,
bint BIGINT,
num NUMERIC(10,3),
dec DECIMAL(10,3),
re REAL,
flt FLOAT,
dbl DOUBLE,
bool BOOLEAN,
ch CHAR(3),
var VARCHAR(128),
bin BINARY(20),
varbin VARBINARY(128),
dt DATE,
tmstmp TIMESTAMP,
)`)
idValue := time.Now().Unix()
dbt.mustExec(`INSERT INTO `+dbt.tableName+` (id, int, tint, sint, bint, num, dec, re, flt, dbl, bool, ch, var, bin, varbin, dt, tmstmp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
idValue,
nil,
nil,
nil,
nil,
nil,
nil,
nil,
nil,
nil,
nil,
nil,
nil,
nil,
nil,
nil,
nil,
)
rows := dbt.mustQuery("SELECT * FROM "+dbt.tableName+" WHERE id = ?", idValue)
defer rows.Close()
var (
id int64
integer sql.NullInt64
tint sql.NullInt64
sint sql.NullInt64
bint sql.NullInt64
num sql.NullString
dec sql.NullString
re sql.NullFloat64
flt sql.NullFloat64
dbl sql.NullFloat64
boolean sql.NullBool
ch sql.NullString
varchar sql.NullString
bin *[]byte
varbin *[]byte
dt *time.Time
tmstmp *time.Time
)
for rows.Next() {
err := rows.Scan(&id, &integer, &tint, &sint, &bint, &num, &dec, &re, &flt, &dbl, &boolean, &ch, &varchar, &bin, &varbin, &dt, &tmstmp)
if err != nil {
dbt.Fatal(err)
}
}
comparisons := []struct {
result interface{}
expected interface{}
}{
{integer, sql.NullInt64{}},
{tint, sql.NullInt64{}},
{sint, sql.NullInt64{}},
{bint, sql.NullInt64{}},
{num, sql.NullString{}},
{dec, sql.NullString{}},
{re, sql.NullFloat64{}},
{flt, sql.NullFloat64{}},
{dbl, sql.NullFloat64{}},
{boolean, sql.NullBool{}},
{ch, sql.NullString{}},
{varchar, sql.NullString{}},
{bin, (*[]byte)(nil)},
{varbin, (*[]byte)(nil)},
{dt, (*time.Time)(nil)},
{tmstmp, (*time.Time)(nil)},
}
for i, tt := range comparisons {
if v, ok := tt.expected.(time.Time); ok {
if !v.Equal(tt.result.(time.Time)) {
dbt.Fatalf("Expected %v for case %d, got %v.", tt.expected, i, tt.result)
}
} else if v, ok := tt.expected.([]byte); ok {
if !bytes.Equal(v, tt.result.([]byte)) {
dbt.Fatalf("Expected %v for case %d, got %v.", tt.expected, i, tt.result)
}
} else if tt.expected != tt.result {
dbt.Errorf("Expected %v for case %d, got %v.", tt.expected, i, tt.result)
}
}
})
}
// TODO: Test case commented out due to CALCITE-1951
/*func TestHSQLDBLocations(t *testing.T) {
skipTestIfNotHSQLDB(t)
query := "?location=Australia/Melbourne"
runTests(t, dsn+query, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
tm TIME(6) PRIMARY KEY,
dt DATE,
tmstmp TIMESTAMP
)`)
loc, err := time.LoadLocation("Australia/Melbourne")
if err != nil {
dbt.Fatalf("Unexpected error: %s", err)
}
var (
tmValue time.Time = time.Date(0, 1, 1, 21, 21, 21, 222000000, loc)
dtValue time.Time = time.Date(2100, 2, 1, 0, 0, 0, 0, loc)
tmstmpValue time.Time = time.Date(2100, 2, 1, 21, 21, 21, 222000000, loc)
)
dbt.mustExec(`INSERT INTO `+dbt.tableName+`(tm, dt, tmstmp) VALUES (?, ?, ?)`,
tmValue,
dtValue,
tmstmpValue,
)
rows := dbt.mustQuery("SELECT * FROM " + dbt.tableName)
defer rows.Close()
var (
tm time.Time
dt time.Time
tmstmp time.Time
)
for rows.Next() {
err := rows.Scan(&tm, &dt, &tmstmp)
if err != nil {
dbt.Fatal(err)
}
}
comparisons := []struct {
result time.Time
expected time.Time
}{
{tm, tmValue},
{dt, dtValue},
{tmstmp, tmstmpValue},
}
for _, tt := range comparisons {
if !tt.result.Equal(tt.expected) {
dbt.Errorf("Expected %v, got %v.", tt.expected, tt.result)
}
}
})
}*/
func TestHSQLDBDateAndTimestampsBefore1970(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY,
dt DATE,
tmstmp TIMESTAMP
)`)
var (
integerValue int = 1
dtValue time.Time = time.Date(1945, 5, 20, 0, 0, 0, 0, time.UTC)
tmstmpValue time.Time = time.Date(1911, 5, 20, 21, 21, 21, 222000000, time.UTC)
)
dbt.mustExec(`INSERT INTO `+dbt.tableName+`(int, dt, tmstmp) VALUES (?, ?, ?)`,
integerValue,
dtValue,
tmstmpValue,
)
rows := dbt.mustQuery("SELECT dt, tmstmp FROM " + dbt.tableName)
defer rows.Close()
var (
dt time.Time
tmstmp time.Time
)
for rows.Next() {
err := rows.Scan(&dt, &tmstmp)
if err != nil {
dbt.Fatal(err)
}
}
comparisons := []struct {
result time.Time
expected time.Time
}{
{dt, dtValue},
{tmstmp, tmstmpValue},
}
for _, tt := range comparisons {
if !tt.expected.Equal(tt.result) {
dbt.Fatalf("Expected %v, got %v.", tt.expected, tt.result)
}
}
})
}
func TestHSQLDBStoreAndRetrieveBinaryData(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn, func(dbt *DBTest) {
// Create and seed table
// TODO: Switch VARBINARY to BLOB once avatica supports BLOBs and CBLOBs. CALCITE-1957
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY,
bin VARBINARY(999999)
)`)
filePath := filepath.Join("test-fixtures", "calcite.png")
file, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fatalf("Unable to read text-fixture: %s", filePath)
}
hash := sha256.Sum256(file)
dbt.mustExec(`INSERT INTO `+dbt.tableName+` (int, bin) VALUES (?, ?)`,
1,
file,
)
rows := dbt.mustQuery("SELECT bin FROM " + dbt.tableName)
defer rows.Close()
var receivedFile []byte
for rows.Next() {
err := rows.Scan(&receivedFile)
if err != nil {
dbt.Fatal(err)
}
}
receivedHash := sha256.Sum256(receivedFile)
if !bytes.Equal(hash[:], receivedHash[:]) {
t.Fatalf("Hash of stored file (%x) does not equal hash of retrieved file (%x).", hash[:], receivedHash[:])
}
})
}
func TestHSQLDBCommittingTransactions(t *testing.T) {
skipTestIfNotHSQLDB(t)
query := "?transactionIsolation=4"
runTests(t, dsn+query, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY
)`)
tx, err := dbt.db.Begin()
if err != nil {
t.Fatalf("Unable to create transaction: %s", err)
}
stmt, err := tx.Prepare(`INSERT INTO ` + dbt.tableName + `(int) VALUES(?)`)
if err != nil {
t.Fatalf("Could not prepare statement: %s", err)
}
totalRows := 6
for i := 1; i <= totalRows; i++ {
_, err := stmt.Exec(i)
if err != nil {
dbt.Fatal(err)
}
}
r := tx.QueryRow("SELECT COUNT(*) FROM " + dbt.tableName)
var count int
err = r.Scan(&count)
if err != nil {
t.Fatalf("Unable to scan row result: %s", err)
}
if count != totalRows {
t.Fatalf("Expected %d rows, got %d", totalRows, count)
}
// Commit the transaction
tx.Commit()
rows := dbt.mustQuery("SELECT COUNT(*) FROM " + dbt.tableName)
var countAfterCommit int
for rows.Next() {
err := rows.Scan(&countAfterCommit)
if err != nil {
dbt.Fatal(err)
}
}
if countAfterCommit != totalRows {
t.Fatalf("Expected %d rows, got %d", totalRows, countAfterCommit)
}
})
}
func TestHSQLDBRollingBackTransactions(t *testing.T) {
skipTestIfNotHSQLDB(t)
query := "?transactionIsolation=4"
runTests(t, dsn+query, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY
)`)
tx, err := dbt.db.Begin()
if err != nil {
t.Fatalf("Unable to create transaction: %s", err)
}
stmt, err := tx.Prepare(`INSERT INTO ` + dbt.tableName + `(int) VALUES(?)`)
if err != nil {
t.Fatalf("Could not prepare statement: %s", err)
}
totalRows := 6
for i := 1; i <= totalRows; i++ {
_, err := stmt.Exec(i)
if err != nil {
dbt.Fatal(err)
}
}
r := tx.QueryRow(`SELECT COUNT(*) FROM ` + dbt.tableName)
var count int
err = r.Scan(&count)
if err != nil {
t.Fatalf("Unable to scan row result: %s", err)
}
if count != totalRows {
t.Fatalf("Expected %d rows, got %d", totalRows, count)
}
// Rollback the transaction
tx.Rollback()
rows := dbt.mustQuery(`SELECT COUNT(*) FROM ` + dbt.tableName)
var countAfterRollback int
for rows.Next() {
err := rows.Scan(&countAfterRollback)
if err != nil {
dbt.Fatal(err)
}
}
if countAfterRollback != 0 {
t.Fatalf("Expected %d rows, got %d", 0, countAfterRollback)
}
})
}
func TestHSQLDBPreparedStatements(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY
)`)
stmt, err := dbt.db.Prepare(`INSERT INTO ` + dbt.tableName + `(int) VALUES(?)`)
if err != nil {
dbt.Fatal(err)
}
totalRows := 6
for i := 1; i <= totalRows; i++ {
_, err := stmt.Exec(i)
if err != nil {
dbt.Fatal(err)
}
}
queryStmt, err := dbt.db.Prepare(`SELECT * FROM ` + dbt.tableName + ` WHERE int = ?`)
if err != nil {
dbt.Fatal(err)
}
var res int
for i := 1; i <= totalRows; i++ {
err := queryStmt.QueryRow(i).Scan(&res)
if err != nil {
dbt.Fatal(err)
}
if res != i {
dbt.Fatalf("Unexpected query result. Expected %d, got %d.", i, res)
}
}
})
}
func TestHSQLDBFetchingMoreRows(t *testing.T) {
skipTestIfNotHSQLDB(t)
query := "?maxRowsTotal=-1&frameMaxSize=1"
runTests(t, dsn+query, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY
)`)
stmt, err := dbt.db.Prepare(`INSERT INTO ` + dbt.tableName + `(int) VALUES(?)`)
if err != nil {
dbt.Fatal(err)
}
totalRows := 6
for i := 1; i <= totalRows; i++ {
_, err := stmt.Exec(i)
if err != nil {
dbt.Fatal(err)
}
}
rows := dbt.mustQuery(`SELECT * FROM ` + dbt.tableName)
defer rows.Close()
count := 0
for rows.Next() {
count++
}
if count != totalRows {
dbt.Fatalf("Expected %d rows to be retrieved, retrieved %d", totalRows, count)
}
})
}
func TestHSQLDBExecuteShortcut(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY
)`)
res, err := dbt.db.Exec(`INSERT INTO ` + dbt.tableName + `(int) VALUES(1)`)
if err != nil {
dbt.Fatal(err)
}
affected, err := res.RowsAffected()
if err != nil {
dbt.Fatal(err)
}
if affected != 1 {
dbt.Fatalf("Expected 1 row to be affected, %d affected", affected)
}
})
}
func TestHSQLDBQueryShortcut(t *testing.T) {
skipTestIfNotHSQLDB(t)
query := "?maxRowsTotal=-1&frameMaxSize=1"
runTests(t, dsn+query, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY
)`)
stmt, err := dbt.db.Prepare(`INSERT INTO ` + dbt.tableName + `(int) VALUES(?)`)
if err != nil {
dbt.Fatal(err)
}
totalRows := 6
for i := 1; i <= totalRows; i++ {
_, err := stmt.Exec(i)
if err != nil {
dbt.Fatal(err)
}
}
rows := dbt.mustQuery(`SELECT * FROM ` + dbt.tableName)
defer rows.Close()
count := 0
for rows.Next() {
count++
}
if count != totalRows {
dbt.Fatalf("Expected %d rows to be retrieved, retrieved %d", totalRows, count)
}
})
}
// TODO: Test disabled due to CALCITE-2250
/*func TestHSQLDBOptimisticConcurrency(t *testing.T) {
skipTestIfNotHSQLDB(t)
query := "?transactionIsolation=4"
runTests(t, dsn+query, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
id INTEGER PRIMARY KEY,
msg VARCHAR(64),
version INTEGER
)`)
stmt, err := dbt.db.Prepare(`INSERT INTO ` + dbt.tableName + `(id, msg, version) VALUES(?, ?, ?)`)
if err != nil {
dbt.Fatal(err)
}
totalRows := 6
for i := 1; i <= totalRows; i++ {
_, err := stmt.Exec(i, fmt.Sprintf("message version %d", i), i)
if err != nil {
dbt.Fatal(err)
}
}
// Start the transactions
tx1, err := dbt.db.Begin()
if err != nil {
dbt.Fatal(err)
}
tx2, err := dbt.db.Begin()
if err != nil {
dbt.Fatal(err)
}
// Select from first transaction
_ = tx1.QueryRow(`SELECT MAX(version) FROM ` + dbt.tableName)
// Modify using second transaction
_, err = tx2.Exec(`INSERT INTO `+dbt.tableName+`(id, msg, version) VALUES(?, ?, ?)`, 7, "message value 7", 7)
if err != nil {
dbt.Fatal(err)
}
err = tx2.Commit()
if err != nil {
dbt.Fatal(err)
}
// Modify using tx1
_, err = tx1.Exec(`INSERT INTO `+dbt.tableName+`(id, msg, version) VALUES(?, ?, ?)`, 7, "message value 7", 7)
if err != nil {
dbt.Fatal(err)
}
err = tx1.Commit()
if err == nil {
dbt.Fatal("Expected an error, but did not receive any.")
}
errName := err.(ResponseError).Name()
if errName != "transaction_conflict_exception" {
dbt.Fatal("Expected transaction_conflict")
}
})
}*/
func TestHSQLDBLastInsertIDShouldReturnError(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn, func(dbt *DBTest) {
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
id INTEGER IDENTITY PRIMARY KEY,
msg VARCHAR(3),
version INTEGER
)`)
res, err := dbt.db.Exec(`INSERT INTO ` + dbt.tableName + `(msg, version) VALUES('abc', 1)`)
if err != nil {
dbt.Fatal(err)
}
_, err = res.LastInsertId()
if err == nil {
dbt.Fatal("Expected an error as Avatica does not support LastInsertId(), but there was no error.")
}
})
}
func TestHSQLDBSchemaSupport(t *testing.T) {
skipTestIfNotHSQLDB(t)
db, err := sql.Open("avatica", dsn)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
defer db.Close()
_, err = db.Exec("CREATE SCHEMA IF NOT EXISTS avaticatest")
if err != nil {
t.Fatalf("error creating schema: %s", err)
}
defer db.Exec("DROP SCHEMA IF EXISTS avaticatest")
path := "/avaticatest"
runTests(t, dsn+path, func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE avaticatest.` + dbt.tableName + ` (
int INTEGER PRIMARY KEY
);`)
defer dbt.mustExec(`DROP TABLE IF EXISTS avaticatest.` + dbt.tableName)
_, err := dbt.db.Exec(`INSERT INTO avaticatest.` + dbt.tableName + `(int) VALUES(1)`)
if err != nil {
dbt.Fatal(err)
}
rows := dbt.mustQuery(`SELECT * FROM avaticatest.` + dbt.tableName)
defer rows.Close()
count := 0
for rows.Next() {
count++
}
if count != 1 {
dbt.Errorf("Expected 1 row, got %d rows back,", count)
}
})
}
func TestHSQLDBMultipleSchemaSupport(t *testing.T) {
skipTestIfNotHSQLDB(t)
db, err := sql.Open("avatica", dsn)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
defer db.Close()
_, err = db.Exec("CREATE SCHEMA IF NOT EXISTS avaticatest1")
if err != nil {
t.Fatalf("error creating schema: %s", err)
}
defer db.Exec("DROP SCHEMA IF EXISTS avaticatest1")
_, err = db.Exec("CREATE SCHEMA IF NOT EXISTS avaticatest2")
if err != nil {
t.Fatalf("error creating schema: %s", err)
}
defer db.Exec("DROP SCHEMA IF EXISTS avaticatest2")
path := "/avaticatest1"
runTests(t, dsn+path, func(dbt *DBTest) {
dbt.mustExec(`SET INITIAL SCHEMA avaticatest2`)
// Create and seed table
dbt.mustExec(`CREATE TABLE avaticatest2.` + dbt.tableName + ` (
int INTEGER PRIMARY KEY
)`)
defer dbt.mustExec(`DROP TABLE IF EXISTS avaticatest2.` + dbt.tableName)
_, err := dbt.db.Exec(`INSERT INTO avaticatest2.` + dbt.tableName + `(int) VALUES(1)`)
if err != nil {
dbt.Fatal(err)
}
rows := dbt.mustQuery(`SELECT * FROM avaticatest2.` + dbt.tableName)
defer rows.Close()
count := 0
for rows.Next() {
count++
}
if count != 1 {
dbt.Errorf("Expected 1 row, got %d rows back,", count)
}
})
}
func TestHSQLDBExecBatch(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn+"?batching=true", func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY
)`)
stmt, err := dbt.db.Prepare(`INSERT INTO ` + dbt.tableName + ` VALUES(?)`)
if err != nil {
dbt.Fatal(err)
}
totalRows := 6
for i := 1; i <= totalRows; i++ {
_, err := stmt.Exec(i)
if err != nil {
dbt.Fatal(err)
}
}
// When batching=true, after exec(sql), need to close the stmt
err = stmt.Close()
if err != nil {
dbt.Fatal(err)
}
queryStmt, err := dbt.db.Prepare(`SELECT * FROM ` + dbt.tableName + ` WHERE int = ?`)
if err != nil {
dbt.Fatal(err)
}
var res int
for i := 1; i <= totalRows; i++ {
err := queryStmt.QueryRow(i).Scan(&res)
if err != nil {
dbt.Fatal(err)
}
if res != i {
dbt.Fatalf("Unexpected query result. Expected %d, got %d.", i, res)
}
}
})
}
func TestHSQLDBExecBatchConcurrency(t *testing.T) {
skipTestIfNotHSQLDB(t)
runTests(t, dsn+"?batching=true", func(dbt *DBTest) {
// Create and seed table
dbt.mustExec(`CREATE TABLE ` + dbt.tableName + ` (
int INTEGER PRIMARY KEY
)`)
stmt, err := dbt.db.Prepare(`INSERT INTO ` + dbt.tableName + ` VALUES(?)`)
if err != nil {
dbt.Fatal(err)
}
totalRows := 6
var wg sync.WaitGroup
for i := 1; i <= totalRows; i++ {
wg.Add(1)
go func(num int) {
defer wg.Done()
_, err := stmt.Exec(num)
if err != nil {
dbt.Fatal(err)
}
}(i)
}
wg.Wait()
// When batching=true, after exec(sql), need to close the stmt
err = stmt.Close()
if err != nil {
dbt.Fatal(err)
}
queryStmt, err := dbt.db.Prepare(`SELECT * FROM ` + dbt.tableName + ` WHERE int = ?`)
if err != nil {
dbt.Fatal(err)
}
var res int
for i := 1; i <= totalRows; i++ {
err := queryStmt.QueryRow(i).Scan(&res)
if err != nil {
dbt.Fatal(err)
}
if res != i {
dbt.Fatalf("Unexpected query result. Expected %d, got %d.", i, res)
}
}
})
}
// TODO: Test disabled due to CALCITE-1049
/*func TestHSQLDBErrorCodeParsing(t *testing.T) {
skipTestIfNotHSQLDB(t)
db, err := sql.Open("avatica", dsn)
if err != nil {
t.Fatalf("error connecting: %s", err.Error())
}
defer db.Close()
_, err = db.Query("SELECT * FROM table_that_does_not_exist")
if err == nil {
t.Error("Expected error due to selecting from non-existent table, but there was no error.")
}
resErr, ok := err.(ResponseError)
if !ok {
t.Fatalf("Error type was not ResponseError")
}
if resErr.ErrorCode != 1012 {
t.Errorf("Expected error code to be %d, got %d.", 1012, resErr.ErrorCode)
}
if resErr.SqlState != "42M03" {
t.Errorf("Expected SQL state to be %s, got %s.", "42M03", resErr.SqlState)
}
}*/
| [
"\"AVATICA_FLAVOR\""
]
| []
| [
"AVATICA_FLAVOR"
]
| [] | ["AVATICA_FLAVOR"] | go | 1 | 0 | |
test/e2e/fixtures/needs.go | package fixtures
import (
"os"
)
type Need func(s *E2ESuite) (met bool, message string)
var (
CI Need = func(s *E2ESuite) (bool, string) {
return os.Getenv("CI") != "", "CI"
}
BaseLayerArtifacts Need = func(s *E2ESuite) (bool, string) {
met, _ := None(K8SAPI, Kubelet)(s)
return met, "base layer artifact support"
}
Docker = Executor("docker")
K8SAPI = Executor("k8sapi")
Kubelet = Executor("kubelet")
)
func Executor(e string) Need {
return func(s *E2ESuite) (bool, string) {
return s.Config.ContainerRuntimeExecutor == e, e
}
}
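// None is met only when none of the given needs are met; Any requires at least one, All requires every one.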
func None(needs ...Need) Need {
return func(s *E2ESuite) (bool, string) {
for _, n := range needs {
met, message := n(s)
if met {
return false, "not " + message
}
}
return true, ""
}
}
func Any(needs ...Need) Need {
return func(s *E2ESuite) (bool, string) {
for _, n := range needs {
met, _ := n(s)
if met {
return true, ""
}
}
return false, ""
}
}
func All(needs ...Need) Need {
return func(s *E2ESuite) (bool, string) {
for _, n := range needs {
met, message := n(s)
if !met {
return false, message
}
}
return true, ""
}
}
| [
"\"CI\""
]
| []
| [
"CI"
]
| [] | ["CI"] | go | 1 | 0 | |
eliteEvents/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eliteEvents.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pspnet.py | #!/usr/bin/env python
from __future__ import print_function
import os
from os.path import splitext, join, isfile, isdir, basename
import argparse
import numpy as np
from scipy import misc, ndimage
from keras import backend as K
from keras.models import model_from_json, load_model
import tensorflow as tf
import layers_builder as layers
from glob import glob
from utils import utils
from keras.utils.generic_utils import CustomObjectScope
import cv2
import math
# -- Fix for macos, uncomment it
# import matplotlib
# matplotlib.use('TkAgg')
# --
import matplotlib.pyplot as plt
from imageio import imread
# These are the means for the ImageNet pretrained ResNet
DATA_MEAN = np.array([[[123.68, 116.779, 103.939]]]) # RGB order
class PSPNet(object):
"""Pyramid Scene Parsing Network by Hengshuang Zhao et al 2017"""
def __init__(self, nb_classes, resnet_layers, input_shape, weights):
self.input_shape = input_shape
self.num_classes = nb_classes
json_path = join("weights", "keras", weights + ".json")
h5_path = join("weights", "keras", weights + ".h5")
if 'pspnet' in weights:
if os.path.isfile(json_path) and os.path.isfile(h5_path):
print("Keras model & weights found, loading...")
with CustomObjectScope({'Interp': layers.Interp}):
with open(json_path) as file_handle:
self.model = model_from_json(file_handle.read())
self.model.load_weights(h5_path)
else:
print("No Keras model & weights found, import from npy weights.")
self.model = layers.build_pspnet(nb_classes=nb_classes,
resnet_layers=resnet_layers,
input_shape=self.input_shape)
self.set_npy_weights(weights)
else:
print('Load pre-trained weights')
self.model = load_model(weights)
def predict(self, img, flip_evaluation=False):
"""
Predict segementation for an image.
Arguments:
img: must be rowsxcolsx3
"""
if img.shape[0:2] != self.input_shape:
print(
"Input %s not fitting for network size %s, resizing. You may want to try sliding prediction for better results." % (
img.shape[0:2], self.input_shape))
img = misc.imresize(img, self.input_shape)
img = img - DATA_MEAN
img = img[:, :, ::-1] # RGB => BGR
img = img.astype('float32')
probs = self.feed_forward(img, flip_evaluation)
return probs
def predict_sliding(self, full_img, flip_evaluation):
"""
Predict on tiles of exactly the network input shape.
This way nothing gets squeezed.
"""
tile_size = self.input_shape
classes = self.num_classes
overlap = 1 / 3
stride = math.ceil(tile_size[0] * (1 - overlap))
tile_rows = max(int(math.ceil((full_img.shape[0] - tile_size[0]) / stride) + 1), 1) # strided convolution formula
tile_cols = max(int(math.ceil((full_img.shape[1] - tile_size[1]) / stride) + 1), 1)
print("Need %i x %i prediction tiles @ stride %i px" % (tile_cols, tile_rows, stride))
full_probs = np.zeros((full_img.shape[0], full_img.shape[1], classes))
count_predictions = np.zeros((full_img.shape[0], full_img.shape[1], classes))
tile_counter = 0
for row in range(tile_rows):
for col in range(tile_cols):
x1 = int(col * stride)
y1 = int(row * stride)
x2 = min(x1 + tile_size[1], full_img.shape[1])
y2 = min(y1 + tile_size[0], full_img.shape[0])
x1 = max(int(x2 - tile_size[1]), 0) # for portrait images the x1 underflows sometimes
y1 = max(int(y2 - tile_size[0]), 0) # for very few rows y1 underflows
img = full_img[y1:y2, x1:x2]
padded_img = self.pad_image(img, tile_size)
plt.imshow(padded_img)
plt.show()
tile_counter += 1
print("Predicting tile %i" % tile_counter)
padded_prediction = self.predict(padded_img, flip_evaluation)
prediction = padded_prediction[0:img.shape[0], 0:img.shape[1], :]
count_predictions[y1:y2, x1:x2] += 1
full_probs[y1:y2, x1:x2] += prediction # accumulate the predictions also in the overlapping regions
# average the predictions in the overlapping regions
full_probs /= count_predictions
# visualize normalization Weights
# plt.imshow(np.mean(count_predictions, axis=2))
# plt.show()
return full_probs
@staticmethod
def pad_image(img, target_size):
"""Pad an image up to the target size."""
rows_missing = target_size[0] - img.shape[0]
cols_missing = target_size[1] - img.shape[1]
padded_img = np.pad(img, ((0, rows_missing), (0, cols_missing), (0, 0)), 'constant')
return padded_img
def predict_multi_scale(self, img, flip_evaluation, sliding_evaluation, scales):
"""Predict an image by looking at it with different scales."""
full_probs = np.zeros((img.shape[0], img.shape[1], self.num_classes))
h_ori, w_ori = img.shape[:2]
print("Started prediction...")
for scale in scales:
print("Predicting image scaled by %f" % scale)
scaled_img = misc.imresize(img, size=scale, interp="bilinear")
if sliding_evaluation:
scaled_probs = self.predict_sliding(scaled_img, flip_evaluation)
else:
scaled_probs = self.predict(scaled_img, flip_evaluation)
# scale probs up to full size
# visualize_prediction(probs)
probs = cv2.resize(scaled_probs, (w_ori, h_ori))
full_probs += probs
full_probs /= len(scales)
print("Finished prediction...")
return full_probs
def feed_forward(self, data, flip_evaluation=False):
assert data.shape == (self.input_shape[0], self.input_shape[1], 3)
if flip_evaluation:
print("Predict flipped")
input_with_flipped = np.array(
[data, np.flip(data, axis=1)])
prediction_with_flipped = self.model.predict(input_with_flipped)
prediction = (prediction_with_flipped[
0] + np.fliplr(prediction_with_flipped[1])) / 2.0
else:
prediction = self.model.predict(np.expand_dims(data, 0))[0]
return prediction
def set_npy_weights(self, weights_path):
npy_weights_path = join("weights", "npy", weights_path + ".npy")
json_path = join("weights", "keras", weights_path + ".json")
h5_path = join("weights", "keras", weights_path + ".h5")
print("Importing weights from %s" % npy_weights_path)
weights = np.load(npy_weights_path, encoding='bytes').item()
for layer in self.model.layers:
print(layer.name)
if layer.name[:4] == 'conv' and layer.name[-2:] == 'bn':
mean = weights[layer.name.encode()][
'mean'.encode()].reshape(-1)
variance = weights[layer.name.encode()][
'variance'.encode()].reshape(-1)
scale = weights[layer.name.encode()][
'scale'.encode()].reshape(-1)
offset = weights[layer.name.encode()][
'offset'.encode()].reshape(-1)
self.model.get_layer(layer.name).set_weights(
[scale, offset, mean, variance])
elif layer.name[:4] == 'conv' and not layer.name[-4:] == 'relu':
try:
weight = weights[layer.name.encode()]['weights'.encode()]
self.model.get_layer(layer.name).set_weights([weight])
except Exception as err:
biases = weights[layer.name.encode()]['biases'.encode()]
self.model.get_layer(layer.name).set_weights([weight,
biases])
print('Finished importing weights.')
print("Writing keras model & weights")
json_string = self.model.to_json()
with open(json_path, 'w') as file_handle:
file_handle.write(json_string)
self.model.save_weights(h5_path)
print("Finished writing Keras model & weights")
class PSPNet50(PSPNet):
"""Build a PSPNet based on a 50-Layer ResNet."""
def __init__(self, nb_classes, weights, input_shape):
PSPNet.__init__(self, nb_classes=nb_classes, resnet_layers=50,
input_shape=input_shape, weights=weights)
class PSPNet101(PSPNet):
"""Build a PSPNet based on a 101-Layer ResNet."""
def __init__(self, nb_classes, weights, input_shape):
PSPNet.__init__(self, nb_classes=nb_classes, resnet_layers=101,
input_shape=input_shape, weights=weights)
def main(args):
# Handle input and output args
images = glob(args.glob_path) if args.glob_path else [args.input_path, ]
if args.glob_path:
fn, ext = splitext(args.output_path)
if ext:
parser.error("output_path should be a folder for multiple file input")
if not isdir(args.output_path):
os.mkdir(args.output_path)
# Predict
os.environ["CUDA_VISIBLE_DEVICES"] = args.id
sess = tf.Session()
K.set_session(sess)
with sess.as_default():
print(args)
if not args.weights:
if "pspnet50" in args.model:
pspnet = PSPNet50(nb_classes=150, input_shape=(473, 473),
weights=args.model)
elif "pspnet101" in args.model:
if "cityscapes" in args.model:
pspnet = PSPNet101(nb_classes=19, input_shape=(713, 713),
weights=args.model)
if "voc2012" in args.model:
pspnet = PSPNet101(nb_classes=21, input_shape=(473, 473),
weights=args.model)
else:
print("Network architecture not implemented.")
else:
pspnet = PSPNet50(nb_classes=2, input_shape=(
768, 480), weights=args.weights)
EVALUATION_SCALES = [1.0]
if args.multi_scale:
EVALUATION_SCALES = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] # must be all floats! Taken from original paper
for i, img_path in enumerate(images):
print("Processing image {} / {}".format(i + 1, len(images)))
img = imread(img_path, pilmode='RGB')
probs = pspnet.predict_multi_scale(img, args.flip, args.sliding, EVALUATION_SCALES)
cm = np.argmax(probs, axis=2)
pm = np.max(probs, axis=2)
colored_class_image = utils.color_class_image(cm, args.model)
alpha_blended = 0.5 * colored_class_image + 0.5 * img
if args.glob_path:
input_filename, ext = splitext(basename(img_path))
filename = join(args.output_path, input_filename)
else:
filename, ext = splitext(args.output_path)
misc.imsave(filename + "_seg_read" + ext, cm)
misc.imsave(filename + "_seg" + ext, colored_class_image)
misc.imsave(filename + "_probs" + ext, pm)
misc.imsave(filename + "_seg_blended" + ext, alpha_blended)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', type=str, default='pspnet101_voc2012',
help='Model/Weights to use',
choices=['pspnet50_ade20k',
'pspnet101_cityscapes',
'pspnet101_voc2012'])
parser.add_argument('-w', '--weights', type=str, default=None)
parser.add_argument('-i', '--input_path', type=str, default='example_images/ade20k.jpg',
help='Path the input image')
parser.add_argument('-g', '--glob_path', type=str, default=None,
help='Glob path for multiple images')
parser.add_argument('-o', '--output_path', type=str, default='example_results/ade20k.jpg',
help='Path to output')
parser.add_argument('--id', default="0")
parser.add_argument('--input_size', type=int, default=500)
parser.add_argument('-s', '--sliding', action='store_true',
help="Whether the network should be slided over the original image for prediction.")
parser.add_argument('-f', '--flip', action='store_true', default=True,
help="Whether the network should predict on both image and flipped image.")
parser.add_argument('-ms', '--multi_scale', action='store_true',
help="Whether the network should predict on multiple scales.")
args = parser.parse_args()
main(args)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
soracom/generated/cmd/subscribers_deactivate.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"net/url"
"os"
"github.com/spf13/cobra"
)
// SubscribersDeactivateCmdImsi holds value of 'imsi' option
var SubscribersDeactivateCmdImsi string
func init() {
SubscribersDeactivateCmd.Flags().StringVar(&SubscribersDeactivateCmdImsi, "imsi", "", TRAPI("IMSI of the target subscriber."))
SubscribersCmd.AddCommand(SubscribersDeactivateCmd)
}
// SubscribersDeactivateCmd defines 'deactivate' subcommand
var SubscribersDeactivateCmd = &cobra.Command{
Use: "deactivate",
Short: TRAPI("/subscribers/{imsi}/deactivate:post:summary"),
Long: TRAPI(`/subscribers/{imsi}/deactivate:post:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectSubscribersDeactivateCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectSubscribersDeactivateCmdParams(ac *apiClient) (*apiParams, error) {
var parsedBody interface{}
var err error
err = checkIfRequiredStringParameterIsSupplied("imsi", "imsi", "path", parsedBody, SubscribersDeactivateCmdImsi)
if err != nil {
return nil, err
}
return &apiParams{
method: "POST",
path: buildPathForSubscribersDeactivateCmd("/subscribers/{imsi}/deactivate"),
query: buildQueryForSubscribersDeactivateCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForSubscribersDeactivateCmd(path string) string {
escapedImsi := url.PathEscape(SubscribersDeactivateCmdImsi)
path = strReplace(path, "{"+"imsi"+"}", escapedImsi, -1)
return path
}
func buildQueryForSubscribersDeactivateCmd() url.Values {
result := url.Values{}
return result
}
| [
"\"SORACOM_VERBOSE\""
]
| []
| [
"SORACOM_VERBOSE"
]
| [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
labellab-flask/api/config.py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
imagesdir = os.path.join(os.path.dirname(basedir),'uploads')
"""Constants used throughout the application.
All hard coded settings/data that are not actual/official configuration
options for Flask and their extensions goes here.
"""
class Config:
"""Default Flask configuration inherited by all environments. Use this for
development environments.
"""
SECRET_KEY = os.environ.get("SECRET_KEY") or "big secret"
JWT_SECRET_KEY = os.environ.get("SECRET_KEY") or "very big secret"
JWT_BLACKLIST_ENABLED = True
JWT_BLACKLIST_TOKEN_CHECKS = ["access", "refresh"]
LABELS_ALLOWED = ["bbox","polygon"]
TEAMS_ALLOWED = ["labels","images","image labelling","models"]
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
"""Development Congigurations"""
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get(
"DEV_DATABASE_URL"
)
SQLALCHEMY_TRACK_MODIFICATIONS = False
# needs to be removed in further versions
ML_FILES_DIR = os.path.join(os.path.dirname(basedir),'ml_files')
UPLOAD_FOLDER = imagesdir
class TestingConfig(Config):
"""
Testing config applies for both local testing and travis configurations
"""
TESTING = True
WTF_CSRF_ENABLED = False
TEST_DATABASE = os.environ.get(
"TEST_DATABASE_URL"
)
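# On Travis the CI service provides the test database; locally, create it if it does not exist yet.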
if os.getenv("FLASK_CONFIG")=="travis":
pass
else:
from sqlalchemy_utils.functions import database_exists, create_database
if not database_exists(TEST_DATABASE):
create_database(TEST_DATABASE)
SQLALCHEMY_DATABASE_URI = TEST_DATABASE
SQLALCHEMY_TRACK_MODIFICATIONS = False
# needs to be removed in further versions
UPLOAD_FOLDER = imagesdir
class ProductionConfig(Config):
"""Production Congigurations"""
SQLALCHEMY_DATABASE_URI = os.environ.get(
"DATABASE_URL"
)
@classmethod
def init_app(cls, app):
Config.init_app(app)
class DockerConfig(Config):
"""Docker config"""
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
class TravisConfig(Config):
"""
Configs for travis
"""
TESTING = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
# needs to be removed in further versions
UPLOAD_FOLDER = imagesdir
ML_FILES_DIR = os.path.join(os.path.dirname(basedir),'ml_files')
LABELS_ALLOWED = ["bbox","polygon"]
TEAMS_ALLOWED = ["labels","images","image labelling","models"]
config = {
"development": DevelopmentConfig,
"testing": TestingConfig,
"production": ProductionConfig,
"docker": DockerConfig,
"default": DevelopmentConfig,
"travis": TravisConfig
}
| []
| []
| [
"DATABASE_URL\"\n ",
"DEV_DATABASE_URL\"\n ",
"TEST_DATABASE_URL\"\n ",
"SECRET_KEY",
"FLASK_CONFIG"
]
| [] | ["DATABASE_URL\"\n ", "DEV_DATABASE_URL\"\n ", "TEST_DATABASE_URL\"\n ", "SECRET_KEY", "FLASK_CONFIG"] | python | 5 | 0 | |
azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/key_vault_secret_reference.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyVaultSecretReference(Model):
"""Describes a reference to Key Vault Secret.
All required parameters must be populated in order to send to Azure.
:param secret_url: Required. The URL referencing a secret in a Key Vault.
:type secret_url: str
:param source_vault: Required. The relative URL of the Key Vault
containing the secret.
:type source_vault: ~azure.mgmt.compute.v2018_04_01.models.SubResource
"""
_validation = {
'secret_url': {'required': True},
'source_vault': {'required': True},
}
_attribute_map = {
'secret_url': {'key': 'secretUrl', 'type': 'str'},
'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
}
def __init__(self, **kwargs):
super(KeyVaultSecretReference, self).__init__(**kwargs)
self.secret_url = kwargs.get('secret_url', None)
self.source_vault = kwargs.get('source_vault', None)
| []
| []
| []
| [] | [] | python | null | null | null |
bindings/python/cntk/__init__.py | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import os
os.environ["PATH"] += os.pathsep + os.path.join(os.path.dirname(__file__), 'libs')
# Read version information
version_file = open(os.path.join(os.path.dirname(__file__), 'VERSION'), 'r')
__version__ = version_file.read()
version_file.close()
del version_file
import numpy as np
from . import cntk_py
#
# Bubble the below namespaces to cntk root namespace.
#
from .core import *
from .variables import Parameter, Constant
from .ops import *
from .device import *
from .train import *
from .eval import *
from .learners import *
from .losses import *
from .metrics import *
from .initializer import *
from .default_options import *
from . import debugging
from . import logging
from . import io
from . import layers
from . import misc
from . import random
from .sample_installer import install_samples
DATATYPE = np.float32
InferredDimension = cntk_py.InferredDimension
FreeDimension = cntk_py.FreeDimension
from .internal.utils import _to_cntk_dict_value
import _cntk_py
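# Route Dictionary item assignment through _to_cntk_dict_value so plain Python values are converted to CNTK dictionary values before being stored.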
cntk_py.Dictionary.__setitem__ = lambda self, key, value: _cntk_py.Dictionary___setitem__(self, key, _to_cntk_dict_value(value))
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
python/tests/utils.py | #
# MIT License
#
# Copyright (c) 2022 Axel Pettersson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import unittest
from pyspark import SQLContext
from pyspark.sql import SparkSession
from pyspark.sql.types import StructField, StructType
from ackuq.pit.context import PitContext
class SparkTests(unittest.TestCase):
def setUp(self) -> None:
self.jar_location = os.environ["SCALA_PIT_JAR"]
print("Loading jar from location: {}".format(self.jar_location))
self.spark = (
SparkSession.builder.appName("sparkTests")
.master("local")
.config("spark.ui.showConsoleProgress", False)
.config("spark.driver.extraClassPath", self.jar_location)
.config("spark.sql.shuffle.partitions", 1)
.getOrCreate()
)
self.sql_context = SQLContext(self.spark.sparkContext)
self.pit_context = PitContext(self.sql_context)
def tearDown(self) -> None:
self.spark.stop()
def _assertFieldsEqual(self, a: StructField, b: StructField):
self.assertEqual(a.name.lower(), b.name.lower())
self.assertEqual(a.dataType, b.dataType)
def _assertSchemaContainsField(self, schema: StructType, field: StructField):
self.assertTrue(field.name.lower() in schema.fieldNames())
self._assertFieldsEqual(field, schema[field.name])
def assertSchemaEqual(self, a: StructType, b: StructType):
self.assertEqual(len(a), len(b))
for field in b.fields:
self._assertSchemaContainsField(a, field)
| []
| []
| [
"SCALA_PIT_JAR"
]
| [] | ["SCALA_PIT_JAR"] | python | 1 | 0 | |
step1_search_by_location.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
import os
import json
import logging
import copy
import requests
from requests.auth import HTTPBasicAuth
countries = json.load(open('africa_data.json'))
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
output = 'step1.json'
oneMinute = 60
oneHour = oneMinute * 60
minRemainingToStop = 30
reqs = 0
reqsLimit = None
reqsRemaining = None
headers = {}
TOKEN_AUTH = HTTPBasicAuth(GITHUB_TOKEN, "x-oauth-basic")
allUsers = []
def addTo(searchTerm, allUsers, countryStub, city=None):
def usersFrom(location):
complete = False
page = 1
users = []
order = 'asc'
while not complete:
if page > 10:
# well, we can't query anymore.
if order == 'desc':
complete = True
continue
order = 'desc'
page = 1
req = requests.get(
'https://api.github.com/legacy/user/search/location:%s' %
location,
headers=headers, params={'start_page': page,
'sort': 'joined',
'order': order},
auth=TOKEN_AUTH)
page += 1
try:
jsusers = json.loads(req.content).get('users')
if not len(jsusers):
complete = True
continue
users += jsusers
except:
logger.warning("Failed to parse JSON:")
logger.warning(req.content)
complete = True
return users
jsonUsers = usersFrom(searchTerm)
if not len(jsonUsers):
return
for user in jsonUsers:
logger.info("FOUND -- %s -- %s" % (user.get('username'),
user.get('location')))
user.update({'country': countryStub,
'city': city})
allUsers.append(user)
for countryCode, country in countries.items():
logger.info("COUNTRY: %s" % country.get('name'))
countryStub = copy.copy(country)
countryStub.update({'code': countryCode})
del(countryStub['patterns'])
for city in country.get('patterns', []):
logging.info("SEARCHING for city -- %s" % city.get('name'))
for searchName in city.get('patterns', [city.get('name')]):
addTo(searchName, allUsers, countryStub, city)
for name in country.get('names', []):
logging.info("SEARCHING for country -- %s" % name)
addTo(name, allUsers, countryStub, None)
logger.info("Found %d records" % len(allUsers))
json.dump(allUsers, open(output, 'w'), indent=4)
logger.info("UNIQUE user accounts: %d" %
len(list(set([u.get('username') for u in allUsers]))))
| []
| []
| [
"GITHUB_TOKEN"
]
| [] | ["GITHUB_TOKEN"] | python | 1 | 0 | |
cmd/main.go | package main
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/tetsucceed/byrdy/pkg/db"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"log"
"net/http"
"os"
)
func ShowEditPost(rc *gin.Context) {
rc.JSON(http.StatusOK, gin.H{"title": "Hello"})
}
func main() {
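// Database connection settings are read from the environment so the service can be configured per deployment.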
dbHost := os.Getenv("DB_HOST")
dbName := os.Getenv("DB_NAME")
dbUser := os.Getenv("DB_USER")
dbPassword := os.Getenv("DB_PASSWORD")
dsnUrl := fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=5432", dbHost,
dbUser, dbPassword, dbName)
pdb, err := gorm.Open(postgres.Open(dsnUrl), &gorm.Config{})
if err != nil {
panic("failed to connect database")
}
_ = pdb.AutoMigrate(&db.Structure{})
env := db.Env{
Links: db.LinkDbModel{DB: pdb},
}
// connect to postgresql and apply migrations
log.Print("Starting")
r := gin.Default()
r.GET("/", ShowEditPost)
r.GET("/api/get", env.GetLink)
r.POST("/api/create", env.SaveShort)
_ = r.Run(":8080")
}
| [
"\"DB_HOST\"",
"\"DB_NAME\"",
"\"DB_USER\"",
"\"DB_PASSWORD\""
]
| []
| [
"DB_PASSWORD",
"DB_USER",
"DB_NAME",
"DB_HOST"
]
| [] | ["DB_PASSWORD", "DB_USER", "DB_NAME", "DB_HOST"] | go | 4 | 0 |