filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
inception_score.py | import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
import torch.utils.data
from torchvision.models.inception import inception_v3
import numpy as np
from scipy.stats import entropy
from inception import InceptionV3
import torchvision.datasets as dset
import torchvision.transforms as transforms
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os
import pathlib
from tqdm import tqdm
from scipy.misc import imread, imresize
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('path', type=str, nargs=2,
                    help='Paths to the directories of generated images')
parser.add_argument('--batch-size', type=int, default=50,
help='Batch size to use')
parser.add_argument('--dims', type=int, default=2048,
choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
help=('Dimensionality of Inception features to use. '
'By default, uses pool3 features'))
parser.add_argument('-c', '--gpu', default='', type=str,
help='GPU to use (leave blank for CPU only)')
def get_pred(x, model):
tmp = model.model(x)
tmp = model.emo_layer(tmp)
    return F.softmax(tmp, dim=1).data.cpu().numpy()
def get_scores(files, model, batch_size=50, dims=8,
cuda=False, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
    -- verbose     : If set to True, progress for each batch is reported
    Returns:
    -- A tuple (mean, std) of the Inception Score computed over the
       prediction splits.
"""
model.model.eval()
model.emo_layer.eval()
if len(files) % batch_size != 0:
print(('Warning: number of images is not a multiple of the '
'batch size. Some samples are going to be ignored.'))
if batch_size > len(files):
print(('Warning: batch size is bigger than the data size. '
'Setting batch size to data size'))
batch_size = len(files)
n_batches = len(files) // batch_size
n_used_imgs = n_batches * batch_size
N = len(files)
pred_arr = np.empty((n_used_imgs, dims))
for i in tqdm(range(n_batches)):
if verbose:
print('\rPropagating batch %d/%d' % (i + 1, n_batches))
start = i * batch_size
end = start + batch_size
images = [imread(str(f)).astype(np.float32)
for f in files[start:end]]
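        # Stack any grayscale (2-D) images into 3 channels, then keep only the
        # 3-channel (RGB) images, resized to 299x299 as expected by Inception.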
single_channel_images = [np.stack((img,)*3, axis=-1)
for img in images if len(img.shape)==2]
images.extend(single_channel_images)
images = np.array([imresize(img, (299, 299)).astype(np.float32)
for img in images if len(img.shape)>2 and img.shape[2]==3])
# Reshape to (n_images, 3, height, width)
images = images.transpose((0, 3, 1, 2))
images /= 255
batch = torch.from_numpy(images).type(torch.FloatTensor)
if cuda:
batch = batch.cuda()
pred = get_pred(batch, model)
pred_arr[start:end] = pred.reshape(batch_size, -1)
# Now compute the mean kl-div
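    # Per split, the Inception Score is exp(mean KL(p(y|x) || p(y)));
    # scipy's entropy(pyx, py) returns the KL divergence between the two distributions.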
split_scores = []
splits = 8
for k in range(splits):
part = pred_arr[k * (N // splits): (k+1) * (N // splits), :]
py = np.mean(part, axis=0)
scores = []
for i in range(part.shape[0]):
pyx = part[i, :]
scores.append(entropy(pyx, py))
split_scores.append(np.exp(np.mean(scores)))
if verbose:
print(' done')
return np.mean(split_scores), np.std(split_scores)
if __name__ == '__main__':
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
model = InceptionV3()
if args.gpu != '':
model.cuda()
for p in args.path:
if not os.path.exists(p):
raise RuntimeError('Invalid path: %s' % p)
path = pathlib.Path(p)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
m, s = get_scores(files, model, batch_size=50, dims=8,
cuda=args.gpu != '', verbose=True)
print('For path -> %s , the inception scores are : mean: %.3f, STD: %.3f ' % (p, m, s))
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
vendor/github.com/lestrrat/go-server-starter/listener/listener.go | package listener
import (
"fmt"
"net"
"os"
"regexp"
"strings"
"strconv"
)
// Listener is the interface for things that listen on file descriptors
// specified by Start::Server / server_starter
type Listener interface {
Fd() uintptr
Listen() (net.Listener, error)
String() string
}
// ListenerList holds a list of Listeners. This is here just for convenience
// so that you can do
// list.String()
// to get a string compatible with SERVER_STARTER_PORT
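// (for example, a hypothetical value could look like
// "8080=3;192.168.0.1:8081=4;/tmp/app.sock=5", i.e. port, addr:port or
// socket path mapped to a file descriptor, joined with ";")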
type ListenerList []Listener
func (ll ListenerList) String() string {
list := make([]string, len(ll))
for i, l := range ll {
list[i] = l.String()
}
return strings.Join(list, ";")
}
// TCPListener is a listener for TCP sockets.
type TCPListener struct {
Addr string
Port int
fd uintptr
}
// UnixListener is a listener for unix sockets.
type UnixListener struct {
Path string
fd uintptr
}
func (l TCPListener) String() string {
if l.Addr == "0.0.0.0" {
return fmt.Sprintf("%d=%d", l.Port, l.fd)
}
return fmt.Sprintf("%s:%d=%d", l.Addr, l.Port, l.fd)
}
// Fd returns the underlying file descriptor
func (l TCPListener) Fd() uintptr {
return l.fd
}
// Listen creates a new Listener
func (l TCPListener) Listen() (net.Listener, error) {
return net.FileListener(os.NewFile(l.Fd(), fmt.Sprintf("%s:%d", l.Addr, l.Port)))
}
func (l UnixListener) String() string {
return fmt.Sprintf("%s=%d", l.Path, l.fd)
}
// Fd returns the underlying file descriptor
func (l UnixListener) Fd() uintptr {
return l.fd
}
// Listen creates a new Listener
func (l UnixListener) Listen() (net.Listener, error) {
return net.FileListener(os.NewFile(l.Fd(), l.Path))
}
// Being lazy here...
var reLooksLikeHostPort = regexp.MustCompile(`^(\d+):(\d+)$`)
var reLooksLikePort = regexp.MustCompile(`^\d+$`)
func parseListenTargets(str string) ([]Listener, error) {
rawspec := strings.Split(str, ";")
ret := make([]Listener, len(rawspec))
for i, pairString := range rawspec {
pair := strings.Split(pairString, "=")
hostPort := strings.TrimSpace(pair[0])
fdString := strings.TrimSpace(pair[1])
fd, err := strconv.ParseUint(fdString, 10, 0)
if err != nil {
return nil, fmt.Errorf("failed to parse '%s' as listen target: %s", pairString, err)
}
if matches := reLooksLikeHostPort.FindAllString(hostPort, -1); matches != nil {
port, err := strconv.ParseInt(matches[1], 10, 0)
if err != nil {
return nil, err
}
ret[i] = TCPListener{
Addr: matches[0],
Port: int(port),
fd: uintptr(fd),
}
} else if match := reLooksLikePort.FindString(hostPort); match != "" {
port, err := strconv.ParseInt(match, 10, 0)
if err != nil {
return nil, err
}
ret[i] = TCPListener{
Addr: "0.0.0.0",
Port: int(port),
fd: uintptr(fd),
}
} else {
ret[i] = UnixListener{
Path: hostPort,
fd: uintptr(fd),
}
}
}
return ret, nil
}
// Ports parses environment variable SERVER_STARTER_PORT
func Ports() ([]Listener, error) {
return parseListenTargets(os.Getenv("SERVER_STARTER_PORT"))
}
// ListenAll parses environment variable SERVER_STARTER_PORT, and creates
// net.Listener objects
func ListenAll() ([]net.Listener, error) {
targets, err := parseListenTargets(os.Getenv("SERVER_STARTER_PORT"))
if err != nil {
return nil, err
}
ret := make([]net.Listener, len(targets))
for i, target := range targets {
ret[i], err = target.Listen()
if err != nil {
// Close everything up to this listener
for x := 0; x < i; x++ {
ret[x].Close()
}
return nil, err
}
}
return ret, nil
}
| [
"\"SERVER_STARTER_PORT\"",
"\"SERVER_STARTER_PORT\""
]
| []
| [
"SERVER_STARTER_PORT"
]
| [] | ["SERVER_STARTER_PORT"] | go | 1 | 0 | |
python/setup.py | # Copyright (c) 2019-2021, NVIDIA CORPORATION.
import filecmp
import glob
import os
import re
import shutil
import sysconfig
from distutils.sysconfig import get_python_lib
from Cython.Build import cythonize
from setuptools import find_packages, setup
from setuptools.extension import Extension
import versioneer
install_requires = ["numba", "cython"]
def get_cuda_version_from_header(cuda_include_dir):
cuda_version = None
with open(
os.path.join(cuda_include_dir, "cuda.h"), "r", encoding="utf-8"
) as f:
for line in f.readlines():
if re.search(r"#define CUDA_VERSION ", line) is not None:
cuda_version = line
break
if cuda_version is None:
raise TypeError("CUDA_VERSION not found in cuda.h")
cuda_version = int(cuda_version.split()[2])
return "%d.%d" % (cuda_version // 1000, (cuda_version % 1000) // 10)
cython_tests = glob.glob("rmm/_lib/tests/*.pyx")
CUDA_HOME = os.environ.get("CUDA_HOME", False)
if not CUDA_HOME:
path_to_cuda_gdb = shutil.which("cuda-gdb")
if path_to_cuda_gdb is None:
raise OSError(
"Could not locate CUDA. "
"Please set the environment variable "
"CUDA_HOME to the path to the CUDA installation "
"and try again."
)
CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))
if not os.path.isdir(CUDA_HOME):
raise OSError(f"Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}")
cuda_include_dir = os.path.join(CUDA_HOME, "include")
cuda_lib_dir = os.path.join(CUDA_HOME, "lib64")
CUDA_VERSION = get_cuda_version_from_header(cuda_include_dir)
INSTALL_PREFIX = os.environ.get("INSTALL_PREFIX", "")
if os.path.isdir(INSTALL_PREFIX):
rmm_include_dir = os.path.join(INSTALL_PREFIX, "include")
else:
# use uninstalled headers in source tree
rmm_include_dir = "../include"
# Preprocessor step to specify correct pxd file with
# valid symbols for specific version of CUDA.
cwd = os.getcwd()
files_to_preprocess = ["gpu.pxd"]
# The .pxi file is unchanged between some CUDA versions
# (e.g., 11.0 & 11.1), so we keep only a single copy
# of it
cuda_version_to_pxi_dir = {
"10.1": "10.1",
"10.2": "10.2",
"11.0": "11.x",
"11.1": "11.x",
"11.2": "11.x",
}
for pxd_basename in files_to_preprocess:
pxi_basename = os.path.splitext(pxd_basename)[0] + ".pxi"
if CUDA_VERSION in cuda_version_to_pxi_dir:
pxi_pathname = os.path.join(
cwd,
"rmm/_cuda",
cuda_version_to_pxi_dir[CUDA_VERSION],
pxi_basename,
)
pxd_pathname = os.path.join(cwd, "rmm/_cuda", pxd_basename)
try:
if filecmp.cmp(pxi_pathname, pxd_pathname):
# files are the same, no need to copy
continue
except FileNotFoundError:
# pxd_pathname doesn't exist yet
pass
shutil.copyfile(pxi_pathname, pxd_pathname)
else:
raise TypeError(f"{CUDA_VERSION} is not supported.")
try:
nthreads = int(os.environ.get("PARALLEL_LEVEL", "0") or "0")
except Exception:
nthreads = 0
include_dirs = [
rmm_include_dir,
os.path.dirname(sysconfig.get_path("include")),
cuda_include_dir,
]
library_dirs = [
get_python_lib(),
os.path.join(os.sys.prefix, "lib"),
cuda_lib_dir,
]
# lib:
extensions = cythonize(
[
Extension(
"*",
sources=["rmm/_lib/*.pyx"],
include_dirs=include_dirs,
library_dirs=library_dirs,
runtime_library_dirs=[
cuda_lib_dir,
os.path.join(os.sys.prefix, "lib"),
],
libraries=["cuda", "cudart"],
language="c++",
extra_compile_args=["-std=c++17"],
)
],
nthreads=nthreads,
compiler_directives=dict(
profile=False, language_level=3, embedsignature=True,
),
)
# cuda:
extensions += cythonize(
[
Extension(
"*",
sources=["rmm/_cuda/*.pyx"],
include_dirs=include_dirs,
library_dirs=library_dirs,
runtime_library_dirs=[
cuda_lib_dir,
os.path.join(os.sys.prefix, "lib"),
],
libraries=["cuda", "cudart"],
language="c++",
extra_compile_args=["-std=c++14"],
)
],
nthreads=nthreads,
compiler_directives=dict(
profile=False, language_level=3, embedsignature=True,
),
)
# tests:
extensions += cythonize(
[
Extension(
"*",
sources=cython_tests,
include_dirs=include_dirs,
library_dirs=library_dirs,
runtime_library_dirs=[
cuda_lib_dir,
os.path.join(os.sys.prefix, "lib"),
],
libraries=["cuda", "cudart"],
language="c++",
extra_compile_args=["-std=c++14"],
)
],
nthreads=nthreads,
compiler_directives=dict(
profile=True, language_level=3, embedsignature=True, binding=True
),
)
setup(
name="rmm",
version="21.08.00",
description="rmm - RAPIDS Memory Manager",
url="https://github.com/rapidsai/rmm",
author="NVIDIA Corporation",
license="Apache 2.0",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
# Include the separately-compiled shared library
setup_requires=["Cython>=0.29,<0.30"],
extras_require={"test": ["pytest", "pytest-xdist"]},
ext_modules=extensions,
packages=find_packages(include=["rmm", "rmm.*"]),
package_data=dict.fromkeys(
find_packages(include=["rmm._lib", "rmm._lib.includes", "rmm._cuda*"]),
["*.hpp", "*.pxd"],
),
cmdclass=versioneer.get_cmdclass(),
install_requires=install_requires,
zip_safe=False,
)
| []
| []
| [
"INSTALL_PREFIX",
"PARALLEL_LEVEL",
"CUDA_HOME"
]
| [] | ["INSTALL_PREFIX", "PARALLEL_LEVEL", "CUDA_HOME"] | python | 3 | 0 | |
python/pyarrow/tests/test_hdfs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from io import BytesIO
from os.path import join as pjoin
import os
import random
import unittest
import numpy as np
import pandas.util.testing as pdt
import pytest
from pyarrow.compat import guid
import pyarrow as pa
import pyarrow.tests.test_parquet as test_parquet
# ----------------------------------------------------------------------
# HDFS tests
def hdfs_test_client(driver='libhdfs'):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')
user = os.environ['ARROW_HDFS_TEST_USER']
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 20500))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
return pa.hdfs.connect(host, port, user, driver=driver)
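# These tests assume a reachable HDFS cluster. ARROW_HDFS_TEST_USER must be set;
# ARROW_HDFS_TEST_HOST and ARROW_HDFS_TEST_PORT fall back to the defaults above
# (for example, hypothetical values: localhost and 20500).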
@pytest.mark.hdfs
class HdfsTestCases(object):
def _make_test_file(self, hdfs, test_name, test_path, test_data):
base_path = pjoin(self.tmp_path, test_name)
hdfs.mkdir(base_path)
full_path = pjoin(base_path, test_path)
with hdfs.open(full_path, 'wb') as f:
f.write(test_data)
return full_path
@classmethod
def setUpClass(cls):
cls.check_driver()
cls.hdfs = hdfs_test_client(cls.DRIVER)
cls.tmp_path = '/tmp/pyarrow-test-{0}'.format(random.randint(0, 1000))
cls.hdfs.mkdir(cls.tmp_path)
@classmethod
def tearDownClass(cls):
cls.hdfs.delete(cls.tmp_path, recursive=True)
cls.hdfs.close()
def test_cat(self):
path = pjoin(self.tmp_path, 'cat-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
contents = self.hdfs.cat(path)
assert contents == data
def test_capacity_space(self):
capacity = self.hdfs.get_capacity()
space_used = self.hdfs.get_space_used()
disk_free = self.hdfs.df()
assert capacity > 0
assert capacity > space_used
assert disk_free == (capacity - space_used)
def test_close(self):
client = hdfs_test_client()
assert client.is_open
client.close()
assert not client.is_open
with pytest.raises(Exception):
client.ls('/')
def test_mkdir(self):
path = pjoin(self.tmp_path, 'test-dir/test-dir')
parent_path = pjoin(self.tmp_path, 'test-dir')
self.hdfs.mkdir(path)
assert self.hdfs.exists(path)
self.hdfs.delete(parent_path, recursive=True)
assert not self.hdfs.exists(path)
def test_mv_rename(self):
path = pjoin(self.tmp_path, 'mv-test')
new_path = pjoin(self.tmp_path, 'mv-new-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
assert self.hdfs.exists(path)
self.hdfs.mv(path, new_path)
assert not self.hdfs.exists(path)
assert self.hdfs.exists(new_path)
assert self.hdfs.cat(new_path) == data
self.hdfs.rename(new_path, path)
assert self.hdfs.cat(path) == data
def test_info(self):
path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(path, 'ex')
self.hdfs.mkdir(path)
data = b'foobarbaz'
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
path_info = self.hdfs.info(path)
file_path_info = self.hdfs.info(file_path)
assert path_info['kind'] == 'directory'
assert file_path_info['kind'] == 'file'
assert file_path_info['size'] == len(data)
def test_disk_usage(self):
path = pjoin(self.tmp_path, 'disk-usage-base')
p1 = pjoin(path, 'p1')
p2 = pjoin(path, 'p2')
subdir = pjoin(path, 'subdir')
p3 = pjoin(subdir, 'p3')
if self.hdfs.exists(path):
self.hdfs.delete(path, True)
self.hdfs.mkdir(path)
self.hdfs.mkdir(subdir)
data = b'foobarbaz'
for file_path in [p1, p2, p3]:
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
assert self.hdfs.disk_usage(path) == len(data) * 3
def test_ls(self):
base_path = pjoin(self.tmp_path, 'ls-test')
self.hdfs.mkdir(base_path)
dir_path = pjoin(base_path, 'a-dir')
f1_path = pjoin(base_path, 'a-file-1')
self.hdfs.mkdir(dir_path)
f = self.hdfs.open(f1_path, 'wb')
        f.write(b'a' * 10)
contents = sorted(self.hdfs.ls(base_path, False))
assert contents == [dir_path, f1_path]
def test_chmod_chown(self):
path = pjoin(self.tmp_path, 'chmod-test')
with self.hdfs.open(path, 'wb') as f:
f.write(b'a' * 10)
def test_download_upload(self):
base_path = pjoin(self.tmp_path, 'upload-test')
data = b'foobarbaz'
buf = BytesIO(data)
buf.seek(0)
self.hdfs.upload(base_path, buf)
out_buf = BytesIO()
self.hdfs.download(base_path, out_buf)
out_buf.seek(0)
assert out_buf.getvalue() == data
def test_file_context_manager(self):
path = pjoin(self.tmp_path, 'ctx-manager')
data = b'foo'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
assert f.size() == 3
result = f.read(10)
assert result == data
def test_read_whole_file(self):
path = pjoin(self.tmp_path, 'read-whole-file')
data = b'foo' * 1000
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
result = f.read()
assert result == data
@test_parquet.parquet
def test_read_multiple_parquet_files(self):
import pyarrow.parquet as pq
nfiles = 10
size = 5
tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
test_data = []
paths = []
for i in range(nfiles):
df = test_parquet._test_dataframe(size, seed=i)
df['index'] = np.arange(i * size, (i + 1) * size)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(tmpdir, '{0}.parquet'.format(i))
table = pa.Table.from_pandas(df, preserve_index=False)
with self.hdfs.open(path, 'wb') as f:
pq.write_table(table, f)
test_data.append(table)
paths.append(path)
result = self.hdfs.read_parquet(tmpdir)
expected = pa.concat_tables(test_data)
pdt.assert_frame_equal(result.to_pandas()
.sort_values(by='index').reset_index(drop=True),
expected.to_pandas())
class TestLibHdfs(HdfsTestCases, unittest.TestCase):
DRIVER = 'libhdfs'
@classmethod
def check_driver(cls):
if not pa.have_libhdfs():
pytest.fail('No libhdfs available on system')
def test_orphaned_file(self):
hdfs = hdfs_test_client()
file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',
'foobarbaz')
f = hdfs.open(file_path)
hdfs = None
f = None # noqa
class TestLibHdfs3(HdfsTestCases, unittest.TestCase):
DRIVER = 'libhdfs3'
@classmethod
def check_driver(cls):
if not pa.have_libhdfs3():
pytest.fail('No libhdfs3 available on system')
| []
| []
| [
"ARROW_HDFS_TEST_USER",
"ARROW_HDFS_TEST_PORT",
"ARROW_HDFS_TEST_HOST"
]
| [] | ["ARROW_HDFS_TEST_USER", "ARROW_HDFS_TEST_PORT", "ARROW_HDFS_TEST_HOST"] | python | 3 | 0 | |
Archive/Course_resources/examples/communication/bluetooth/write_gatt.py | #!/usr/bin/env python3
# Import required libraries
import pygatt
import signal
from dotenv import load_dotenv
import os
import time
from dcd.entities.thing import Thing
from dcd.entities.property_type import PropertyType
import requests
def dcd_hub_status():
"""
Return the DCD Hub status:
- 0: Connection successful
- 1: Could not reach the hub (Connection or Hub issue)
"""
uri = "https://dwd.tudelft.nl/api/health"
try:
json_result = requests.get(uri, timeout=1).json()
if json_result["status"] is 0:
# We received a response with status = 0, everything is fine
return 0
# In any other case, there is a issue
return 1
except Exception as e:
# Show some information about the error
print(str(e))
# Return 1, the connection wasn't successful
return 1
def discover_characteristic(device):
for uuid in device.discover_characteristics().keys():
try:
print("Read UUID" + str(uuid) + " " + str(device.char_read(uuid)))
except:
print("Something wrong with " + str(uuid))
# The thing ID, access token and Bluetooth device MAC address
load_dotenv()
THING_ID = os.environ['THING_ID']
THING_TOKEN = os.environ['THING_TOKEN']
BLUETOOTH_DEVICE_MAC = os.environ['BLUETOOTH_DEVICE_MAC']
# UUID of the GATT characteristic to subscribe
GATT_CHARACTERISTIC_LED = "00002345-0000-1000-8000-00805f9b34fb"
# Many devices, e.g. Fitbit, use random addressing, this is required to connect.
ADDRESS_TYPE = pygatt.BLEAddressType.random
# Start a BLE adapter
bleAdapter = pygatt.GATTToolBackend()
bleAdapter.start()
# User the BLE adapter to connect to our device
my_device = bleAdapter.connect(BLUETOOTH_DEVICE_MAC, address_type=ADDRESS_TYPE)
while True:
hub_status = dcd_hub_status()
print(hub_status)
    if hub_status == 0:
print("Internet available")
my_device.char_write(GATT_CHARACTERISTIC_LED, bytearray([0xFF, 0x00, 0x00]))
else:
print("Internet not available")
my_device.char_write(GATT_CHARACTERISTIC_LED, bytearray([0x00, 0x00, 0x00]))
    time.sleep(2)
| []
| []
| [
"BLUETOOTH_DEVICE_MAC",
"THING_TOKEN",
"THING_ID"
]
| [] | ["BLUETOOTH_DEVICE_MAC", "THING_TOKEN", "THING_ID"] | python | 3 | 0 | |
emojivoto-emoji-svc/cmd/server.go | package main
import (
"fmt"
"log"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"contrib.go.opencensus.io/exporter/ocagent"
"github.com/edgelesssys/emojivoto/emojivoto-emoji-svc/api"
"github.com/edgelesssys/emojivoto/emojivoto-emoji-svc/emoji"
"github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.opencensus.io/plugin/ocgrpc"
"go.opencensus.io/trace"
"google.golang.org/grpc"
)
var (
grpcPort = os.Getenv("GRPC_PORT")
promPort = os.Getenv("PROM_PORT")
ocagentHost = os.Getenv("OC_AGENT_HOST")
)
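// Example invocation with hypothetical values:
//   GRPC_PORT=8080 PROM_PORT=8801 OC_AGENT_HOST=oc-agent:55678 ./emoji-svc
// GRPC_PORT must be set; PROM_PORT, when non-empty, enables the Prometheus endpoint.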
func main() {
if grpcPort == "" {
log.Fatalf("GRPC_PORT (currently [%s]) environment variable must me set to run the server.", grpcPort)
}
oce, err := ocagent.NewExporter(
ocagent.WithInsecure(),
ocagent.WithReconnectionPeriod(5*time.Second),
ocagent.WithAddress(ocagentHost),
ocagent.WithServiceName("voting"))
if err != nil {
log.Fatalf("Failed to create ocagent-exporter: %v", err)
}
trace.RegisterExporter(oce)
allEmoji := emoji.NewAllEmoji()
lis, err := net.Listen("tcp", fmt.Sprintf(":%s", grpcPort))
if err != nil {
panic(err)
}
errs := make(chan error, 1)
if promPort != "" {
// Start prometheus server
go func() {
log.Printf("Starting prom metrics on PROM_PORT=[%s]", promPort)
http.Handle("/metrics", promhttp.Handler())
err := http.ListenAndServe(fmt.Sprintf(":%s", promPort), nil)
errs <- err
}()
}
// Start grpc server
go func() {
grpc_prometheus.EnableHandlingTimeHistogram()
grpcServer := grpc.NewServer(
grpc.StatsHandler(&ocgrpc.ServerHandler{}),
grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
)
api.NewGrpServer(grpcServer, allEmoji)
log.Printf("Starting grpc server on GRPC_PORT=[%s]", grpcPort)
err := grpcServer.Serve(lis)
errs <- err
}()
// Catch shutdown
go func() {
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGQUIT)
s := <-sig
errs <- fmt.Errorf("caught signal %v", s)
}()
log.Fatal(<-errs)
}
| [
"\"GRPC_PORT\"",
"\"PROM_PORT\"",
"\"OC_AGENT_HOST\""
]
| []
| [
"OC_AGENT_HOST",
"PROM_PORT",
"GRPC_PORT"
]
| [] | ["OC_AGENT_HOST", "PROM_PORT", "GRPC_PORT"] | go | 3 | 0 | |
http/transport.go | package http
import (
"crypto/tls"
"net"
"net/http"
"time"
)
// NewDefaultTransport creates a new transport with sane defaults.
func NewDefaultTransport() *http.Transport {
// These defaults are copied from http.DefaultTransport.
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// Below are changes from http.DefaultTransport
MaxIdleConnsPerHost: 100, // increased from 2, services tend to connect to a single host
}
}
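// A minimal usage sketch: wrap the transport in an http.Client, e.g.
//   client := &http.Client{Transport: NewDefaultTransport()}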
// NewDefaultTransportWithTLS creates a new transport with the specified TLS configuration.
func NewDefaultTransportWithTLS(tlsConfig *tls.Config) *http.Transport {
t := NewDefaultTransport()
t.TLSClientConfig = tlsConfig
return t
}
| []
| []
| []
| [] | [] | go | null | null | null |
prometheusrw/prometheusrw.go | package main
import (
"bytes"
"crypto/tls"
"encoding/xml"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"github.com/gobwas/glob"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/prompb"
)
// Structs to hold XML parsing of input from Splunk
type input struct {
XMLName xml.Name `xml:"input"`
ServerHost string `xml:"server_host"`
ServerURI string `xml:"server_uri"`
SessionKey string `xml:"session_key"`
CheckpointDir string `xml:"checkpoint_dir"`
Configuration configuration `xml:"configuration"`
}
type configuration struct {
XMLName xml.Name `xml:"configuration"`
Stanzas []stanza `xml:"stanza"`
}
type stanza struct {
XMLName xml.Name `xml:"stanza"`
Params []param `xml:"param"`
Name string `xml:"name,attr"`
}
type param struct {
XMLName xml.Name `xml:"param"`
Name string `xml:"name,attr"`
Value string `xml:",chardata"`
}
type feed struct {
XMLName xml.Name `xml:"feed"`
Keys []key `xml:"entry>content>dict>key"`
}
type key struct {
XMLName xml.Name `xml:"key"`
Name string `xml:"name,attr"`
Value string `xml:",chardata"`
}
// End XML structs
// Structs store final config
type inputConfig struct {
BearerToken string
Whitelist []glob.Glob
Blacklist []glob.Glob
Index string
Sourcetype string
Host string
MetricNamePrefix string // Add a custom prefix to metric name
MetricNameParse bool // Parse metric according to splunk prefix
}
type globalConfig struct {
ListenAddr string
MaxClients int
Disabled bool
EnableTLS bool
CertFile string
KeyFile string
}
// End config structs
var (
defaultMetricNamePrefix = ""
defaultMetricNameParse = false
)
func main() {
if len(os.Args) > 1 {
if os.Args[1] == "--scheme" {
fmt.Println(doScheme())
} else if os.Args[1] == "--validate-arguments" {
validateArguments()
}
} else {
log.Fatal(run())
}
return
}
func doScheme() string {
scheme := `<scheme>
<title>Prometheus Remote Write</title>
<description>Listen on a TCP port as a remote write endpoint for the Prometheus metrics server</description>
<use_external_validation>false</use_external_validation>
<streaming_mode>simple</streaming_mode>
<use_single_instance>true</use_single_instance>
<endpoint>
<arg name="bearerToken">
<title>Bearer token</title>
<description>A token configured in Prometheus to send via the Authorization header</description>
<required_on_edit>true</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="whitelist">
<title>Whitelist</title>
<description>A comma-separated list of glob patterns to match metric names and index (default *)</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="blacklist">
<title>Blacklist</title>
<description>A comma-separated list of glob patterns to match metric names and prevent indexing (default empty). Applied after whitelisting.</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
</endpoint>
</scheme>`
return scheme
}
func validateArguments() {
// Currently unused
// Will be used to properly validate in future
return
}
func config() (globalConfig, map[string]inputConfig) {
data, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
var input input
err = xml.Unmarshal(data, &input)
if err != nil {
log.Fatal(err)
}
configMap := make(map[string]inputConfig)
for _, s := range input.Configuration.Stanzas {
var inputConfig inputConfig
// Defaults
inputConfig.MetricNamePrefix = defaultMetricNamePrefix
inputConfig.MetricNameParse = defaultMetricNameParse
for _, p := range s.Params {
if p.Name == "whitelist" {
for _, w := range strings.Split(p.Value, ",") {
inputConfig.Whitelist = append(inputConfig.Whitelist, glob.MustCompile(w))
}
}
if p.Name == "blacklist" {
for _, b := range strings.Split(p.Value, ",") {
inputConfig.Blacklist = append(inputConfig.Blacklist, glob.MustCompile(b))
}
}
if p.Name == "bearerToken" {
inputConfig.BearerToken = p.Value
}
if p.Name == "index" {
inputConfig.Index = p.Value
}
if p.Name == "sourcetype" {
inputConfig.Sourcetype = p.Value
}
if p.Name == "host" {
inputConfig.Host = p.Value
}
if p.Name == "metricNamePrefix" {
inputConfig.MetricNamePrefix = p.Value
}
if p.Name == "metricNameParse" {
inputConfig.MetricNameParse = (p.Value == "true")
}
}
configMap[inputConfig.BearerToken] = inputConfig
}
// Default global config
var globalConfig globalConfig
globalConfig.ListenAddr = ":8098"
globalConfig.MaxClients = 10
globalConfig.Disabled = true
globalConfig.EnableTLS = false
// Get the global configuration
tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
client := &http.Client{Transport: tr}
req, err := http.NewRequest("GET", input.ServerURI+"/services/configs/inputs/prometheusrw", nil)
if err != nil {
log.Fatal(err)
}
req.Header.Add("Authorization", "Splunk "+input.SessionKey)
response, err := client.Do(req)
if err != nil {
log.Fatal(err)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Fatal(err)
}
// Parse the global configuration
var feed feed
xml.Unmarshal(body, &feed)
for _, k := range feed.Keys {
if k.Name == "disabled" {
globalConfig.Disabled, _ = strconv.ParseBool(k.Value)
}
if k.Name == "port" {
port, _ := strconv.Atoi(k.Value)
globalConfig.ListenAddr = fmt.Sprintf(":%d", port)
}
if k.Name == "maxClients" {
			maxClients, err := strconv.Atoi(k.Value)
			if err != nil || maxClients <= 0 {
globalConfig.MaxClients = 10
} else {
globalConfig.MaxClients = maxClients
}
}
if k.Name == "enableTLS" {
globalConfig.EnableTLS, _ = strconv.ParseBool(k.Value)
}
if k.Name == "certFile" {
globalConfig.CertFile = strings.Replace(k.Value, "$SPLUNK_HOME", os.Getenv("SPLUNK_HOME"), -1)
}
if k.Name == "keyFile" {
globalConfig.KeyFile = strings.Replace(k.Value, "$SPLUNK_HOME", os.Getenv("SPLUNK_HOME"), -1)
}
}
response.Body.Close()
return globalConfig, configMap
}
func run() error {
// Output of metrics are sent to Splunk via log interface
// This ensures parallel requests don't interleave, which can happen using stdout directly
output := log.New(os.Stdout, "", 0)
// Actual logging (goes to splunkd.log)
//infoLog := log.New(os.Stderr, "INFO ", 0)
//debugLog := log.New(os.Stderr, "DEBUG ", 0)
//errLog := log.New(os.Stderr, "ERROR ", 0)
globalConfig, configMap := config()
	if globalConfig.Disabled {
log.Fatal("Prometheus input globally disabled")
}
// Semaphore to limit to maxClients concurrency
sema := make(chan struct{}, globalConfig.MaxClients)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
// Get the bearer token and corresponding config
bearerToken := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")
if _, ok := configMap[bearerToken]; !ok {
http.Error(w, "Bearer token not recognized. Please contact your Splunk admin.", http.StatusUnauthorized)
return
}
inputConfig := configMap[bearerToken]
// This will queue a client if > maxClients are processing
sema <- struct{}{}
defer func() { <-sema }()
// A buffer to build out metrics in for this request
		// We dump it all at once, as we may have index/sourcetype etc. directives and we can't have them separated from the metrics they affect by another request
var buffer bytes.Buffer
buffer.WriteString(fmt.Sprintf("***SPLUNK*** index=%s sourcetype=%s host=%s\n", inputConfig.Index, inputConfig.Sourcetype, inputConfig.Host))
compressed, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
reqBuf, err := snappy.Decode(nil, compressed)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
var req prompb.WriteRequest
if err := proto.Unmarshal(reqBuf, &req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
for _, ts := range req.Timeseries {
m := make(model.Metric, len(ts.Labels))
for _, l := range ts.Labels {
m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
}
whitelisted := false
for _, w := range inputConfig.Whitelist {
if w.Match(string(m["__name__"])) {
whitelisted = true
}
}
if !whitelisted {
continue
}
blacklisted := false
for _, b := range inputConfig.Blacklist {
if b.Match(string(m["__name__"])) {
blacklisted = true
}
}
if blacklisted {
continue
}
if inputConfig.MetricNameParse {
m["__name__"] = formatMetricLabelValue(string(m["__name__"]), inputConfig.MetricNamePrefix)
}
for _, s := range ts.Samples {
if math.IsNaN(s.Value) || math.IsInf(s.Value, 0) {
continue
} // Splunk won't accept NaN metrics etc.
buffer.WriteString(fmt.Sprintf("%s %f %d\n", m, s.Value, s.Timestamp))
}
}
output.Print(buffer.String())
buffer.Truncate(0)
})
	if globalConfig.EnableTLS {
return http.ListenAndServeTLS(globalConfig.ListenAddr, globalConfig.CertFile, globalConfig.KeyFile, nil)
} else {
return http.ListenAndServe(globalConfig.ListenAddr, nil)
}
}
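// formatMetricLabelValue prepends the configured prefix and rewrites underscores
// as dots; for example (illustrative values), ("node_cpu_seconds", "prom.")
// becomes "prom.node.cpu.seconds".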
func formatMetricLabelValue(value string, prefix string) model.LabelValue {
s := []string{}
s = append(s, prefix)
s = append(s, regexp.MustCompile("_").ReplaceAllString(value, "."))
return model.LabelValue(strings.Join(s, ""))
}
| [
"\"SPLUNK_HOME\"",
"\"SPLUNK_HOME\""
]
| []
| [
"SPLUNK_HOME"
]
| [] | ["SPLUNK_HOME"] | go | 1 | 0 | |
api/handlers_test.go | package api
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/http/httptest"
"os"
"runtime"
"sync"
"testing"
"time"
"github.com/dcos/dcos-diagnostics/config"
"github.com/gorilla/mux"
assertPackage "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
var DiagnosticsBundleDir = "/tmp/snapshot-test"
var testCfg *config.Config
func init() {
if runtime.GOOS == "windows" {
DiagnosticsBundleDir = os.Getenv("SYSTEMDRIVE") + "\\tmp\\snapshot-test"
}
testCfg = &config.Config{
FlagRole: "master",
FlagDiagnosticsBundleDir: DiagnosticsBundleDir,
FlagPort: 1050,
FlagMasterPort: 1050,
}
}
// fakeDCOSTools is a DCOSHelper interface implementation used for testing.
type fakeDCOSTools struct {
sync.Mutex
units []string
fakeHTTPResponses []*httpResponse
fakeMasters []Node
// HTTP GET, POST
mockedRequest map[string]FakeHTTPContainer
getRequestsMade []string
postRequestsMade []string
rawRequestsMade []*http.Request
}
type FakeHTTPContainer struct {
mockResponse []byte
mockStatusCode int
mockErr error
}
func (st *fakeDCOSTools) makeMockedResponse(url string, response []byte, statusCode int, e error) error {
if _, ok := st.mockedRequest[url]; ok {
return errors.New(url + " is already added")
}
st.mockedRequest = make(map[string]FakeHTTPContainer)
st.mockedRequest[url] = FakeHTTPContainer{
mockResponse: response,
mockStatusCode: statusCode,
mockErr: e,
}
return nil
}
func (st *fakeDCOSTools) GetHostname() (string, error) {
return "MyHostName", nil
}
func (st *fakeDCOSTools) DetectIP() (string, error) {
return "127.0.0.1", nil
}
func (st *fakeDCOSTools) GetNodeRole() (string, error) {
return "master", nil
}
func (st *fakeDCOSTools) GetUnitProperties(pname string) (map[string]interface{}, error) {
result := make(map[string]interface{})
st.units = append(st.units, pname)
if pname == "unit_to_fail" {
return result, errors.New("unit_to_fail occurred")
}
result["Id"] = pname
result["LoadState"] = "loaded"
result["ActiveState"] = "active"
result["Description"] = "PrettyName: My fake description"
result["SubState"] = "running"
return result, nil
}
func (st *fakeDCOSTools) InitializeUnitControllerConnection() error {
return nil
}
func (st *fakeDCOSTools) CloseUnitControllerConnection() error {
return nil
}
func (st *fakeDCOSTools) GetUnitNames() (units []string, err error) {
units = []string{"dcos-setup.service", "dcos-link-env.service", "dcos-download.service", "unit_a", "unit_b", "unit_c", "unit_to_fail"}
return units, err
}
func (st *fakeDCOSTools) GetJournalOutput(unit string) (string, error) {
return "journal output", nil
}
func (st *fakeDCOSTools) GetMesosNodeID() (string, error) {
return "node-id-123", nil
}
// Make HTTP GET request with a timeout.
func (st *fakeDCOSTools) Get(url string, timeout time.Duration) (body []byte, statusCode int, err error) {
st.Lock()
defer st.Unlock()
// add made GET request.
st.getRequestsMade = append(st.getRequestsMade, url)
if _, ok := st.mockedRequest[url]; ok {
return st.mockedRequest[url].mockResponse, st.mockedRequest[url].mockStatusCode, st.mockedRequest[url].mockErr
}
var response string
// master
if url == fmt.Sprintf("http://127.0.0.1:1050%s", BaseRoute) {
response = `
{
"units": [
{
"id":"dcos-setup.service",
"health":0,
"output":"",
"description":"Nice Description.",
"help":"",
"name":"PrettyName"
},
{
"id":"dcos-master.service",
"health":0,
"output":"",
"description":"Nice Master Description.",
"help":"",
"name":"PrettyName"
}
],
"hostname":"master01",
"ip":"127.0.0.1",
"dcos_version":"1.6",
"node_role":"master",
"mesos_id":"master-123",
"dcos_diagnostics_version": "0.0.7"
}`
}
// agent
if url == fmt.Sprintf("http://127.0.0.2:1050%s", BaseRoute) {
response = `
{
"units": [
{
"id":"dcos-setup.service",
"health":0,
"output":"",
"description":"Nice Description.",
"help":"",
"name":"PrettyName"
},
{
"id":"dcos-agent.service",
"health":1,
"output":"",
"description":"Nice Agent Description.",
"help":"",
"name":"PrettyName"
}
],
"hostname":"agent01",
"ip":"127.0.0.2",
"dcos_version":"1.6",
"node_role":"agent",
"mesos_id":"agent-123",
"dcos_diagnostics_version": "0.0.7"
}`
}
return []byte(response), 200, nil
}
// Post make HTTP POST request with a timeout.
func (st *fakeDCOSTools) Post(url string, timeout time.Duration) (body []byte, statusCode int, err error) {
st.Lock()
defer st.Unlock()
st.postRequestsMade = append(st.postRequestsMade, url)
return body, statusCode, nil
}
// MakeRequest makes a HTTP request
func (st *fakeDCOSTools) HTTPRequest(req *http.Request, timeout time.Duration) (resp *http.Response, err error) {
st.Lock()
defer st.Unlock()
st.rawRequestsMade = append(st.rawRequestsMade, req)
return resp, nil
}
func (st *fakeDCOSTools) UpdateHTTPResponses(responses []*httpResponse) {
st.fakeHTTPResponses = responses
}
func (st *fakeDCOSTools) GetTimestamp() time.Time {
return time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
}
func (st *fakeDCOSTools) GetMasterNodes() (nodes []Node, err error) {
if len(st.fakeMasters) > 0 {
return st.fakeMasters, nil
}
var fakeMasterHost Node
fakeMasterHost.IP = "127.0.0.1"
fakeMasterHost.Role = "master"
nodes = append(nodes, fakeMasterHost)
return nodes, nil
}
func (st *fakeDCOSTools) GetAgentNodes() (nodes []Node, err error) {
var fakeAgentHost Node
fakeAgentHost.IP = "127.0.0.2"
fakeAgentHost.Role = "agent"
nodes = append(nodes, fakeAgentHost)
return nodes, nil
}
type HandlersTestSuit struct {
suite.Suite
assert *assertPackage.Assertions
router *mux.Router
dt *Dt
mockedUnitsHealthResponseJSONStruct UnitsHealthResponseJSONStruct
mockedMonitoringResponse MonitoringResponse
}
// SetUp/Teardown
func (s *HandlersTestSuit) SetupTest() {
// setup variables
s.dt = &Dt{
Cfg: testCfg,
DtDCOSTools: &fakeDCOSTools{},
MR: &MonitoringResponse{},
}
s.router = NewRouter(s.dt)
s.assert = assertPackage.New(s.T())
// mock the response
s.mockedUnitsHealthResponseJSONStruct = UnitsHealthResponseJSONStruct{
Array: []HealthResponseValues{
{
UnitID: "dcos-master.service",
UnitHealth: 0,
UnitTitle: "Master service",
PrettyName: "DC/OS Master service Unit",
},
{
UnitID: "dcos-ddt.service",
UnitHealth: 0,
UnitTitle: "Diag service",
PrettyName: "dcos-diagnostics",
},
},
Hostname: "localhost",
IPAddress: "127.0.0.1",
DcosVersion: "1.7-dev",
Role: "master",
MesosID: "12345",
TdtVersion: "1.2.3",
}
s.mockedMonitoringResponse = MonitoringResponse{
Units: map[string]Unit{
"dcos-adminrouter-reload.service": {
UnitName: "dcos-adminrouter-reload.service",
Nodes: []Node{
{
Role: "master",
IP: "10.0.7.190",
Host: "",
Health: 0,
Output: map[string]string{
"dcos-adminrouter-reload.service": "",
"dcos-adminrouter-reload.timer": "",
},
MesosID: "ab098f2a-799c-4d85-82b2-eb5159d0ceb0",
},
{
Role: "agent",
IP: "10.0.7.191",
Host: "",
Health: 0,
Output: map[string]string{
"dcos-adminrouter-reload.service": "",
"dcos-adminrouter-reload.timer": "",
},
MesosID: "ab098f2a-799c-4d85-82b2-eb5159d0ceb0-S1",
},
},
Health: 0,
Title: "Reload admin router to get new DNS",
Timestamp: time.Now(),
PrettyName: "Admin Router Reload",
},
"dcos-cosmos.service": {
UnitName: "dcos-cosmos.service",
Nodes: []Node{
{
Role: "agent",
IP: "10.0.7.192",
Host: "",
Health: 1,
Output: map[string]string{
"dcos-adminrouter-reload.service": "",
"dcos-cosmos.service": "Some nasty error occurred",
},
MesosID: "ab098f2a-799c-4d85-82b2-eb5159d0ceb0-S2",
},
{
Role: "agent",
IP: "10.0.7.193",
Host: "",
Health: 0,
Output: map[string]string{
"dcos-adminrouter-reload.service": "",
"dcos-adminrouter-reload.timer": "",
},
MesosID: "ab098f2a-799c-4d85-82b2-eb5159d0ceb0-S3",
},
},
Health: 1,
Title: "DCOS Packaging API",
Timestamp: time.Now(),
PrettyName: "Package Service",
},
},
Nodes: map[string]Node{
"10.0.7.190": {
Role: "master",
IP: "10.0.7.190",
Health: 0,
Output: map[string]string{
"dcos-adminrouter-reload.service": "",
"dcos-adminrouter-reload.timer": "",
},
Units: []Unit{
{
UnitName: "dcos-adminrouter-reload.service",
Nodes: []Node{
{
Role: "master",
IP: "10.0.7.190",
Host: "",
Health: 0,
Output: map[string]string{
"dcos-adminrouter-reload.service": "",
"dcos-adminrouter-reload.timer": "",
},
MesosID: "ab098f2a-799c-4d85-82b2-eb5159d0ceb0",
},
{
Role: "agent",
IP: "10.0.7.191",
Host: "",
Health: 0,
Output: map[string]string{
"dcos-adminrouter-reload.service": "",
"dcos-adminrouter-reload.timer": "",
},
MesosID: "ab098f2a-799c-4d85-82b2-eb5159d0ceb0-S1",
},
},
Health: 0,
Title: "Reload admin router to get new DNS",
Timestamp: time.Now(),
PrettyName: "Admin Router Reload",
},
},
MesosID: "ab098f2a-799c-4d85-82b2-eb5159d0ceb0",
},
},
}
// Update global monitoring responses
s.dt.MR.UpdateMonitoringResponse(&s.mockedMonitoringResponse)
}
func (s *HandlersTestSuit) TearDownTest() {
// clear global variables that might be set
s.dt.MR.UpdateMonitoringResponse(&MonitoringResponse{})
}
// Helper functions
func MakeHTTPRequest(t *testing.T, router *mux.Router, url, method string, body io.Reader) (response []byte, statusCode int, err error) {
req, err := http.NewRequest(method, url, body)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)
return w.Body.Bytes(), w.Code, err
}
func (s *HandlersTestSuit) get(url string) []byte {
response, _, err := MakeHTTPRequest(s.T(), s.router, url, "GET", nil)
s.assert.Nil(err, "Error makeing GET request")
return response
}
func (s *HandlersTestSuit) TestgetAllUnitsHandlerFunc() {
// Test endpoint /system/health/v1/units
resp := s.get("/system/health/v1/units")
var response UnitsResponseJSONStruct
json.Unmarshal(resp, &response)
s.assert.NotEqual(response, UnitsResponseJSONStruct{}, "Response cannot be empty")
s.assert.Len(response.Array, 2, "Expected 2 units in response")
s.assert.Contains(response.Array, UnitResponseFieldsStruct{
UnitID: "dcos-adminrouter-reload.service",
PrettyName: "Admin Router Reload",
UnitHealth: 0,
UnitTitle: "Reload admin router to get new DNS",
})
s.assert.Contains(response.Array, UnitResponseFieldsStruct{
UnitID: "dcos-cosmos.service",
PrettyName: "Package Service",
UnitHealth: 1,
UnitTitle: "DCOS Packaging API",
})
}
func (s *HandlersTestSuit) TestgetUnitByIdHandlerFunc() {
// Test endpoint /system/health/v1/units/<Unit>
resp := s.get("/system/health/v1/units/dcos-cosmos.service")
var response UnitResponseFieldsStruct
json.Unmarshal(resp, &response)
expectedResponse := UnitResponseFieldsStruct{
UnitID: "dcos-cosmos.service",
PrettyName: "Package Service",
UnitHealth: 1,
UnitTitle: "DCOS Packaging API",
}
s.assert.NotEqual(response, UnitsResponseJSONStruct{}, "Response cannot be empty")
s.assert.Equal(response, expectedResponse, "Response is in incorrect format")
// Unit should not be found
resp = s.get("/system/health/v1/units/dcos-notfound.service")
s.assert.Equal(string(resp), "Unit dcos-notfound.service not found\n")
}
func (s *HandlersTestSuit) TestgetNodesByUnitIdHandlerFunc() {
// Test endpoint /system/health/v1/units/<Unit>/nodes
resp := s.get("/system/health/v1/units/dcos-cosmos.service/nodes")
var response NodesResponseJSONStruct
json.Unmarshal(resp, &response)
s.assert.NotEqual(response, NodesResponseJSONStruct{}, "Response cannot be empty")
s.assert.Len(response.Array, 2, "Number of hosts must be 2")
s.assert.Contains(response.Array, &NodeResponseFieldsStruct{
HostIP: "10.0.7.192",
NodeHealth: 1,
NodeRole: "agent",
})
s.assert.Contains(response.Array, &NodeResponseFieldsStruct{
HostIP: "10.0.7.193",
NodeHealth: 0,
NodeRole: "agent",
})
// Unit should not be found and no nodes should be returned
resp = s.get("/system/health/v1/units/dcos-notfound.service/nodes")
s.assert.Equal(string(resp), "Unit dcos-notfound.service not found\n")
}
func (s *HandlersTestSuit) TestgetNodeByUnitIdNodeIdHandlerFunc() {
// Test endpoint /system/health/v1/units/<unitid>/nodes/<nodeid>
resp := s.get("/system/health/v1/units/dcos-cosmos.service/nodes/10.0.7.192")
var response NodeResponseFieldsWithErrorStruct
json.Unmarshal(resp, &response)
s.assert.NotEqual(response, NodeResponseFieldsWithErrorStruct{}, "Response should not be empty")
expectedResponse := NodeResponseFieldsWithErrorStruct{
HostIP: "10.0.7.192",
NodeHealth: 1,
NodeRole: "agent",
UnitOutput: "Some nasty error occurred",
Help: "Node available at `dcos node ssh -mesos-id ab098f2a-799c-4d85-82b2-eb5159d0ceb0-S2`. Try, `journalctl -xv` to diagnose further.",
}
s.assert.Equal(response, expectedResponse, "Response is in incorrect format")
// use wrong Unit
resp = s.get("/system/health/v1/units/dcos-notfound.service/nodes/10.0.7.192")
s.assert.Equal(string(resp), "Unit dcos-notfound.service not found\n")
// use wrong node
resp = s.get("/system/health/v1/units/dcos-cosmos.service/nodes/127.0.0.1")
s.assert.Equal(string(resp), "Node 127.0.0.1 not found\n")
}
func (s *HandlersTestSuit) TestgetNodesHandlerFunc() {
// Test endpoint /system/health/v1/nodes
resp := s.get("/system/health/v1/nodes")
var response NodesResponseJSONStruct
json.Unmarshal(resp, &response)
s.assert.NotEqual(response, NodesResponseJSONStruct{}, "Response cannot be empty")
	s.assert.Len(response.Array, 1, "Number of nodes in response must be 1")
s.assert.Contains(response.Array, &NodeResponseFieldsStruct{
HostIP: "10.0.7.190",
NodeHealth: 0,
NodeRole: "master",
})
}
func (s *HandlersTestSuit) TestgetNodeByIdHandlerFunc() {
// Test endpoint /system/health/v1/nodes/<nodeid>
resp := s.get("/system/health/v1/nodes/10.0.7.190")
var response NodeResponseFieldsStruct
json.Unmarshal(resp, &response)
s.assert.Equal(response, NodeResponseFieldsStruct{
HostIP: "10.0.7.190",
NodeHealth: 0,
NodeRole: "master",
})
// use wrong host
resp = s.get("/system/health/v1/nodes/127.0.0.1")
s.assert.Equal(string(resp), "Node 127.0.0.1 not found\n")
}
func (s *HandlersTestSuit) TestgetNodeUnitsByNodeIdHandlerFunc() {
// Test endpoint /system/health/v1/nodes/<nodeid>/units
resp := s.get("/system/health/v1/nodes/10.0.7.190/units")
var response UnitsResponseJSONStruct
json.Unmarshal(resp, &response)
s.assert.NotEqual(response, UnitsResponseJSONStruct{}, "Response cannot be empty")
s.assert.Len(response.Array, 1, "Response should have 1 Unit")
s.assert.Contains(response.Array, UnitResponseFieldsStruct{
UnitID: "dcos-adminrouter-reload.service",
PrettyName: "Admin Router Reload",
UnitHealth: 0,
UnitTitle: "Reload admin router to get new DNS",
})
// use wrong host
resp = s.get("/system/health/v1/nodes/127.0.0.1/units")
s.assert.Equal(string(resp), "Node 127.0.0.1 not found\n")
}
func (s *HandlersTestSuit) TestgetNodeUnitByNodeIdUnitIdHandlerFunc() {
// Test endpoint /system/health/v1/nodes/<nodeid>/units/<unitid>
resp := s.get("/system/health/v1/nodes/10.0.7.190/units/dcos-adminrouter-reload.service")
var response UnitResponseFieldsStruct
json.Unmarshal(resp, &response)
s.assert.Equal(response, UnitResponseFieldsStruct{
UnitID: "dcos-adminrouter-reload.service",
PrettyName: "Admin Router Reload",
UnitHealth: 0,
UnitTitle: "Reload admin router to get new DNS",
})
// use wrong host
resp = s.get("/system/health/v1/nodes/127.0.0.1/units/dcos-adminrouter-reload.service")
s.assert.Equal(string(resp), "Node 127.0.0.1 not found\n")
// use wrong service
resp = s.get("/system/health/v1/nodes/10.0.7.190/units/dcos-bad.service")
s.assert.Equal(string(resp), "Unit dcos-bad.service not found\n")
}
func (s *HandlersTestSuit) TestreportHandlerFunc() {
// Test endpoint /system/health/v1/report
resp := s.get("/system/health/v1/report")
var response MonitoringResponse
json.Unmarshal(resp, &response)
s.assert.Len(response.Units, 2)
s.assert.Len(response.Nodes, 1)
}
func (s *HandlersTestSuit) TestIsInListFunc() {
array := []string{"DC", "OS", "SYS"}
s.assert.Equal(isInList("DC", array), true, "DC should be in test array")
s.assert.Equal(isInList("CD", array), false, "CD should not be in test array")
}
func TestHandlersTestSuit(t *testing.T) {
suite.Run(t, new(HandlersTestSuit))
}
| [
"\"SYSTEMDRIVE\""
]
| []
| [
"SYSTEMDRIVE"
]
| [] | ["SYSTEMDRIVE"] | go | 1 | 0 | |
tests/ludwig/utils/test_torch_utils.py | import contextlib
import os
from typing import List
from unittest.mock import Mock, patch
import pytest
import torch
from ludwig.utils.torch_utils import (
_get_torch_init_params,
_set_torch_init_params,
initialize_pytorch,
sequence_length_2D,
sequence_length_3D,
)
@pytest.mark.parametrize("input_sequence", [[[0, 1, 1], [2, 0, 0], [3, 3, 3]]])
@pytest.mark.parametrize("expected_output", [[2, 1, 3]])
def test_sequence_length_2D(input_sequence: List[List[int]], expected_output: List[int]):
output_seq_length = sequence_length_2D(torch.tensor(input_sequence))
assert torch.equal(torch.tensor(expected_output), output_seq_length)
@pytest.mark.parametrize("input_sequence", [[[[-1, 0, 1], [1, -2, 0]], [[0, 0, 0], [3, 0, -2]]]])
@pytest.mark.parametrize("expected_output", [[2, 1]])
def test_sequence_length_3D(input_sequence: List[List[List[int]]], expected_output: List[int]):
input_sequence = torch.tensor(input_sequence, dtype=torch.int32)
expected_output = torch.tensor(expected_output, dtype=torch.int32)
output_seq_length = sequence_length_3D(input_sequence)
assert torch.equal(expected_output, output_seq_length)
@contextlib.contextmanager
def clean_params():
prev = _get_torch_init_params()
try:
_set_torch_init_params(None)
if "CUDA_VISIBLE_DEVICES" in os.environ:
del os.environ["CUDA_VISIBLE_DEVICES"]
yield
finally:
_set_torch_init_params(prev)
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_only_once(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
with clean_params():
# During first time initialization, set pytorch parallelism
initialize_pytorch(allow_parallel_threads=False)
mock_torch.set_num_threads.assert_called_once()
mock_torch.set_num_interop_threads.assert_called_once()
# Reset call counts on all threading calls
mock_torch.reset_mock()
# In the second call to initialization, avoid calling these methods again, as pytorch
# will raise an exception
initialize_pytorch(allow_parallel_threads=False)
mock_torch.set_num_threads.assert_not_called()
mock_torch.set_num_interop_threads.assert_not_called()
# No GPUs were specified, so this should not have been called even once
mock_torch.cuda.memory.set_per_process_memory_fraction.assert_not_called()
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_gpu_list(mock_torch):
# For test purposes, these devices can be anything, we just need to be able to uniquely
# identify them.
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
with clean_params():
initialize_pytorch(gpus=[1, 2])
assert os.environ["CUDA_VISIBLE_DEVICES"] == "1,2"
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_gpu_string(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
with clean_params():
initialize_pytorch(gpus="1,2")
assert os.environ["CUDA_VISIBLE_DEVICES"] == "1,2"
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_gpu_int(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
with clean_params():
initialize_pytorch(gpus=1)
mock_torch.cuda.set_device.assert_called_with(1)
assert "CUDA_VISIBLE_DEVICES" not in os.environ
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_without_gpu(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
with clean_params():
initialize_pytorch(gpus=-1)
assert os.environ["CUDA_VISIBLE_DEVICES"] == ""
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_horovod(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
mock_hvd = Mock()
mock_hvd.local_rank.return_value = 1
mock_hvd.local_size.return_value = 4
with clean_params():
initialize_pytorch(horovod=mock_hvd)
mock_torch.cuda.set_device.assert_called_with(1)
assert "CUDA_VISIBLE_DEVICES" not in os.environ
@patch("ludwig.utils.torch_utils.warnings")
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_horovod_bad_local_rank(mock_torch, mock_warnings):
"""In this scenario, the local_size 5 is out of the bounds of the GPU indices."""
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
mock_hvd = Mock()
mock_hvd.local_rank.return_value = 1
mock_hvd.local_size.return_value = 5
with clean_params():
initialize_pytorch(horovod=mock_hvd)
assert os.environ["CUDA_VISIBLE_DEVICES"] == ""
mock_warnings.warn.assert_called()
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_horovod_explicit_gpus(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
mock_hvd = Mock()
mock_hvd.local_rank.return_value = 1
mock_hvd.local_size.return_value = 4
with clean_params():
initialize_pytorch(gpus="-1", horovod=mock_hvd)
assert os.environ["CUDA_VISIBLE_DEVICES"] == ""
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
cmd/buildah-test/main.go | package main
import (
"context"
"errors"
"fmt"
"io"
"os"
"github.com/werf/werf/pkg/docker"
"github.com/werf/werf/pkg/util"
"github.com/werf/werf/pkg/buildah"
"github.com/werf/werf/pkg/werf"
)
var errUsage = errors.New("./buildah-test {auto|native-rootless|docker-with-fuse} DOCKERFILE_PATH [CONTEXT_PATH]")
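// For example (hypothetical paths):
//   ./buildah-test native-rootless ./Dockerfile .
// builds ./Dockerfile using the current directory as the build context.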
func do(ctx context.Context) error {
var mode buildah.Mode
if v := os.Getenv("BUILDAH_TEST_MODE"); v != "" {
mode = buildah.Mode(v)
} else {
if len(os.Args) < 2 {
return errUsage
}
mode = buildah.ResolveMode(buildah.Mode(os.Args[1]))
os.Setenv("BUILDAH_TEST_MODE", string(mode))
}
shouldTerminate, err := buildah.ProcessStartupHook(mode)
if err != nil {
return fmt.Errorf("buildah process startup hook failed: %s", err)
}
if shouldTerminate {
return nil
}
if err := werf.Init("", ""); err != nil {
return fmt.Errorf("unable to init werf subsystem: %s", err)
}
mode = buildah.ResolveMode(mode)
fmt.Printf("Using buildah mode: %s\n", mode)
if mode == buildah.ModeDockerWithFuse {
if err := docker.Init(ctx, "", false, false, ""); err != nil {
return err
}
}
if len(os.Args) < 3 {
return errUsage
}
var dockerfilePath = os.Args[2]
var contextDir string
if len(os.Args) > 3 {
contextDir = os.Args[3]
}
b, err := buildah.NewBuildah(mode, buildah.BuildahOpts{})
if err != nil {
return fmt.Errorf("unable to create buildah client: %s", err)
}
dockerfileData, err := os.ReadFile(dockerfilePath)
if err != nil {
return fmt.Errorf("error reading %q: %s", dockerfilePath, err)
}
var contextTar io.Reader
if contextDir != "" {
contextTar = util.ReadDirAsTar(contextDir)
}
imageId, err := b.BuildFromDockerfile(ctx, dockerfileData, buildah.BuildFromDockerfileOpts{
ContextTar: contextTar,
CommonOpts: buildah.CommonOpts{
LogWriter: os.Stdout,
},
})
if err != nil {
return fmt.Errorf("BuildFromDockerfile failed: %s", err)
}
fmt.Fprintf(os.Stdout, "INFO: built imageId is %s\n", imageId)
return nil
}
func main() {
if err := do(context.Background()); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
os.Exit(1)
}
}
| [
"\"BUILDAH_TEST_MODE\""
]
| []
| [
"BUILDAH_TEST_MODE"
]
| [] | ["BUILDAH_TEST_MODE"] | go | 1 | 0 | |
vendor/github.com/armory/dinghy/pkg/settings/load.go | /*
* Copyright 2019 Armory, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package settings is a single place to put all of the application settings.
package settings
import (
"encoding/json"
"errors"
"github.com/armory/go-yaml-tools/pkg/secrets"
"io/ioutil"
"os"
"strings"
"github.com/mitchellh/mapstructure"
"github.com/armory/dinghy/pkg/util"
"github.com/armory/go-yaml-tools/pkg/spring"
"github.com/imdario/mergo"
log "github.com/sirupsen/logrus"
)
func NewDefaultSettings() Settings {
return Settings{
DinghyFilename: "dinghyfile",
TemplateRepo: "dinghy-templates",
AutoLockPipelines: "true",
GitHubCredsPath: util.GetenvOrDefault("GITHUB_TOKEN_PATH", os.Getenv("HOME")+"/.armory/cache/github-creds.txt"),
GithubEndpoint: "https://api.github.com",
StashCredsPath: util.GetenvOrDefault("STASH_TOKEN_PATH", os.Getenv("HOME")+"/.armory/cache/stash-creds.txt"),
StashEndpoint: "http://localhost:7990/rest/api/1.0",
Logging: Logging{
File: "",
Level: "INFO",
},
spinnakerSupplied: spinnakerSupplied{
Orca: spinnakerService{
Enabled: "true",
BaseURL: util.GetenvOrDefault("ORCA_BASE_URL", "http://orca:8083"),
},
Front50: spinnakerService{
Enabled: "true",
BaseURL: util.GetenvOrDefault("FRONT50_BASE_URL", "http://front50:8080"),
},
Echo: spinnakerService{
BaseURL: util.GetenvOrDefault("ECHO_BASE_URL", "http://echo:8089"),
},
Fiat: fiat{
spinnakerService: spinnakerService{
Enabled: "false",
BaseURL: util.GetenvOrDefault("FIAT_BASE_URL", "http://fiat:7003"),
},
AuthUser: "",
},
Redis: Redis{
BaseURL: util.GetenvOrDefault("REDIS_HOST", "redis:6379"),
Password: util.GetenvOrDefault("REDIS_PASSWORD", ""),
},
},
ParserFormat: "json",
RepoConfig: []RepoConfig{},
}
}
// LoadSettings loads the Spring config from the default Spinnaker paths
// and merges default settings with the loaded settings
func LoadSettings() (*Settings, error) {
springConfig, err := loadProfiles()
if err != nil {
return nil, err
}
settings, err := configureSettings(NewDefaultSettings(), springConfig)
if err != nil {
return nil, err
}
return settings, nil
}
func configureSettings(defaultSettings, overrides Settings) (*Settings, error) {
if err := mergo.Merge(&defaultSettings, overrides, mergo.WithOverride); err != nil {
return nil, err
}
// If Github token not passed directly
// Required for backwards compatibility
if defaultSettings.GitHubToken == "" {
// load github api token
if _, err := os.Stat(defaultSettings.GitHubCredsPath); err == nil {
creds, err := ioutil.ReadFile(defaultSettings.GitHubCredsPath)
if err != nil {
return nil, err
}
c := strings.Split(strings.TrimSpace(string(creds)), ":")
if len(c) < 2 {
return nil, errors.New("github creds file should have format 'username:token'")
}
defaultSettings.GitHubToken = c[1]
log.Info("Successfully loaded github api creds")
}
}
// If Stash token not passed directly
// Required for backwards compatibility
if defaultSettings.StashToken == "" || defaultSettings.StashUsername == "" {
// load stash api creds
if _, err := os.Stat(defaultSettings.StashCredsPath); err == nil {
creds, err := ioutil.ReadFile(defaultSettings.StashCredsPath)
if err != nil {
return nil, err
}
c := strings.Split(strings.TrimSpace(string(creds)), ":")
if len(c) < 2 {
return nil, errors.New("stash creds file should have format 'username:token'")
}
defaultSettings.StashUsername = c[0]
defaultSettings.StashToken = c[1]
log.Info("Successfully loaded stash api creds")
}
}
// Required for backwards compatibility
if defaultSettings.Deck.BaseURL == "" && defaultSettings.SpinnakerUIURL != "" {
log.Warn("Spinnaker UI URL should be set with ${services.deck.baseUrl}")
defaultSettings.Deck.BaseURL = defaultSettings.SpinnakerUIURL
}
// Take the FiatUser setting if fiat is enabled (coming from hal settings)
if defaultSettings.Fiat.Enabled == "true" && defaultSettings.FiatUser != "" {
defaultSettings.Fiat.AuthUser = defaultSettings.FiatUser
}
if defaultSettings.ParserFormat == "" {
defaultSettings.ParserFormat = "json"
}
c, _ := json.Marshal(defaultSettings)
log.Infof("The following settings have been loaded: %v", string(c))
return &defaultSettings, nil
}
func decodeProfilesToSettings(profiles map[string]interface{}, s *Settings) error {
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
WeaklyTypedInput: true,
Result: s,
})
if err != nil {
return err
}
return decoder.Decode(profiles)
}
func loadProfiles() (Settings, error) {
// var s Settings
var config Settings
propNames := []string{"spinnaker", "dinghy"}
c, err := spring.LoadDefault(propNames)
if err != nil {
return config, err
}
if err := decodeProfilesToSettings(c, &config); err != nil {
return config, err
}
if (Secrets{}) != config.Secrets {
if (secrets.VaultConfig{}) != config.Secrets.Vault {
if err = secrets.RegisterVaultConfig(config.Secrets.Vault); err != nil {
return config, err
}
}
}
if err = decryptSecrets(&config); err != nil {
log.Fatalf("failed to decrypt secrets: %s", err)
}
return config, nil
}
func decryptSecrets(config *Settings) error {
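	// Run the GitHub and Stash tokens through the secrets decrypter before use;
	// plain, unencrypted values are expected to pass through unchanged.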
decrypter := secrets.NewDecrypter(config.GitHubToken)
secret, err := decrypter.Decrypt()
if err != nil {
return err
}
config.GitHubToken = secret
decrypter = secrets.NewDecrypter(config.StashToken)
secret, err = decrypter.Decrypt()
if err != nil {
return err
}
config.StashToken = secret
return nil
} | [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
stacktrace_ext_test.go | // Copyright (c) 2016, 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap_test
import (
"bytes"
"encoding/json"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// _zapPackages are packages that we search for in the logging output to match a
// zap stack frame. It is different from _zapStacktracePrefixes which is only
// intended to match on the function name, while this is on the full output
// which includes filenames.
var _zapPackages = []string{
"go.uber.org/zap.",
"go.uber.org/zap/zapcore.",
}
func TestStacktraceFiltersZapLog(t *testing.T) {
withLogger(t, func(logger *zap.Logger, out *bytes.Buffer) {
logger.Error("test log")
logger.Sugar().Error("sugar test log")
require.Contains(t, out.String(), "TestStacktraceFiltersZapLog", "Should not strip out non-zap import")
verifyNoZap(t, out.String())
})
}
func TestStacktraceFiltersZapMarshal(t *testing.T) {
withLogger(t, func(logger *zap.Logger, out *bytes.Buffer) {
marshal := func(enc zapcore.ObjectEncoder) error {
logger.Warn("marshal caused warn")
enc.AddString("f", "v")
return nil
}
logger.Error("test log", zap.Object("obj", zapcore.ObjectMarshalerFunc(marshal)))
logs := out.String()
// The marshal function (which will be under the test function) should not be stripped.
const marshalFnPrefix = "TestStacktraceFiltersZapMarshal."
require.Contains(t, logs, marshalFnPrefix, "Should not strip out marshal call")
// There should be no zap stack traces before that point.
marshalIndex := strings.Index(logs, marshalFnPrefix)
verifyNoZap(t, logs[:marshalIndex])
// After that point, there should be zap stack traces - we don't want to strip out
// the Marshal caller information.
for _, fnPrefix := range _zapPackages {
require.Contains(t, logs[marshalIndex:], fnPrefix, "Missing zap caller stack for Marshal")
}
})
}
func TestStacktraceFiltersVendorZap(t *testing.T) {
// We already have the dependencies downloaded so this should be
// instant.
deps := downloadDependencies(t)
// We need to simulate a zap as a vendor library, so we're going to
// create a fake GOPATH and run the above test which will contain zap
// in the vendor directory.
withGoPath(t, func(goPath string) {
zapDir, err := os.Getwd()
require.NoError(t, err, "Failed to get current directory")
testDir := filepath.Join(goPath, "src/go.uber.org/zap_test/")
vendorDir := filepath.Join(testDir, "vendor")
		require.NoError(t, os.MkdirAll(testDir, 0777), "Failed to create source directory")
curFile := getSelfFilename(t)
setupSymlink(t, curFile, filepath.Join(testDir, curFile))
// Set up symlinks for zap, and for any test dependencies.
setupSymlink(t, zapDir, filepath.Join(vendorDir, "go.uber.org/zap"))
for _, dep := range deps {
setupSymlink(t, dep.Dir, filepath.Join(vendorDir, dep.ImportPath))
}
// Now run the above test which ensures we filter out zap
// stacktraces, but this time zap is in a vendor
cmd := exec.Command("go", "test", "-v", "-run", "TestStacktraceFiltersZap")
cmd.Dir = testDir
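		// GO111MODULE=off forces GOPATH mode so the vendored copy of zap set up above is used.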
cmd.Env = append(os.Environ(), "GO111MODULE=off")
out, err := cmd.CombinedOutput()
require.NoError(t, err, "Failed to run test in vendor directory, output: %s", out)
assert.Contains(t, string(out), "PASS")
})
}
// withLogger sets up a logger with a real encoder set up, so that any marshal functions are called.
// The inbuilt observer does not call Marshal for objects/arrays, which we need for some tests.
func withLogger(t *testing.T, fn func(logger *zap.Logger, out *bytes.Buffer)) {
buf := &bytes.Buffer{}
encoder := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig())
core := zapcore.NewCore(encoder, zapcore.AddSync(buf), zapcore.DebugLevel)
logger := zap.New(core, zap.AddStacktrace(zap.DebugLevel))
fn(logger, buf)
}
func verifyNoZap(t *testing.T, logs string) {
for _, fnPrefix := range _zapPackages {
require.NotContains(t, logs, fnPrefix, "Should not strip out marshal call")
}
}
func withGoPath(t *testing.T, f func(goPath string)) {
goPath, err := ioutil.TempDir("", "gopath")
require.NoError(t, err, "Failed to create temporary directory for GOPATH")
//defer os.RemoveAll(goPath)
os.Setenv("GOPATH", goPath)
defer os.Setenv("GOPATH", os.Getenv("GOPATH"))
f(goPath)
}
func getSelfFilename(t *testing.T) string {
_, file, _, ok := runtime.Caller(0)
require.True(t, ok, "Failed to get caller information to identify local file")
return filepath.Base(file)
}
func setupSymlink(t *testing.T, src, dst string) {
// Make sure the destination directory exists.
os.MkdirAll(filepath.Dir(dst), 0777)
// Get absolute path of the source for the symlink, otherwise we can create a symlink
// that uses relative paths.
srcAbs, err := filepath.Abs(src)
require.NoError(t, err, "Failed to get absolute path")
require.NoError(t, os.Symlink(srcAbs, dst), "Failed to set up symlink")
}
type dependency struct {
ImportPath string `json:"Path"` // import path of the dependency
Dir string `json:"Dir"` // location on disk
}
// Downloads all dependencies for the current Go module and reports their
// module paths and locations on disk.
func downloadDependencies(t *testing.T) []dependency {
cmd := exec.Command("go", "mod", "download", "-json")
stdout, err := cmd.Output()
require.NoError(t, err, "Failed to run 'go mod download'")
var deps []dependency
dec := json.NewDecoder(bytes.NewBuffer(stdout))
for dec.More() {
var d dependency
require.NoError(t, dec.Decode(&d), "Failed to decode dependency")
deps = append(deps, d)
}
return deps
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
sdk/python/pulumi_aws/iam/user_login_profile.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['UserLoginProfile']
class UserLoginProfile(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
password_length: Optional[pulumi.Input[int]] = None,
password_reset_required: Optional[pulumi.Input[bool]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages an IAM User Login Profile with limited support for password creation during this provider resource creation. Uses PGP to encrypt the password for safe transport to the user. PGP keys can be obtained from Keybase.
> To reset an IAM User login password via this provider, you can use delete and recreate this resource or change any of the arguments.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_user = aws.iam.User("exampleUser",
path="/",
force_destroy=True)
example_user_login_profile = aws.iam.UserLoginProfile("exampleUserLoginProfile",
user=example_user.name,
pgp_key="keybase:some_person_that_exists")
pulumi.export("password", example_user_login_profile.encrypted_password)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] password_length: The length of the generated password on resource creation. Only applies on resource creation. Drift detection is not possible with this argument.
:param pulumi.Input[bool] password_reset_required: Whether the user should be forced to reset the generated password on resource creation. Only applies on resource creation. Drift detection is not possible with this argument.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:username`. Only applies on resource creation. Drift detection is not possible with this argument.
:param pulumi.Input[str] user: The IAM user's name.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['password_length'] = password_length
__props__['password_reset_required'] = password_reset_required
if pgp_key is None:
raise TypeError("Missing required property 'pgp_key'")
__props__['pgp_key'] = pgp_key
if user is None:
raise TypeError("Missing required property 'user'")
__props__['user'] = user
__props__['encrypted_password'] = None
__props__['key_fingerprint'] = None
super(UserLoginProfile, __self__).__init__(
'aws:iam/userLoginProfile:UserLoginProfile',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
encrypted_password: Optional[pulumi.Input[str]] = None,
key_fingerprint: Optional[pulumi.Input[str]] = None,
password_length: Optional[pulumi.Input[int]] = None,
password_reset_required: Optional[pulumi.Input[bool]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None) -> 'UserLoginProfile':
"""
Get an existing UserLoginProfile resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] encrypted_password: The encrypted password, base64 encoded. Only available if password was handled on this provider resource creation, not import.
:param pulumi.Input[str] key_fingerprint: The fingerprint of the PGP key used to encrypt the password. Only available if password was handled on this provider resource creation, not import.
:param pulumi.Input[int] password_length: The length of the generated password on resource creation. Only applies on resource creation. Drift detection is not possible with this argument.
:param pulumi.Input[bool] password_reset_required: Whether the user should be forced to reset the generated password on resource creation. Only applies on resource creation. Drift detection is not possible with this argument.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:username`. Only applies on resource creation. Drift detection is not possible with this argument.
:param pulumi.Input[str] user: The IAM user's name.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["encrypted_password"] = encrypted_password
__props__["key_fingerprint"] = key_fingerprint
__props__["password_length"] = password_length
__props__["password_reset_required"] = password_reset_required
__props__["pgp_key"] = pgp_key
__props__["user"] = user
return UserLoginProfile(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="encryptedPassword")
def encrypted_password(self) -> pulumi.Output[str]:
"""
The encrypted password, base64 encoded. Only available if password was handled on this provider resource creation, not import.
"""
return pulumi.get(self, "encrypted_password")
@property
@pulumi.getter(name="keyFingerprint")
def key_fingerprint(self) -> pulumi.Output[str]:
"""
The fingerprint of the PGP key used to encrypt the password. Only available if password was handled on this provider resource creation, not import.
"""
return pulumi.get(self, "key_fingerprint")
@property
@pulumi.getter(name="passwordLength")
def password_length(self) -> pulumi.Output[Optional[int]]:
"""
The length of the generated password on resource creation. Only applies on resource creation. Drift detection is not possible with this argument.
"""
return pulumi.get(self, "password_length")
@property
@pulumi.getter(name="passwordResetRequired")
def password_reset_required(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the user should be forced to reset the generated password on resource creation. Only applies on resource creation. Drift detection is not possible with this argument.
"""
return pulumi.get(self, "password_reset_required")
@property
@pulumi.getter(name="pgpKey")
def pgp_key(self) -> pulumi.Output[str]:
"""
Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:username`. Only applies on resource creation. Drift detection is not possible with this argument.
"""
return pulumi.get(self, "pgp_key")
@property
@pulumi.getter
def user(self) -> pulumi.Output[str]:
"""
The IAM user's name.
"""
return pulumi.get(self, "user")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| []
| []
| []
| [] | [] | python | null | null | null |
envconfig.go | // Copyright (c) 2013 Kelsey Hightower. All rights reserved.
// Use of this source code is governed by the MIT License that can be found in
// the LICENSE file.
package envconfig
import (
"encoding"
"errors"
"fmt"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
// ErrInvalidSpecification indicates that a specification is of the wrong type.
var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
var gatherRegexp = regexp.MustCompile("([^A-Z]+|[A-Z]+[^A-Z]+|[A-Z]+)")
var acronymRegexp = regexp.MustCompile("([A-Z]+)([A-Z][^A-Z]+)")
var defaultValue = make(map[string]string)
// A ParseError occurs when an environment variable cannot be converted to
// the type required by a struct field during assignment.
type ParseError struct {
KeyName string
FieldName string
TypeName string
Value string
Err error
}
// Decoder has the same semantics as Setter, but takes higher precedence.
// It is provided for historical compatibility.
type Decoder interface {
Decode(value string) error
}
// Setter is implemented by types can self-deserialize values.
// Any type that implements flag.Value also implements Setter.
type Setter interface {
Set(value string) error
}
func (e *ParseError) Error() string {
return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
}
// EnvVar maintains information about the configuration variable
type EnvVar struct {
Name string
Alt string
Key string
Field reflect.Value
Tags reflect.StructTag
}
func (e EnvVar) GetValue(security bool) string {
rv := e.Field
switch rv.Kind() {
case reflect.Slice, reflect.Array:
values := make([]string, 0)
for i := 0; i < rv.Len(); i++ {
values = append(values, stringValueOf(rv.Index(i), security))
}
return strings.Join(values, ", ")
default:
return stringValueOf(rv, security)
}
}
type ISecurityStringer interface {
SecurityString() string
}
func stringValueOf(rv reflect.Value, security bool) string {
v := rv.Interface()
switch rv.Kind() {
case reflect.Array, reflect.Slice:
values := make([]string, 0)
for i := 0; i < rv.Len(); i++ {
values = append(values, stringValueOf(rv.Index(i), security))
}
return strings.Join(values, ",")
default:
if security {
if securityStringer, ok := v.(ISecurityStringer); ok {
return securityStringer.SecurityString()
}
}
s, err := ConvertToStr(v)
if err != nil {
panic(err)
}
return s
}
}
// GatherInfo gathers information about the specified struct
func GatherInfo(prefix string, spec interface{}) ([]EnvVar, error) {
s := reflect.ValueOf(spec)
if s.Kind() != reflect.Ptr {
return nil, ErrInvalidSpecification
}
s = s.Elem()
if s.Kind() != reflect.Struct {
return nil, ErrInvalidSpecification
}
typeOfSpec := s.Type()
// over allocate an info array, we will extend if needed later
infos := make([]EnvVar, 0, s.NumField())
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
ftype := typeOfSpec.Field(i)
if !f.CanSet() || isTrue(ftype.Tag.Get("ignored")) {
continue
}
for f.Kind() == reflect.Ptr {
if f.IsNil() {
if f.Type().Elem().Kind() != reflect.Struct {
// nil pointer to a non-struct: leave it alone
break
}
// nil pointer to struct: create a zero instance
f.Set(reflect.New(f.Type().Elem()))
}
f = f.Elem()
}
// Capture information about the config variable
info := EnvVar{
Name: ftype.Name,
Field: f,
Tags: ftype.Tag,
Alt: strings.ToUpper(ftype.Tag.Get("envconfig")),
}
// Default to the field name as the env var name (will be upcased)
info.Key = info.Name
// Best effort to un-pick camel casing as separate words
if isTrue(ftype.Tag.Get("split_words")) {
words := gatherRegexp.FindAllStringSubmatch(ftype.Name, -1)
if len(words) > 0 {
var name []string
for _, words := range words {
if m := acronymRegexp.FindStringSubmatch(words[0]); len(m) == 3 {
name = append(name, m[1], m[2])
} else {
name = append(name, words[0])
}
}
info.Key = strings.Join(name, "_")
}
}
if info.Alt != "" {
info.Key = info.Alt
}
if prefix != "" {
info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
}
info.Key = strings.ToUpper(info.Key)
infos = append(infos, info)
if f.Kind() == reflect.Struct {
// honor Decode if present
if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil && binaryUnmarshaler(f) == nil {
innerPrefix := prefix
if !ftype.Anonymous {
innerPrefix = info.Key
}
embeddedPtr := f.Addr().Interface()
embeddedInfos, err := GatherInfo(innerPrefix, embeddedPtr)
if err != nil {
return nil, err
}
infos = append(infos[:len(infos)-1], embeddedInfos...)
continue
}
}
}
return infos, nil
}
// CheckDisallowed checks that no environment variables with the prefix are set
// that we don't know how or want to parse. This is likely only meaningful with
// a non-empty prefix.
func CheckDisallowed(prefix string, spec interface{}) error {
infos, err := GatherInfo(prefix, spec)
if err != nil {
return err
}
vars := make(map[string]struct{})
for _, info := range infos {
vars[info.Key] = struct{}{}
}
if prefix != "" {
prefix = strings.ToUpper(prefix) + "_"
}
for _, env := range os.Environ() {
if !strings.HasPrefix(env, prefix) {
continue
}
v := strings.SplitN(env, "=", 2)[0]
if _, found := vars[v]; !found {
return fmt.Errorf("unknown environment variable %s", v)
}
}
return nil
}
func LoadDefaultFromYml(filePath string) error {
data, err := ioutil.ReadFile(filePath)
if err != nil {
return err
}
return yaml.Unmarshal(data, defaultValue)
}
// Process populates the specified struct based on environment variables
func Process(prefix string, spec interface{}) error {
infos, err := GatherInfo(prefix, spec)
for _, info := range infos {
// `os.Getenv` cannot differentiate between an explicitly set empty value
// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
// but it is only available in go1.5 or newer. We're using Go build tags
// here to use os.LookupEnv for >=go1.5
value, ok := lookupEnv(info.Key)
if !ok && info.Alt != "" {
value, ok = lookupEnv(info.Alt)
}
def := info.Tags.Get("default")
if def != "" && !ok {
value = def
}
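		// A default loaded from YAML (see LoadDefaultFromYml) is used only when neither
		// the environment variable nor the `default` struct tag provided a value.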
defFromYml, defExist := defaultValue[info.Key]
if def == "" && !ok && defExist && defFromYml != "" {
value = defFromYml
}
req := info.Tags.Get("required")
if !ok && def == "" && !defExist {
if isTrue(req) {
key := info.Key
if info.Alt != "" {
key = info.Alt
}
return fmt.Errorf("required key %s missing value", key)
}
continue
}
err = processField(value, info.Field)
if err != nil {
return &ParseError{
KeyName: info.Key,
FieldName: info.Name,
TypeName: info.Field.Type().String(),
Value: value,
Err: err,
}
}
}
return err
}
// MustProcess is the same as Process but panics if an error occurs
func MustProcess(prefix string, spec interface{}) {
if err := Process(prefix, spec); err != nil {
panic(err)
}
}
func processField(value string, field reflect.Value) error {
typ := field.Type()
decoder := decoderFrom(field)
if decoder != nil {
return decoder.Decode(value)
}
// look for Set method if Decode not defined
setter := setterFrom(field)
if setter != nil {
return setter.Set(value)
}
if t := textUnmarshaler(field); t != nil {
return t.UnmarshalText([]byte(value))
}
if b := binaryUnmarshaler(field); b != nil {
return b.UnmarshalBinary([]byte(value))
}
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
if field.IsNil() {
field.Set(reflect.New(typ))
}
field = field.Elem()
}
switch typ.Kind() {
case reflect.String:
field.SetString(value)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var (
val int64
err error
)
if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
var d time.Duration
d, err = time.ParseDuration(value)
val = int64(d)
} else {
val, err = strconv.ParseInt(value, 0, typ.Bits())
}
if err != nil {
return err
}
field.SetInt(val)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val, err := strconv.ParseUint(value, 0, typ.Bits())
if err != nil {
return err
}
field.SetUint(val)
case reflect.Bool:
val, err := strconv.ParseBool(value)
if err != nil {
return err
}
field.SetBool(val)
case reflect.Float32, reflect.Float64:
val, err := strconv.ParseFloat(value, typ.Bits())
if err != nil {
return err
}
field.SetFloat(val)
case reflect.Slice:
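		// Slices are parsed from a comma-separated list; []byte is special-cased and
		// receives the raw string bytes unchanged.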
sl := reflect.MakeSlice(typ, 0, 0)
if typ.Elem().Kind() == reflect.Uint8 {
sl = reflect.ValueOf([]byte(value))
} else if len(strings.TrimSpace(value)) != 0 {
vals := strings.Split(value, ",")
sl = reflect.MakeSlice(typ, len(vals), len(vals))
for i, val := range vals {
err := processField(val, sl.Index(i))
if err != nil {
return err
}
}
}
field.Set(sl)
case reflect.Map:
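		// Maps are parsed from comma-separated key:value pairs, e.g. "k1:v1,k2:v2";
		// an empty string produces an empty map.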
mp := reflect.MakeMap(typ)
if len(strings.TrimSpace(value)) != 0 {
pairs := strings.Split(value, ",")
for _, pair := range pairs {
kvpair := strings.Split(pair, ":")
if len(kvpair) != 2 {
return fmt.Errorf("invalid map item: %q", pair)
}
k := reflect.New(typ.Key()).Elem()
err := processField(kvpair[0], k)
if err != nil {
return err
}
v := reflect.New(typ.Elem()).Elem()
err = processField(kvpair[1], v)
if err != nil {
return err
}
mp.SetMapIndex(k, v)
}
}
field.Set(mp)
}
return nil
}
func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
// it may be impossible for a struct field to fail this check
if !field.CanInterface() {
return
}
var ok bool
fn(field.Interface(), &ok)
if !ok && field.CanAddr() {
fn(field.Addr().Interface(), &ok)
}
}
func decoderFrom(field reflect.Value) (d Decoder) {
interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
return d
}
func setterFrom(field reflect.Value) (s Setter) {
interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
return s
}
func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
return t
}
func binaryUnmarshaler(field reflect.Value) (b encoding.BinaryUnmarshaler) {
interfaceFrom(field, func(v interface{}, ok *bool) { b, *ok = v.(encoding.BinaryUnmarshaler) })
return b
}
func isTrue(s string) bool {
b, _ := strconv.ParseBool(s)
return b
}
| []
| []
| []
| [] | [] | go | 0 | 0 | |
Code-Text/code-to-text/code/run.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import
import os
import sys
from bleu import _bleu
import pickle
import torch
import json
import random
import logging
import argparse
import numpy as np
from io import open
from itertools import cycle
import torch.nn as nn
from model import Seq2Seq
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)}
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class Example(object):
"""A single training/test example."""
def __init__(self,
idx,
source,
target,
):
self.idx = idx
self.source = source
self.target = target
def read_examples(filename):
"""Read examples from filename."""
examples=[]
with open(filename,encoding="utf-8") as f:
for idx, line in enumerate(f):
line=line.strip()
js=json.loads(line)
if 'idx' not in js:
js['idx']=idx
code=' '.join(js['code_tokens']).replace('\n',' ')
code=' '.join(code.strip().split())
nl=' '.join(js['docstring_tokens']).replace('\n','')
nl=' '.join(nl.strip().split())
examples.append(
Example(
idx = idx,
source=nl,
target = code,
)
)
return examples
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
example_id,
source_ids,
target_ids,
source_mask,
target_mask,
):
self.example_id = example_id
self.source_ids = source_ids
self.target_ids = target_ids
self.source_mask = source_mask
self.target_mask = target_mask
def convert_examples_to_features(examples, tokenizer, args,stage=None):
features = []
for example_index, example in enumerate(examples):
#source
source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-2]
source_tokens =[tokenizer.cls_token]+source_tokens+[tokenizer.sep_token]
source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
source_mask = [1] * (len(source_tokens))
padding_length = args.max_source_length - len(source_ids)
source_ids+=[tokenizer.pad_token_id]*padding_length
source_mask+=[0]*padding_length
#target
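        # At test time the gold target is unknown, so a dummy string is tokenized;
        # otherwise the target is truncated to max_target_length - 2 and wrapped in CLS/SEP.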
if stage=="test":
target_tokens = tokenizer.tokenize("None")
else:
target_tokens = tokenizer.tokenize(example.target)[:args.max_target_length-2]
target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token]
target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
target_mask = [1] *len(target_ids)
padding_length = args.max_target_length - len(target_ids)
target_ids+=[tokenizer.pad_token_id]*padding_length
target_mask+=[0]*padding_length
if example_index < 5:
if stage=='train':
logger.info("*** Example ***")
logger.info("idx: {}".format(example.idx))
logger.info("source_tokens: {}".format([x.replace('\u0120','_') for x in source_tokens]))
logger.info("source_ids: {}".format(' '.join(map(str, source_ids))))
logger.info("source_mask: {}".format(' '.join(map(str, source_mask))))
logger.info("target_tokens: {}".format([x.replace('\u0120','_') for x in target_tokens]))
logger.info("target_ids: {}".format(' '.join(map(str, target_ids))))
logger.info("target_mask: {}".format(' '.join(map(str, target_mask))))
features.append(
InputFeatures(
example_index,
source_ids,
target_ids,
source_mask,
target_mask,
)
)
return features
def set_seed(seed=42):
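    # Seed Python, NumPy and PyTorch (CPU and CUDA) RNGs and force deterministic cuDNN
    # so that runs are reproducible.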
random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type: e.g. roberta")
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model: e.g. roberta-base" )
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--load_model_path", default=None, type=str,
help="Path to trained model: Should contain the .bin files" )
## Other parameters
parser.add_argument("--train_filename", default=None, type=str,
help="The train filename. Should contain the .jsonl files for this task.")
parser.add_argument("--dev_filename", default=None, type=str,
help="The dev filename. Should contain the .jsonl files for this task.")
parser.add_argument("--test_filename", default=None, type=str,
help="The test filename. Should contain the .jsonl files for this task.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--max_source_length", default=64, type=int,
help="The maximum total source sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--max_target_length", default=32, type=int,
help="The maximum total target sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument("--train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--beam_size", default=10, type=int,
help="beam size for beam search")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3, type=int,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--eval_steps", default=-1, type=int,
help="")
parser.add_argument("--train_steps", default=-1, type=int,
help="")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
# print arguments
args = parser.parse_args()
logger.info(args)
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1))
args.device = device
# Set seed
set_seed(args.seed)
# make dir if output_dir not exist
if os.path.exists(args.output_dir) is False:
os.makedirs(args.output_dir)
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,do_lower_case=args.do_lower_case)
    # build model
encoder = model_class.from_pretrained(args.model_name_or_path,config=config)
decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads)
decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
model=Seq2Seq(encoder=encoder,decoder=decoder,config=config,
beam_size=args.beam_size,max_length=args.max_target_length,
sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id)
if args.load_model_path is not None:
logger.info("reload model from {}".format(args.load_model_path))
model.load_state_dict(torch.load(args.load_model_path))
model.to(device)
if args.local_rank != -1:
# Distributed training
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif args.n_gpu > 1:
# multi-gpu training
model = torch.nn.DataParallel(model)
if args.do_train:
# Prepare training data loader
train_examples = read_examples(args.train_filename)
train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train')
all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in train_features], dtype=torch.long)
all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long)
all_target_mask = torch.tensor([f.target_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps)
num_train_optimization_steps = args.train_steps
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=int(t_total*0.1),
num_training_steps=t_total)
#Start training
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num epoch = %d", args.num_train_epochs)
model.train()
dev_dataset={}
        nb_tr_examples, nb_tr_steps, tr_loss, global_step, best_bleu, best_loss = 0, 0, 0, 0, 0, 1e6
for epoch in range(args.num_train_epochs):
bar = tqdm(train_dataloader,total=len(train_dataloader))
for batch in bar:
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask,target_ids,target_mask = batch
loss,_,_ = model(source_ids=source_ids,source_mask=source_mask,target_ids=target_ids,target_mask=target_mask)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
train_loss=round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1),4)
bar.set_description("epoch {} loss {}".format(epoch,train_loss))
nb_tr_examples += source_ids.size(0)
nb_tr_steps += 1
loss.backward()
if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0:
#Update parameters
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
if args.do_eval:
#Eval model with dev dataset
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
eval_flag=False
if 'dev_loss' in dev_dataset:
eval_examples,eval_data=dev_dataset['dev_loss']
else:
eval_examples = read_examples(args.dev_filename)
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
all_target_ids = torch.tensor([f.target_ids for f in eval_features], dtype=torch.long)
all_target_mask = torch.tensor([f.target_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)
dev_dataset['dev_loss']=eval_examples,eval_data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
logger.info("\n***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
#Start Evaling model
model.eval()
eval_loss,tokens_num = 0,0
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask,target_ids,target_mask = batch
with torch.no_grad():
_,loss,num = model(source_ids=source_ids,source_mask=source_mask,
target_ids=target_ids,target_mask=target_mask)
eval_loss += loss.sum().item()
tokens_num += num.sum().item()
#Pring loss of dev dataset
model.train()
eval_loss = eval_loss / tokens_num
result = {'eval_ppl': round(np.exp(eval_loss),5),
'global_step': global_step+1,
'train_loss': round(train_loss,5)}
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
logger.info(" "+"*"*20)
#save last checkpoint
last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
if not os.path.exists(last_output_dir):
os.makedirs(last_output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(last_output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if eval_loss<best_loss:
logger.info(" Best ppl:%s",round(np.exp(eval_loss),5))
logger.info(" "+"*"*20)
best_loss=eval_loss
# Save best checkpoint for best ppl
output_dir = os.path.join(args.output_dir, 'checkpoint-best-ppl')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
#Calculate bleu
if 'dev_bleu' in dev_dataset:
eval_examples,eval_data=dev_dataset['dev_bleu']
else:
eval_examples = read_examples(args.dev_filename)
eval_examples = random.sample(eval_examples,min(1000,len(eval_examples)))
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids,all_source_mask)
dev_dataset['dev_bleu']=eval_examples,eval_data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
p=[]
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask= batch
with torch.no_grad():
preds = model(source_ids=source_ids,source_mask=source_mask)
for pred in preds:
t=pred[0].cpu().numpy()
t=list(t)
if 0 in t:
t=t[:t.index(0)]
text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
p.append(text)
model.train()
predictions=[]
with open(os.path.join(args.output_dir,"dev.output"),'w') as f, open(os.path.join(args.output_dir,"dev.gold"),'w') as f1:
for ref,gold in zip(p,eval_examples):
predictions.append(str(gold.idx)+'\t'+ref)
f.write(str(gold.idx)+'\t'+ref+'\n')
f1.write(str(gold.idx)+'\t'+gold.target+'\n')
dev_bleu=round(_bleu(predictions, os.path.join(args.output_dir, "dev.gold")),2)
logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
logger.info(" "+"*"*20)
if dev_bleu>best_bleu:
logger.info(" Best bleu:%s",dev_bleu)
logger.info(" "+"*"*20)
best_bleu=dev_bleu
# Save best checkpoint for best bleu
output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if args.do_test:
files=[]
if args.dev_filename is not None:
files.append(args.dev_filename)
if args.test_filename is not None:
files.append(args.test_filename)
for idx,file in enumerate(files):
logger.info("Test file: {}".format(file))
eval_examples = read_examples(file)
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids,all_source_mask)
# Calculate bleu
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
p=[]
for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask= batch
with torch.no_grad():
preds = model(source_ids=source_ids,source_mask=source_mask)
for pred in preds:
t=pred[0].cpu().numpy()
t=list(t)
if 0 in t:
t=t[:t.index(0)]
text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
p.append(text)
model.train()
predictions=[]
with open(os.path.join(args.output_dir,"test_{}.output".format(str(idx))),'w') as f, open(os.path.join(args.output_dir,"test_{}.gold".format(str(idx))),'w') as f1:
for ref,gold in zip(p,eval_examples):
predictions.append(str(gold.idx)+'\t'+ref)
f.write(str(gold.idx)+'\t'+ref+'\n')
f1.write(str(gold.idx)+'\t'+gold.target+'\n')
dev_bleu=round(_bleu(predictions, os.path.join(args.output_dir, "test_{}.gold".format(idx))),2)
logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
logger.info(" "+"*"*20)
if __name__ == "__main__":
main()
| []
| []
| [
"PYHTONHASHSEED"
]
| [] | ["PYHTONHASHSEED"] | python | 1 | 0 | |
src/spaceone/inventory/service/collector_service.py | import logging
from spaceone.core.service import *
from spaceone.core.error import *
from spaceone.core import utils
from spaceone.inventory.error import *
from spaceone.inventory.manager.collection_state_manager import CollectionStateManager
from spaceone.inventory.manager.collector_manager import CollectorManager
from spaceone.inventory.manager.collector_manager.repository_manager import RepositoryManager
_LOGGER = logging.getLogger(__name__)
_KEYWORD_FILTER = ['collector_id', 'name', 'provider']
@authentication_handler
@authorization_handler
@mutation_handler
@event_handler
class CollectorService(BaseService):
def __init__(self, metadata):
super().__init__(metadata)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['name', 'plugin_info', 'domain_id'])
def create(self, params):
"""
Args:
params (dict): {
'name': 'str',
'plugin_info': 'dict',
'priority': 'int',
'tags': 'dict',
'is_public': 'bool',
'project_id': 'str',
'domain_id': 'str'
}
Returns:
collector_vo (object)
"""
if 'tags' in params:
params['tags'] = utils.dict_to_tags(params['tags'])
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
is_public = params.get('is_public', True)
project_id = params.get('project_id', None)
if (is_public is False) and (project_id is None):
_LOGGER.error(f'[create] project_id is required, if is_public is false')
raise ERROR_REQUIRED_PARAMETER(key='project_id')
plugin_info = self._get_plugin(params['plugin_info'], params['domain_id'])
params['capability'] = plugin_info.get('capability', {})
params['provider'] = plugin_info.get('provider')
_LOGGER.debug(f'[create] capability: {params["capability"]}')
_LOGGER.debug(f'[create] provider: {params["provider"]}')
return collector_mgr.create_collector(params)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'domain_id'])
def update(self, params):
"""
Args:
params (dict): {
'collector_id': 'str',
'name': 'str',
'priority': 'int',
'tags': 'dict',
'domain_id': 'str'
}
Returns:
collector_vo (object)
"""
if 'tags' in params:
params['tags'] = utils.dict_to_tags(params['tags'])
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_id = params['collector_id']
domain_id = params['domain_id']
try:
collector_vo = collector_mgr.get_collector(collector_id, domain_id)
except Exception as e:
raise ERROR_NO_COLLECTOR(collector_id=collector_id, domain_id=domain_id)
# If plugin_info exists, we need deep merge with previous information
# (merged_params, version_check) = self._get_merged_params(params, collector_vo.plugin_info)
# _LOGGER.debug(f'[update] params: {params}')
# _LOGGER.debug(f'[update] merged_params: {merged_params}')
if 'plugin_info' in params:
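            # Merge the incoming plugin changes with the stored plugin_info so that any
            # field not supplied (version, options, upgrade_mode) keeps its current value.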
original_plugin_info = collector_vo.plugin_info.to_dict()
version = params['plugin_info'].get('version', original_plugin_info['version'])
options = params['plugin_info'].get('options', original_plugin_info['options'])
upgrade_mode = params['plugin_info'].get('upgrade_mode', original_plugin_info['upgrade_mode'])
collector_mgr.update_plugin(collector_id, domain_id, version, options, upgrade_mode)
del params['plugin_info']
return collector_mgr.update_collector_by_vo(collector_vo, params)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'domain_id'])
def delete(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_mgr.delete_collector(params['collector_id'], params['domain_id'])
# Cascade Delete Collection State
state_mgr: CollectionStateManager = self.locator.get_manager('CollectionStateManager')
state_mgr.delete_collection_state_by_collector_id(params['collector_id'], params['domain_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'domain_id'])
def get(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_id = params['collector_id']
domain_id = params['domain_id']
only = params.get('only')
return collector_mgr.get_collector(collector_id, domain_id, only)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'domain_id'])
def enable(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_id = params['collector_id']
domain_id = params['domain_id']
return collector_mgr.enable_collector(collector_id, domain_id)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'domain_id'])
def disable(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_id = params['collector_id']
domain_id = params['domain_id']
return collector_mgr.disable_collector(collector_id, domain_id)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
@append_query_filter(['collector_id', 'name', 'state', 'priority', 'plugin_id', 'domain_id'])
@change_tag_filter('tags')
@append_keyword_filter(_KEYWORD_FILTER)
def list(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
query = params.get('query', {})
return collector_mgr.list_collectors(query)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['query', 'domain_id'])
@append_query_filter(['domain_id'])
@change_tag_filter('tags')
@append_keyword_filter(_KEYWORD_FILTER)
def stat(self, params):
"""
Args:
params (dict): {
'domain_id': 'str',
'query': 'dict (spaceone.api.core.v1.StatisticsQuery)'
}
Returns:
values (list) : 'list of statistics data'
"""
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
query = params.get('query', {})
return collector_mgr.stat_collectors(query)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'domain_id'])
def collect(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
job_info = collector_mgr.collect(params)
return job_info
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'domain_id'])
def update_plugin(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_id = params['collector_id']
domain_id = params['domain_id']
version = params.get('version')
options = params.get('options')
upgrade_mode = params.get('upgrade_mode')
#updated_option = collector_mgr.verify_plugin(collector_id, secret_id, domain_id)
collector_vo = collector_mgr.update_plugin(collector_id, domain_id, version, options, upgrade_mode)
return collector_vo
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'domain_id'])
def verify_plugin(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_id = params['collector_id']
secret_id = params.get('secret_id', None)
domain_id = params['domain_id']
#updated_option = collector_mgr.verify_plugin(collector_id, secret_id, domain_id)
collector_mgr.verify_plugin(collector_id, secret_id, domain_id)
        # If we reach this point, the plugin verification succeeded.
#return {'status': True}
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'schedule', 'domain_id'])
def add_schedule(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_id = params['collector_id']
domain_id = params['domain_id']
collector_vo = collector_mgr.get_collector(collector_id, domain_id)
params['collector'] = collector_vo
# Check schedule type
collector_mgr.is_supported_schedule(collector_vo, params['schedule'])
scheduler_info = collector_mgr.add_schedule(params)
return scheduler_info
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'schedule_id', 'domain_id'])
def get_schedule(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_id = params['collector_id']
schedule_id = params['schedule_id']
domain_id = params['domain_id']
return collector_mgr.get_schedule(schedule_id, domain_id)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'schedule_id', 'domain_id'])
def update_schedule(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_id = params['collector_id']
schedule_id = params['schedule_id']
domain_id = params['domain_id']
schedule_vo = collector_mgr.get_schedule(schedule_id, domain_id)
collector_vo = collector_mgr.update_schedule_by_vo(params, schedule_vo)
return collector_vo
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'schedule_id', 'domain_id'])
def delete_schedule(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
collector_id = params['collector_id']
schedule_id = params['schedule_id']
domain_id = params['domain_id']
return collector_mgr.delete_schedule(schedule_id, domain_id)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['collector_id', 'domain_id'])
@change_only_key({'collector_info': 'collector'}, key_path='query.only')
@append_query_filter(['collector_id', 'schedule_id', 'domain_id'])
@append_keyword_filter(['schedule_id', 'name'])
def list_schedules(self, params):
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
query = params.get('query', {})
# Temporary code for DB migration
if 'only' in query:
query['only'] += ['collector_id']
return collector_mgr.list_schedules(query)
############################
# for schedule
############################
@check_required(['schedule'])
def scheduled_collectors(self, params):
""" Search all collectors in this schedule
This is global search out-of domain
Args:
schedule(dict): {
'hours': list,
'minutes': list
}
domain_id: optional
ex) {'hour': 3}
Returns: collectors_info
"""
collector_mgr: CollectorManager = self.locator.get_manager('CollectorManager')
# state: ENABLED
# filter_query = [{'k':'collector.state','v':'ENABLED','o':'eq'}]
filter_query = []
if 'domain_id' in params:
domain_id = params['domain_id']
# update query
filter_query.append(_make_query_domain(domain_id))
# parse schedule
schedule = params['schedule']
if 'hour' in schedule:
            # find plugins which have an hour rule
filter_query.append(_make_query_hour(schedule['hour']))
elif 'minute' in schedule:
            # find plugins which have a minute rule
filter_query.append(_make_query_minute(schedule['minute']))
elif 'interval' in schedule:
# find interval schedules
filter_query.append(_make_query_interval())
else:
# TODO: CRON
pass
# make query for list_collector
query = {'filter': filter_query}
_LOGGER.debug(f'[scheduled_collectors] query: {query}')
return collector_mgr.list_schedules(query)
def _get_plugin(self, plugin_info, domain_id):
plugin_id = plugin_info['plugin_id']
# version = plugin_info['version']
repo_mgr: RepositoryManager = self.locator.get_manager('RepositoryManager')
plugin_info = repo_mgr.get_plugin(plugin_id, domain_id)
# repo_mgr.check_plugin_version(plugin_id, version, domain_id)
return plugin_info
def _make_query_domain(domain_id):
return {
'k': 'domain_id',
'v': domain_id,
'o': 'eq'
}
def _make_query_hour(hour: int):
# make query hour
return {
'k': 'schedule.hours',
'v': hour,
'o': 'contain'
}
def _make_query_minute(minute: int):
# make minute query
return {
'k': 'schedule.minute',
'v': minute,
'o': 'contain'
}
def _make_query_interval():
return {
'k': 'schedule.interval',
'v': 0,
'o': 'gt'
}
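# Illustrative example (not executed): for params {'schedule': {'hour': 3},
# 'domain_id': 'domain-xyz'}, scheduled_collectors() builds a query roughly
# like:
#   {'filter': [{'k': 'domain_id', 'v': 'domain-xyz', 'o': 'eq'},
#               {'k': 'schedule.hours', 'v': 3, 'o': 'contain'}]}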
| []
| []
| []
| [] | [] | python | null | null | null |
backend/test_30383/wsgi.py | """
WSGI config for test_30383 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_30383.settings')
application = get_wsgi_application()
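# A typical way to serve this application (assuming, for example, that gunicorn
# is installed) is: gunicorn test_30383.wsgi:application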
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/ko/build/gobuild_test.go | // Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package build
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"time"
"testing"
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/random"
)
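// testContext describes the fake GOPATH and working directory to use for a
// single test case.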
type testContext struct {
gopath string
workdir string
}
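// Enter swaps in the test case's GOPATH and working directory, remembering the
// original GOPATH so that Exit can restore it.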
func (tc *testContext) Enter(t *testing.T) {
// Track the original state, so that we can restore it.
ogp := os.Getenv("GOPATH")
// Change the current state for the test.
os.Setenv("GOPATH", tc.gopath)
getwd = func() (string, error) {
return tc.workdir, nil
}
// Record the original state for restoration.
tc.gopath = ogp
}
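// Exit restores the GOPATH and getwd function that Enter replaced.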
func (tc *testContext) Exit(t *testing.T) {
// Restore the original state.
os.Setenv("GOPATH", tc.gopath)
getwd = os.Getwd
}
func TestComputeImportPath(t *testing.T) {
tests := []struct {
desc string
ctx testContext
expectErr bool
expectImportpath string
}{{
desc: "simple gopath",
ctx: testContext{
gopath: "/go",
workdir: "/go/src/github.com/foo/bar",
},
expectImportpath: "github.com/foo/bar",
}, {
desc: "trailing slashes",
ctx: testContext{
gopath: "/go/",
workdir: "/go/src/github.com/foo/bar/",
},
expectImportpath: "github.com/foo/bar",
}, {
desc: "not on gopath",
ctx: testContext{
gopath: "/go",
workdir: "/rust/src/github.com/foo/bar",
},
expectErr: true,
}}
if runtime.GOOS == "windows" {
for i := range tests {
tests[i].ctx.gopath = "C:" + filepath.FromSlash(tests[i].ctx.gopath)
tests[i].ctx.workdir = "C:" + filepath.FromSlash(tests[i].ctx.workdir)
}
}
for _, test := range tests {
t.Run(test.desc, func(t *testing.T) {
// Set the context for our test.
test.ctx.Enter(t)
defer test.ctx.Exit(t)
ip, err := computeImportpath()
if err != nil && !test.expectErr {
t.Errorf("computeImportpath() = %v, want %v", err, test.expectImportpath)
} else if err == nil && test.expectErr {
t.Errorf("computeImportpath() = %v, want error", ip)
} else if err == nil && !test.expectErr {
if got, want := ip, test.expectImportpath; want != got {
t.Errorf("computeImportpath() = %v, want %v", got, want)
}
}
})
}
}
func TestGoBuildIsSupportedRef(t *testing.T) {
img, err := random.Image(1024, 1)
if err != nil {
t.Fatalf("random.Image() = %v", err)
}
importpath := "github.com/google/go-containerregistry"
tc := testContext{
gopath: "/go",
workdir: "/go/src/" + importpath,
}
tc.Enter(t)
defer tc.Exit(t)
ng, err := NewGo(Options{GetBase: func(string) (v1.Image, error) {
return img, nil
}})
if err != nil {
t.Fatalf("NewGo() = %v", err)
}
supportedTests := []string{
path.Join(importpath, "pkg", "foo"),
path.Join(importpath, "cmd", "crane"),
}
for _, test := range supportedTests {
t.Run(test, func(t *testing.T) {
if !ng.IsSupportedReference(test) {
t.Errorf("IsSupportedReference(%v) = false, want true", test)
}
})
}
unsupportedTests := []string{
"simple string",
filepath.FromSlash("k8s.io/client-go/pkg/foo"),
filepath.FromSlash("github.com/google/secret/cmd/sauce"),
filepath.Join("vendor", importpath, "pkg", "foo"),
}
for _, test := range unsupportedTests {
t.Run(test, func(t *testing.T) {
if ng.IsSupportedReference(test) {
t.Errorf("IsSupportedReference(%v) = true, want false", test)
}
})
}
}
// A helper method we use to substitute for the default "build" method.
func writeTempFile(s string) (string, error) {
file, err := ioutil.TempFile(os.TempDir(), "out")
if err != nil {
return "", err
}
defer file.Close()
if _, err := file.WriteString(filepath.ToSlash(s)); err != nil {
return "", err
}
return file.Name(), nil
}
func TestGoBuild(t *testing.T) {
baseLayers := int64(3)
base, err := random.Image(1024, baseLayers)
if err != nil {
t.Fatalf("random.Image() = %v", err)
}
importpath := "github.com/google/go-containerregistry"
tc := testContext{
gopath: "/go",
workdir: "/go/src/" + importpath,
}
tc.Enter(t)
defer tc.Exit(t)
creationTime := func() (*v1.Time, error) {
		return &v1.Time{Time: time.Unix(5000, 0)}, nil
}
ng, err := NewGo(
Options{
GetCreationTime: creationTime,
GetBase: func(string) (v1.Image, error) {
return base, nil
},
})
if err != nil {
t.Fatalf("NewGo() = %v", err)
}
ng.(*gobuild).build = writeTempFile
img, err := ng.Build(filepath.Join(importpath, "cmd", "crane"))
if err != nil {
t.Errorf("Build() = %v", err)
}
ls, err := img.Layers()
if err != nil {
t.Errorf("Layers() = %v", err)
}
// Check that we have the expected number of layers.
t.Run("check layer count", func(t *testing.T) {
if got, want := int64(len(ls)), baseLayers+1; got != want {
t.Fatalf("len(Layers()) = %v, want %v", got, want)
}
})
// While we have a randomized base image, the application layer should be completely deterministic.
// Check that when given fixed build outputs we get a fixed layer hash.
t.Run("check determinism", func(t *testing.T) {
expectedHash := v1.Hash{
Algorithm: "sha256",
Hex: "a7be3daf6084d1ec81ca903599d23a514903c9e875d60414ff8009994615bd70",
}
appLayer := ls[baseLayers]
if got, err := appLayer.Digest(); err != nil {
t.Errorf("Digest() = %v", err)
} else if got != expectedHash {
t.Errorf("Digest() = %v, want %v", got, expectedHash)
}
})
// Check that the entrypoint of the image is configured to invoke our Go application
t.Run("check entrypoint", func(t *testing.T) {
cfg, err := img.ConfigFile()
if err != nil {
t.Errorf("ConfigFile() = %v", err)
}
entrypoint := cfg.Config.Entrypoint
if got, want := len(entrypoint), 1; got != want {
t.Errorf("len(entrypoint) = %v, want %v", got, want)
}
if got, want := entrypoint[0], appPath; got != want {
t.Errorf("entrypoint = %v, want %v", got, want)
}
})
t.Run("check creation time", func(t *testing.T) {
cfg, err := img.ConfigFile()
if err != nil {
t.Errorf("ConfigFile() = %v", err)
}
actual := cfg.Created
want, err := creationTime()
if err != nil {
t.Errorf("CreationTime() = %v", err)
}
if actual.Time != want.Time {
t.Errorf("created = %v, want %v", actual, want)
}
})
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
feconf.py | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stores various configuration options and constants for Oppia."""
from __future__ import absolute_import # pylint: disable=import-only-modules
import copy
import datetime
import os
from constants import constants
# The datastore model ID for the list of featured activity references. This
# value should not be changed.
ACTIVITY_REFERENCE_LIST_FEATURED = 'featured'
ALL_ACTIVITY_REFERENCE_LIST_TYPES = [ACTIVITY_REFERENCE_LIST_FEATURED]
# The values which a post_commit_status can have: public, private.
POST_COMMIT_STATUS_PUBLIC = 'public'
POST_COMMIT_STATUS_PRIVATE = 'private'
# Whether to unconditionally log info messages.
DEBUG = False
# When DEV_MODE is true, check that we are running in a development environment.
# The SERVER_SOFTWARE environment variable does not exist in Travis, hence the
# need for an explicit check.
if (constants.DEV_MODE and os.getenv('SERVER_SOFTWARE') and
not os.getenv('SERVER_SOFTWARE', default='').startswith('Development')):
raise Exception('DEV_MODE can\'t be true on production.')
CLASSIFIERS_DIR = os.path.join('extensions', 'classifiers')
TESTS_DATA_DIR = os.path.join('core', 'tests', 'data')
SAMPLE_EXPLORATIONS_DIR = os.path.join('data', 'explorations')
SAMPLE_COLLECTIONS_DIR = os.path.join('data', 'collections')
CONTENT_VALIDATION_DIR = os.path.join('core', 'domain')
# backend_prod_files contains processed JS and HTML files that are served by
# Jinja. We are moving away from Jinja, so this folder might not be needed later
# (#6964).
EXTENSIONS_DIR_PREFIX = (
'backend_prod_files' if not constants.DEV_MODE else '')
ACTIONS_DIR = (
os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'actions'))
ISSUES_DIR = (
os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'issues'))
INTERACTIONS_DIR = (
os.path.join('extensions', 'interactions'))
RTE_EXTENSIONS_DIR = (
os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'rich_text_components'))
RTE_EXTENSIONS_DEFINITIONS_PATH = (
os.path.join('assets', 'rich_text_components_definitions.ts'))
OBJECT_TEMPLATES_DIR = os.path.join('extensions', 'objects', 'templates')
# Choose production templates folder when we are in production mode.
FRONTEND_TEMPLATES_DIR = (
os.path.join('webpack_bundles') if constants.DEV_MODE else
os.path.join('backend_prod_files', 'webpack_bundles'))
DEPENDENCIES_TEMPLATES_DIR = (
os.path.join(EXTENSIONS_DIR_PREFIX, 'extensions', 'dependencies'))
VALUE_GENERATORS_DIR_FOR_JS = os.path.join(
'local_compiled_js', 'extensions', 'value_generators')
VALUE_GENERATORS_DIR = os.path.join('extensions', 'value_generators')
VISUALIZATIONS_DIR = os.path.join(
'extensions', 'visualizations')
VISUALIZATIONS_DIR_FOR_JS = os.path.join(
'local_compiled_js', 'extensions', 'visualizations')
OBJECT_DEFAULT_VALUES_FILE_PATH = os.path.join(
'extensions', 'objects', 'object_defaults.json')
RULES_DESCRIPTIONS_FILE_PATH = os.path.join(
os.getcwd(), 'extensions', 'interactions', 'rule_templates.json')
# A mapping of interaction ids to classifier properties.
INTERACTION_CLASSIFIER_MAPPING = {
'TextInput': {
'algorithm_id': 'TextClassifier',
'current_data_schema_version': 1
},
'CodeRepl': {
'algorithm_id': 'CodeClassifier',
'current_data_schema_version': 1
}
}
# Classifier job time to live (in mins).
CLASSIFIER_JOB_TTL_MINS = 5
TRAINING_JOB_STATUS_COMPLETE = 'COMPLETE'
TRAINING_JOB_STATUS_FAILED = 'FAILED'
TRAINING_JOB_STATUS_NEW = 'NEW'
TRAINING_JOB_STATUS_PENDING = 'PENDING'
ALLOWED_TRAINING_JOB_STATUSES = [
TRAINING_JOB_STATUS_COMPLETE,
TRAINING_JOB_STATUS_FAILED,
TRAINING_JOB_STATUS_NEW,
TRAINING_JOB_STATUS_PENDING
]
# The maximum number of characters allowed in a user's bio.
MAX_BIO_LENGTH_IN_CHARS = 2000
ALLOWED_TRAINING_JOB_STATUS_CHANGES = {
TRAINING_JOB_STATUS_COMPLETE: [],
TRAINING_JOB_STATUS_NEW: [TRAINING_JOB_STATUS_PENDING],
TRAINING_JOB_STATUS_PENDING: [TRAINING_JOB_STATUS_COMPLETE,
TRAINING_JOB_STATUS_FAILED],
TRAINING_JOB_STATUS_FAILED: [TRAINING_JOB_STATUS_NEW]
}
ENTITY_TYPE_EXPLORATION = 'exploration'
ENTITY_TYPE_TOPIC = 'topic'
ENTITY_TYPE_SKILL = 'skill'
ENTITY_TYPE_STORY = 'story'
ENTITY_TYPE_SUBTOPIC = 'subtopic'
ENTITY_TYPE_QUESTION = 'question'
# The maximum number of activities allowed in the playlist of the learner. This
# limit applies to both the explorations playlist and the collections playlist.
MAX_LEARNER_PLAYLIST_ACTIVITY_COUNT = 10
# The minimum number of training samples required for training a classifier.
MIN_TOTAL_TRAINING_EXAMPLES = 50
# The minimum number of assigned labels required for training a classifier.
MIN_ASSIGNED_LABELS = 2
# Default label for classification algorithms.
DEFAULT_CLASSIFIER_LABEL = '_default'
# The maximum number of results to retrieve in a datastore query.
DEFAULT_QUERY_LIMIT = 1000
# The maximum number of results to retrieve in a datastore query
# for top rated published explorations in /library page.
NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE = 8
# The maximum number of results to retrieve in a datastore query
# for recently published explorations in /library page.
RECENTLY_PUBLISHED_QUERY_LIMIT_FOR_LIBRARY_PAGE = 8
# The maximum number of results to retrieve in a datastore query
# for top rated published explorations in /library/top_rated page.
NUMBER_OF_TOP_RATED_EXPLORATIONS_FULL_PAGE = 20
# The maximum number of results to retrieve in a datastore query
# for recently published explorations in /library/recently_published page.
RECENTLY_PUBLISHED_QUERY_LIMIT_FULL_PAGE = 20
# The current version of the dashboard stats blob schema. If any backward-
# incompatible changes are made to the stats blob schema in the data store,
# this version number must be changed.
CURRENT_DASHBOARD_STATS_SCHEMA_VERSION = 1
# The current version of the exploration states blob schema. If any backward-
# incompatible changes are made to the states blob schema in the data store,
# this version number must be changed and the exploration migration job
# executed.
CURRENT_STATE_SCHEMA_VERSION = 30
# The current version of the all collection blob schemas (such as the nodes
# structure within the Collection domain object). If any backward-incompatible
# changes are made to any of the blob schemas in the data store, this version
# number must be changed.
CURRENT_COLLECTION_SCHEMA_VERSION = 6
# The current version of story contents dict in the story schema.
CURRENT_STORY_CONTENTS_SCHEMA_VERSION = 1
# The current version of skill contents dict in the skill schema.
CURRENT_SKILL_CONTENTS_SCHEMA_VERSION = 1
# The current version of misconceptions dict in the skill schema.
CURRENT_MISCONCEPTIONS_SCHEMA_VERSION = 1
# The current version of rubric dict in the skill schema.
CURRENT_RUBRIC_SCHEMA_VERSION = 1
# The current version of subtopics dict in the topic schema.
CURRENT_SUBTOPIC_SCHEMA_VERSION = 1
# The current version of story reference dict in the topic schema.
CURRENT_STORY_REFERENCE_SCHEMA_VERSION = 1
# The current version of page_contents dict in the subtopic page schema.
CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION = 1
# This value should be updated in the event of any
# StateAnswersModel.submitted_answer_list schema change.
CURRENT_STATE_ANSWERS_SCHEMA_VERSION = 1
# This value should be updated if the schema of LearnerAnswerInfo
# dict schema changes.
CURRENT_LEARNER_ANSWER_INFO_SCHEMA_VERSION = 1
# The default number of exploration tiles to load at a time in the search
# results page.
SEARCH_RESULTS_PAGE_SIZE = 20
# The default number of commits to show on a page in the exploration history
# tab.
COMMIT_LIST_PAGE_SIZE = 50
# The default number of items to show on a page in the exploration feedback
# tab.
FEEDBACK_TAB_PAGE_SIZE = 20
# The default number of opportunities to show on community dashboard page.
OPPORTUNITIES_PAGE_SIZE = 20
# The maximum number of top unresolved answers which should be aggregated
# from all of the submitted answers.
TOP_UNRESOLVED_ANSWERS_LIMIT = 20
# Default title for a newly-minted exploration.
DEFAULT_EXPLORATION_TITLE = ''
# Default category for a newly-minted exploration.
DEFAULT_EXPLORATION_CATEGORY = ''
# Default objective for a newly-minted exploration.
DEFAULT_EXPLORATION_OBJECTIVE = ''
# NOTE TO DEVELOPERS: If any of the 5 constants below are modified, the
# corresponding field in NEW_STATE_TEMPLATE in constants.js also has to be
# modified.
# Default name for the initial state of an exploration.
DEFAULT_INIT_STATE_NAME = 'Introduction'
# Default content id for the state's content.
DEFAULT_NEW_STATE_CONTENT_ID = 'content'
# Default content id for the interaction's default outcome.
DEFAULT_OUTCOME_CONTENT_ID = 'default_outcome'
# Default content id for the explanation in the concept card of a skill.
DEFAULT_EXPLANATION_CONTENT_ID = 'explanation'
# Default recorded_voiceovers dict for a default state template.
DEFAULT_RECORDED_VOICEOVERS = {
'voiceovers_mapping': {
'content': {},
'default_outcome': {}
}
}
# Default written_translations dict for a default state template.
DEFAULT_WRITTEN_TRANSLATIONS = {
'translations_mapping': {
'content': {},
'default_outcome': {}
}
}
# The default content text for the initial state of an exploration.
DEFAULT_INIT_STATE_CONTENT_STR = ''
# Whether new explorations should have automatic text-to-speech enabled
# by default.
DEFAULT_AUTO_TTS_ENABLED = True
# Default title for a newly-minted collection.
DEFAULT_COLLECTION_TITLE = ''
# Default category for a newly-minted collection.
DEFAULT_COLLECTION_CATEGORY = ''
# Default objective for a newly-minted collection.
DEFAULT_COLLECTION_OBJECTIVE = ''
# Default description for a newly-minted story.
DEFAULT_STORY_DESCRIPTION = ''
# Default notes for a newly-minted story.
DEFAULT_STORY_NOTES = ''
# Default explanation for a newly-minted skill.
DEFAULT_SKILL_EXPLANATION = ''
# Default name for a newly-minted misconception.
DEFAULT_MISCONCEPTION_NAME = ''
# Default notes for a newly-minted misconception.
DEFAULT_MISCONCEPTION_NOTES = ''
# Default feedback for a newly-minted misconception.
DEFAULT_MISCONCEPTION_FEEDBACK = ''
# Default content_id for explanation subtitled html.
DEFAULT_SKILL_EXPLANATION_CONTENT_ID = 'explanation'
# Default description for a newly-minted topic.
DEFAULT_TOPIC_DESCRIPTION = ''
# Default content id for the subtopic page's content.
DEFAULT_SUBTOPIC_PAGE_CONTENT_ID = 'content'
# Default ID of VM which is used for training classifier.
DEFAULT_VM_ID = 'vm_default'
# Shared secret key for default VM.
DEFAULT_VM_SHARED_SECRET = '1a2b3c4e'
# A dict mapping the accepted image formats (as determined by the imghdr
# module) to the corresponding allowed extensions in the filenames of uploaded
# images.
ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS = {
'jpeg': ['jpg', 'jpeg'],
'png': ['png'],
'gif': ['gif'],
}
# A dict mapping the accepted audio extensions for uploaded files to
# the corresponding MIME types.
ACCEPTED_AUDIO_EXTENSIONS = {
'mp3': ['audio/mp3']
}
# Prefix for data sent from the server to the client via JSON.
XSSI_PREFIX = ')]}\'\n'
# A regular expression for alphanumeric characters.
ALPHANUMERIC_REGEX = r'^[A-Za-z0-9]+$'
# These are here rather than in rating_services.py to avoid import
# circularities with exp_services.
# TODO(Jacob): Refactor exp_services to remove this problem.
_EMPTY_RATINGS = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0}
def get_empty_ratings():
"""Returns a copy of the empty ratings object.
Returns:
dict. Copy of the '_EMPTY_RATINGS' dict object which contains the empty
ratings.
"""
return copy.deepcopy(_EMPTY_RATINGS)
# Empty scaled average rating as a float.
EMPTY_SCALED_AVERAGE_RATING = 0.0
# To use GAE email service.
EMAIL_SERVICE_PROVIDER_GAE = 'gae_email_service'
# To use mailgun email service.
EMAIL_SERVICE_PROVIDER_MAILGUN = 'mailgun_email_service'
# Use GAE email service by default.
EMAIL_SERVICE_PROVIDER = EMAIL_SERVICE_PROVIDER_GAE
# If the Mailgun email API is used, the "None" below should be replaced
# with the Mailgun API key.
MAILGUN_API_KEY = None
# If the Mailgun email API is used, the "None" below should be replaced
# with the Mailgun domain name (ending with mailgun.org).
MAILGUN_DOMAIN_NAME = None
# Committer id for system actions.
SYSTEM_COMMITTER_ID = 'admin'
# Domain name for email address.
INCOMING_EMAILS_DOMAIN_NAME = 'example.com'
SYSTEM_EMAIL_ADDRESS = '[email protected]'
SYSTEM_EMAIL_NAME = '.'
ADMIN_EMAIL_ADDRESS = '[email protected]'
NOREPLY_EMAIL_ADDRESS = '[email protected]'
# Ensure that SYSTEM_EMAIL_ADDRESS and ADMIN_EMAIL_ADDRESS are both valid and
# correspond to owners of the app before setting this to True. If
# SYSTEM_EMAIL_ADDRESS is not that of an app owner, email messages from this
# address cannot be sent. If True then emails can be sent to any user.
CAN_SEND_EMAILS = False
# If you want to turn on this facility please check the email templates in the
# send_role_notification_email() function in email_manager.py and modify them
# accordingly.
CAN_SEND_EDITOR_ROLE_EMAILS = False
# If enabled then emails will be sent to creators for feedback messages.
CAN_SEND_FEEDBACK_MESSAGE_EMAILS = False
# If enabled subscription emails will be sent to that user.
CAN_SEND_SUBSCRIPTION_EMAILS = False
# Time to wait before sending feedback message emails (currently set to 1
# hour).
DEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS = 3600
# Whether to send an email when new feedback message is received for
# an exploration.
DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE = True
# Whether to send an email to all the creator's subscribers when he/she
# publishes an exploration.
DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE = True
# Whether exploration feedback emails are muted,
# when the user has not specified a preference.
DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE = False
# Whether exploration suggestion emails are muted,
# when the user has not specified a preference.
DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE = False
# Whether to send email updates to a user who has not specified a preference.
DEFAULT_EMAIL_UPDATES_PREFERENCE = False
# Whether to send an invitation email when the user is granted
# new role permissions in an exploration.
DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE = True
# Whether to require an email to be sent, following a moderator action.
REQUIRE_EMAIL_ON_MODERATOR_ACTION = False
# Timespan in minutes before allowing duplicate emails.
DUPLICATE_EMAIL_INTERVAL_MINS = 2
# Number of digits after decimal to which the average ratings value in the
# dashboard is rounded off to.
AVERAGE_RATINGS_DASHBOARD_PRECISION = 2
# Whether to enable maintenance mode on the site. For non-admins, this redirects
# all HTTP requests to the maintenance page. This is the only check which
# determines whether the site is in maintenance mode to avoid queries to the
# database by non-admins.
ENABLE_MAINTENANCE_MODE = False
# Whether community dashboard is ready to use for contributors.
COMMUNITY_DASHBOARD_ENABLED = False
# The interactions permissible for a question.
ALLOWED_QUESTION_INTERACTION_IDS = [
'TextInput', 'MultipleChoiceInput', 'NumericInput']
# Flag to disable sending emails related to reviews for suggestions. To be
# flipped after deciding (and implementing) whether a user should be scored
# only for curated lessons.
SEND_SUGGESTION_REVIEW_RELATED_EMAILS = False
# To prevent recording scores for users until details such as whether to score
# users only for curated lessons are confirmed.
ENABLE_RECORDING_OF_SCORES = False
# No. of pretest questions to display.
NUM_PRETEST_QUESTIONS = 3
# Whether to automatically accept suggestions after a threshold time.
ENABLE_AUTO_ACCEPT_OF_SUGGESTIONS = False
EMAIL_INTENT_SIGNUP = 'signup'
EMAIL_INTENT_DAILY_BATCH = 'daily_batch'
EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION = 'editor_role_notification'
EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION = 'feedback_message_notification'
EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION = 'subscription_notification'
EMAIL_INTENT_SUGGESTION_NOTIFICATION = 'suggestion_notification'
EMAIL_INTENT_REPORT_BAD_CONTENT = 'report_bad_content'
EMAIL_INTENT_MARKETING = 'marketing'
EMAIL_INTENT_UNPUBLISH_EXPLORATION = 'unpublish_exploration'
EMAIL_INTENT_DELETE_EXPLORATION = 'delete_exploration'
EMAIL_INTENT_QUERY_STATUS_NOTIFICATION = 'query_status_notification'
EMAIL_INTENT_ONBOARD_REVIEWER = 'onboard_reviewer'
EMAIL_INTENT_REVIEW_SUGGESTIONS = 'review_suggestions'
# Possible intents for email sent in bulk.
BULK_EMAIL_INTENT_MARKETING = 'bulk_email_marketing'
BULK_EMAIL_INTENT_IMPROVE_EXPLORATION = 'bulk_email_improve_exploration'
BULK_EMAIL_INTENT_CREATE_EXPLORATION = 'bulk_email_create_exploration'
BULK_EMAIL_INTENT_CREATOR_REENGAGEMENT = 'bulk_email_creator_reengagement'
BULK_EMAIL_INTENT_LEARNER_REENGAGEMENT = 'bulk_email_learner_reengagement'
BULK_EMAIL_INTENT_TEST = 'bulk_email_test'
MESSAGE_TYPE_FEEDBACK = 'feedback'
MESSAGE_TYPE_SUGGESTION = 'suggestion'
MODERATOR_ACTION_UNPUBLISH_EXPLORATION = 'unpublish_exploration'
DEFAULT_SALUTATION_HTML_FN = (
lambda recipient_username: 'Hi %s,' % recipient_username)
DEFAULT_SIGNOFF_HTML_FN = (
lambda sender_username: (
'Thanks!<br>%s (Oppia moderator)' % sender_username))
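# For example, DEFAULT_SALUTATION_HTML_FN('alice') evaluates to 'Hi alice,' and
# DEFAULT_SIGNOFF_HTML_FN('bob') to 'Thanks!<br>bob (Oppia moderator)'.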
VALID_MODERATOR_ACTIONS = {
MODERATOR_ACTION_UNPUBLISH_EXPLORATION: {
'email_config': 'unpublish_exploration_email_html_body',
'email_subject_fn': (
lambda exp_title: (
'Your Oppia exploration "%s" has been unpublished' % exp_title)
),
'email_intent': 'unpublish_exploration',
'email_salutation_html_fn': DEFAULT_SALUTATION_HTML_FN,
'email_signoff_html_fn': DEFAULT_SIGNOFF_HTML_FN,
},
}
# When the site terms were last updated, in UTC.
REGISTRATION_PAGE_LAST_UPDATED_UTC = datetime.datetime(2015, 10, 14, 2, 40, 0)
# Format of string for dashboard statistics logs.
# NOTE TO DEVELOPERS: This format should not be changed, since it is used in
# the existing storage models for UserStatsModel.
DASHBOARD_STATS_DATETIME_STRING_FORMAT = '%Y-%m-%d'
# The maximum size of an uploaded file, in bytes.
MAX_FILE_SIZE_BYTES = 1048576
# The maximum playback length of an audio file, in seconds.
MAX_AUDIO_FILE_LENGTH_SEC = 300
# The maximum number of questions to be fetched at one time.
MAX_QUESTIONS_FETCHABLE_AT_ONE_TIME = 20
# The minimum score required for a user to review suggestions of a particular
# category.
MINIMUM_SCORE_REQUIRED_TO_REVIEW = 10
# The numeric value for the medium question skill difficulty.
MEDIUM_SKILL_DIFFICULTY = 0.6
# The maximum number of skills to be requested at one time when fetching
# questions.
MAX_NUMBER_OF_SKILL_IDS = 20
# The prefix for an 'accepted suggestion' commit message.
COMMIT_MESSAGE_ACCEPTED_SUGGESTION_PREFIX = 'Accepted suggestion by'
# User id and username for exploration migration bot. Commits made by this bot
# are not reflected in the exploration summary models, but are recorded in the
# exploration commit log.
MIGRATION_BOT_USER_ID = 'OppiaMigrationBot'
MIGRATION_BOT_USERNAME = 'OppiaMigrationBot'
# User id and username for suggestion bot. This bot will be used to accept
# suggestions automatically after a threshold time.
SUGGESTION_BOT_USER_ID = 'OppiaSuggestionBot'
SUGGESTION_BOT_USERNAME = 'OppiaSuggestionBot'
# Ids and locations of the permitted extensions.
ALLOWED_RTE_EXTENSIONS = {
'Collapsible': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Collapsible')
},
'Image': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Image')
},
'Link': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Link')
},
'Math': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Math')
},
'Tabs': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Tabs')
},
'Video': {
'dir': os.path.join(RTE_EXTENSIONS_DIR, 'Video')
},
}
# The list of interaction IDs which correspond to interactions that set their
# is_linear property to true. Linear interactions do not support branching and
# thus only allow for default answer classification. This value is guarded by a
# test in extensions.interactions.base_test.
LINEAR_INTERACTION_IDS = ['Continue']
# Demo explorations to load through the admin panel. The id assigned to each
# exploration is based on the key of the exploration in this dict, so ensure it
# doesn't change once it's in the list. Only integer-based indices should be
# used in this list, as it maintains backward compatibility with how demo
# explorations used to be assigned IDs. The value of each entry in this dict is
# either a YAML file or a directory (depending on whether it ends in .yaml).
# These explorations can be found under data/explorations.
DEMO_EXPLORATIONS = {
u'0': 'welcome.yaml',
u'1': 'multiples.yaml',
u'2': 'binary_search',
u'3': 'root_linear_coefficient_theorem.yaml',
u'4': 'three_balls',
# TODO(bhenning): Replace demo exploration '5' with a new exploration
# described in #1376.
u'6': 'boot_verbs.yaml',
u'7': 'hola.yaml',
u'8': 'adventure.yaml',
u'9': 'pitch_perfect.yaml',
u'10': 'test_interactions',
u'11': 'modeling_graphs',
u'12': 'protractor_test_1.yaml',
u'13': 'solar_system',
u'14': 'about_oppia.yaml',
u'15': 'classifier_demo_exploration.yaml',
u'16': 'all_interactions',
u'17': 'audio_test',
u'18': 'code_classifier_test.yaml',
u'19': 'example_exploration_in_collection1.yaml',
u'20': 'example_exploration_in_collection2.yaml',
u'21': 'example_exploration_in_collection3.yaml',
u'22': 'protractor_mobile_test_exploration.yaml',
u'23': 'rating_test.yaml',
u'24': 'learner_flow_test.yaml',
u'25': 'exploration_player_test.yaml',
}
DEMO_COLLECTIONS = {
u'0': 'welcome_to_collections.yaml',
u'1': 'learner_flow_test_collection.yaml'
}
# IDs of explorations which should not be displayable in either the learner or
# editor views.
DISABLED_EXPLORATION_IDS = ['5']
# Oppia Google Group URL.
GOOGLE_GROUP_URL = (
'https://groups.google.com/forum/?place=forum/oppia#!forum/oppia')
# External URL for the Foundation site.
FOUNDATION_SITE_URL = 'http://oppiafoundation.org'
# Prefix for all taskqueue-related URLs.
TASKQUEUE_URL_PREFIX = '/task'
TASK_URL_FEEDBACK_MESSAGE_EMAILS = (
'%s/email/batchfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_FEEDBACK_STATUS_EMAILS = (
'%s/email/feedbackthreadstatuschangeemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_FLAG_EXPLORATION_EMAILS = (
'%s/email/flagexplorationemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_INSTANT_FEEDBACK_EMAILS = (
'%s/email/instantfeedbackmessageemailhandler' % TASKQUEUE_URL_PREFIX)
TASK_URL_SUGGESTION_EMAILS = (
'%s/email/suggestionemailhandler' % TASKQUEUE_URL_PREFIX)
# TODO(sll): Add all other URLs here.
ADMIN_URL = '/admin'
ADMIN_ROLE_HANDLER_URL = '/adminrolehandler'
COLLECTION_DATA_URL_PREFIX = '/collection_handler/data'
COLLECTION_EDITOR_DATA_URL_PREFIX = '/collection_editor_handler/data'
COLLECTION_SUMMARIES_DATA_URL = '/collectionsummarieshandler/data'
COLLECTION_RIGHTS_PREFIX = '/collection_editor_handler/rights'
COLLECTION_PUBLISH_PREFIX = '/collection_editor_handler/publish'
COLLECTION_UNPUBLISH_PREFIX = '/collection_editor_handler/unpublish'
COLLECTION_EDITOR_URL_PREFIX = '/collection_editor/create'
COLLECTION_URL_PREFIX = '/collection'
COMMUNITY_DASHBOARD_URL = '/community_dashboard'
COMMUNITY_OPPORTUNITIES_DATA_URL = '/opportunitiessummaryhandler'
CONCEPT_CARD_DATA_URL_PREFIX = '/concept_card_handler'
CREATOR_DASHBOARD_DATA_URL = '/creatordashboardhandler/data'
CREATOR_DASHBOARD_URL = '/creator_dashboard'
CSRF_HANDLER_URL = '/csrfhandler'
CUSTOM_NONPROFITS_LANDING_PAGE_URL = '/nonprofits'
CUSTOM_PARENTS_LANDING_PAGE_URL = '/parents'
CUSTOM_PARTNERS_LANDING_PAGE_URL = '/partners'
CUSTOM_TEACHERS_LANDING_PAGE_URL = '/teachers'
CUSTOM_VOLUNTEERS_LANDING_PAGE_URL = '/volunteers'
DASHBOARD_CREATE_MODE_URL = '%s?mode=create' % CREATOR_DASHBOARD_URL
EDITOR_URL_PREFIX = '/create'
EXPLORATION_DATA_PREFIX = '/createhandler/data'
EXPLORATION_FEATURES_PREFIX = '/explorehandler/features'
EXPLORATION_INIT_URL_PREFIX = '/explorehandler/init'
EXPLORATION_LEARNER_ANSWER_DETAILS = (
'/learneranswerinfohandler/learner_answer_details')
EXPLORATION_METADATA_SEARCH_URL = '/exploration/metadata_search'
EXPLORATION_PRETESTS_URL_PREFIX = '/pretest_handler'
EXPLORATION_RIGHTS_PREFIX = '/createhandler/rights'
EXPLORATION_STATE_ANSWER_STATS_PREFIX = '/createhandler/state_answer_stats'
EXPLORATION_STATUS_PREFIX = '/createhandler/status'
EXPLORATION_SUMMARIES_DATA_URL = '/explorationsummarieshandler/data'
EXPLORATION_URL_PREFIX = '/explore'
EXPLORATION_URL_EMBED_PREFIX = '/embed/exploration'
FEEDBACK_STATS_URL_PREFIX = '/feedbackstatshandler'
FEEDBACK_THREAD_URL_PREFIX = '/threadhandler'
FEEDBACK_THREADLIST_URL_PREFIX = '/threadlisthandler'
FEEDBACK_THREADLIST_URL_PREFIX_FOR_TOPICS = '/threadlisthandlerfortopic'
FEEDBACK_THREAD_VIEW_EVENT_URL = '/feedbackhandler/thread_view_event'
FLAG_EXPLORATION_URL_PREFIX = '/flagexplorationhandler'
FRACTIONS_LANDING_PAGE_URL = '/fractions'
TOPIC_LANDING_PAGE_URL = '/learn/<subject>/<topic>'
LEARNER_ANSWER_INFO_HANDLER_URL = (
'/learneranswerinfohandler/learner_answer_details')
LEARNER_ANSWER_DETAILS_SUBMIT_URL = '/learneranswerdetailshandler'
LEARNER_DASHBOARD_URL = '/learner_dashboard'
LEARNER_DASHBOARD_DATA_URL = '/learnerdashboardhandler/data'
LEARNER_DASHBOARD_IDS_DATA_URL = '/learnerdashboardidshandler/data'
LEARNER_DASHBOARD_FEEDBACK_THREAD_DATA_URL = '/learnerdashboardthreadhandler'
LEARNER_PLAYLIST_DATA_URL = '/learnerplaylistactivityhandler'
LEARNER_INCOMPLETE_ACTIVITY_DATA_URL = '/learnerincompleteactivityhandler'
LIBRARY_GROUP_DATA_URL = '/librarygrouphandler'
LIBRARY_INDEX_URL = '/library'
LIBRARY_INDEX_DATA_URL = '/libraryindexhandler'
LIBRARY_RECENTLY_PUBLISHED_URL = '/library/recently_published'
LIBRARY_SEARCH_URL = '/search/find'
LIBRARY_SEARCH_DATA_URL = '/searchhandler/data'
LIBRARY_TOP_RATED_URL = '/library/top_rated'
MERGE_SKILLS_URL = '/merge_skills_handler'
NEW_COLLECTION_URL = '/collection_editor_handler/create_new'
NEW_EXPLORATION_URL = '/contributehandler/create_new'
NEW_QUESTION_URL = '/question_editor_handler/create_new'
NEW_SKILL_URL = '/skill_editor_handler/create_new'
TOPIC_EDITOR_STORY_URL = '/topic_editor_story_handler'
TOPIC_EDITOR_QUESTION_URL = '/topic_editor_question_handler'
NEW_TOPIC_URL = '/topic_editor_handler/create_new'
NOTIFICATIONS_DASHBOARD_URL = '/notifications_dashboard'
PREFERENCES_URL = '/preferences'
PRACTICE_SESSION_URL_PREFIX = '/practice_session'
PRACTICE_SESSION_DATA_URL_PREFIX = '/practice_session/data'
PREFERENCES_DATA_URL = '/preferenceshandler/data'
QUESTION_EDITOR_DATA_URL_PREFIX = '/question_editor_handler/data'
QUESTION_SKILL_LINK_URL_PREFIX = '/manage_question_skill_link'
QUESTIONS_LIST_URL_PREFIX = '/questions_list_handler'
QUESTIONS_URL_PREFIX = '/question_player_handler'
RECENT_COMMITS_DATA_URL = '/recentcommitshandler/recent_commits'
RECENT_FEEDBACK_MESSAGES_DATA_URL = '/recent_feedback_messages'
REVIEW_TEST_DATA_URL_PREFIX = '/review_test_handler/data'
REVIEW_TEST_URL_PREFIX = '/review_test'
ROBOTS_TXT_URL = '/robots.txt'
SITE_LANGUAGE_DATA_URL = '/save_site_language'
SIGNUP_DATA_URL = '/signuphandler/data'
SIGNUP_URL = '/signup'
SKILL_DATA_URL_PREFIX = '/skill_data_handler'
SKILL_EDITOR_DATA_URL_PREFIX = '/skill_editor_handler/data'
SKILL_EDITOR_URL_PREFIX = '/skill_editor'
SKILL_EDITOR_QUESTION_URL = '/skill_editor_question_handler'
SKILL_MASTERY_DATA_URL = '/skill_mastery_handler/data'
SKILL_PUBLISH_URL_PREFIX = '/skill_editor_handler/publish_skill'
SKILL_RIGHTS_URL_PREFIX = '/skill_editor_handler/rights'
SPLASH_URL = '/splash'
STORY_DATA_HANDLER = '/story_data_handler'
STORY_EDITOR_URL_PREFIX = '/story_editor'
STORY_EDITOR_DATA_URL_PREFIX = '/story_editor_handler/data'
STORY_NODE_COMPLETION_URL_PREFIX = '/story_node_completion_handler'
STORY_PUBLISH_HANDLER = '/story_publish_handler'
STORY_VIEWER_URL_PREFIX = '/story'
SUBTOPIC_DATA_HANDLER = '/subtopic_data_handler'
SUBTOPIC_VIEWER_URL_PREFIX = '/subtopic'
SUGGESTION_ACTION_URL_PREFIX = '/suggestionactionhandler'
SUGGESTION_LIST_URL_PREFIX = '/suggestionlisthandler'
SUGGESTION_URL_PREFIX = '/suggestionhandler'
SUBSCRIBE_URL_PREFIX = '/subscribehandler'
SUBTOPIC_PAGE_EDITOR_DATA_URL_PREFIX = '/subtopic_page_editor_handler/data'
TOPIC_VIEWER_URL_PREFIX = '/topic'
TOPIC_DATA_HANDLER = '/topic_data_handler'
TOPIC_EDITOR_DATA_URL_PREFIX = '/topic_editor_handler/data'
TOPIC_EDITOR_URL_PREFIX = '/topic_editor'
TOPIC_RIGHTS_URL_PREFIX = '/rightshandler/get_topic_rights'
TOPIC_SEND_MAIL_URL_PREFIX = '/rightshandler/send_topic_publish_mail'
TOPIC_STATUS_URL_PREFIX = '/rightshandler/change_topic_status'
TOPICS_AND_SKILLS_DASHBOARD_DATA_URL = '/topics_and_skills_dashboard/data'
TOPICS_AND_SKILLS_DASHBOARD_URL = '/topics_and_skills_dashboard'
UNSUBSCRIBE_URL_PREFIX = '/unsubscribehandler'
UPLOAD_EXPLORATION_URL = '/contributehandler/upload'
USER_EXPLORATION_EMAILS_PREFIX = '/createhandler/notificationpreferences'
USER_PERMISSIONS_URL_PREFIX = '/createhandler/permissions'
USERNAME_CHECK_DATA_URL = '/usernamehandler/data'
# Event types.
EVENT_TYPE_ALL_STATS = 'all_stats'
EVENT_TYPE_STATE_HIT = 'state_hit'
EVENT_TYPE_STATE_COMPLETED = 'state_complete'
EVENT_TYPE_ANSWER_SUBMITTED = 'answer_submitted'
EVENT_TYPE_DEFAULT_ANSWER_RESOLVED = 'default_answer_resolved'
EVENT_TYPE_NEW_THREAD_CREATED = 'feedback_thread_created'
EVENT_TYPE_THREAD_STATUS_CHANGED = 'feedback_thread_status_changed'
EVENT_TYPE_RATE_EXPLORATION = 'rate_exploration'
EVENT_TYPE_SOLUTION_HIT = 'solution_hit'
EVENT_TYPE_LEAVE_FOR_REFRESHER_EXP = 'leave_for_refresher_exp'
# The values for these event types should be left as-is for backwards
# compatibility.
EVENT_TYPE_START_EXPLORATION = 'start'
EVENT_TYPE_ACTUAL_START_EXPLORATION = 'actual_start'
EVENT_TYPE_MAYBE_LEAVE_EXPLORATION = 'leave'
EVENT_TYPE_COMPLETE_EXPLORATION = 'complete'
# Play type constants.
PLAY_TYPE_PLAYTEST = 'playtest'
PLAY_TYPE_NORMAL = 'normal'
# Predefined commit messages.
COMMIT_MESSAGE_EXPLORATION_DELETED = 'Exploration deleted.'
COMMIT_MESSAGE_COLLECTION_DELETED = 'Collection deleted.'
COMMIT_MESSAGE_QUESTION_DELETED = 'Question deleted.'
COMMIT_MESSAGE_SKILL_DELETED = 'Skill deleted.'
COMMIT_MESSAGE_STORY_DELETED = 'Story deleted.'
COMMIT_MESSAGE_SUBTOPIC_PAGE_DELETED = 'Subtopic page deleted.'
COMMIT_MESSAGE_TOPIC_DELETED = 'Topic deleted.'
# Max number of playthroughs for an issue.
MAX_PLAYTHROUGHS_FOR_ISSUE = 5
# Number of unresolved answers to be displayed in the dashboard for each
# exploration.
TOP_UNRESOLVED_ANSWERS_COUNT_DASHBOARD = 3
# Number of open feedback to be displayed in the dashboard for each exploration.
OPEN_FEEDBACK_COUNT_DASHBOARD = 3
# NOTE TO DEVELOPERS: This should be synchronized with App.js.
ENABLE_ML_CLASSIFIERS = False
SHOW_COLLECTION_NAVIGATION_TAB_HISTORY = False
SHOW_COLLECTION_NAVIGATION_TAB_STATS = False
# The regular expression used to identify whether a string contains a float
# value. This regex must match the regex stored in the vmconf.py file of
# Oppia-ml. If this regex needs to be modified, first shut down the Oppia-ml VM,
# then update the regex constant both here and in Oppia-ml.
# Run any migration job that is required to migrate existing trained models
# before starting Oppia-ml again.
FLOAT_VERIFIER_REGEX = (
'^([-+]?\\d*\\.\\d+)$|^([-+]?(\\d*\\.?\\d+|\\d+\\.?\\d*)e[-+]?\\d*)$')
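# For illustration, strings such as '3.5', '-0.25' and '1.2e-3' match this
# pattern, while a plain integer string like '3' does not.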
# Current event models schema version. All event models with an
# event_schema_version of 1 are the events collected before the rework of the
# statistics framework which brought about the recording of new event models;
# these models include all models recorded before Feb 2018.
CURRENT_EVENT_MODELS_SCHEMA_VERSION = 2
# Output formats of downloaded explorations.
OUTPUT_FORMAT_JSON = 'json'
OUTPUT_FORMAT_ZIP = 'zip'
# Types of updates shown in the 'recent updates' table in the dashboard page.
UPDATE_TYPE_EXPLORATION_COMMIT = 'exploration_commit'
UPDATE_TYPE_COLLECTION_COMMIT = 'collection_commit'
UPDATE_TYPE_FEEDBACK_MESSAGE = 'feedback_thread'
# Possible values for user query status.
# Valid status transitions are: processing --> completed --> archived
# or processing --> failed.
USER_QUERY_STATUS_PROCESSING = 'processing'
USER_QUERY_STATUS_COMPLETED = 'completed'
USER_QUERY_STATUS_ARCHIVED = 'archived'
USER_QUERY_STATUS_FAILED = 'failed'
# The time difference within which two login events are considered "close". This
# is taken to be 12 hours.
PROXIMAL_TIMEDELTA_SECS = 12 * 60 * 60
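# i.e. 43200 seconds.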
# The i18n id for the header of the "Featured Activities" category in the
# library index page.
LIBRARY_CATEGORY_FEATURED_ACTIVITIES = 'I18N_LIBRARY_GROUPS_FEATURED_ACTIVITIES'
# The i18n id for the header of the "Top Rated Explorations" category in the
# library index page.
LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS = (
'I18N_LIBRARY_GROUPS_TOP_RATED_EXPLORATIONS')
# The i18n id for the header of the "Recently Published" category in the
# library index page.
LIBRARY_CATEGORY_RECENTLY_PUBLISHED = 'I18N_LIBRARY_GROUPS_RECENTLY_PUBLISHED'
# The group name that appears at the end of the url for the recently published
# page.
LIBRARY_GROUP_RECENTLY_PUBLISHED = 'recently_published'
# The group name that appears at the end of the url for the top rated page.
LIBRARY_GROUP_TOP_RATED = 'top_rated'
# Defaults for topic similarities.
DEFAULT_TOPIC_SIMILARITY = 0.5
SAME_TOPIC_SIMILARITY = 1.0
# The following are all page descriptions for the meta tag.
CREATE_PAGE_DESCRIPTION = (
'Help others learn new things. Create lessons through explorations and '
'share your knowledge with the community.')
CREATOR_DASHBOARD_PAGE_DESCRIPTION = (
'Keep track of the lessons you have created, as well as feedback from '
'learners.')
LIBRARY_GROUP_PAGE_DESCRIPTION = (
'Discover top-rated or recently-published explorations on Oppia. Learn '
'from these explorations or help improve an existing one for the '
'community.')
# The type of the response returned by a handler when an exception is raised.
HANDLER_TYPE_HTML = 'html'
HANDLER_TYPE_JSON = 'json'
HANDLER_TYPE_DOWNLOADABLE = 'downloadable'
# Following are the constants for the role IDs.
ROLE_ID_GUEST = 'GUEST'
ROLE_ID_BANNED_USER = 'BANNED_USER'
ROLE_ID_EXPLORATION_EDITOR = 'EXPLORATION_EDITOR'
ROLE_ID_COLLECTION_EDITOR = 'COLLECTION_EDITOR'
ROLE_ID_TOPIC_MANAGER = 'TOPIC_MANAGER'
ROLE_ID_MODERATOR = 'MODERATOR'
ROLE_ID_ADMIN = 'ADMIN'
# Intent of the User making query to role structure via admin interface. Used
# to store audit data regarding queries to role IDs.
ROLE_ACTION_UPDATE = 'update'
ROLE_ACTION_VIEW_BY_USERNAME = 'view_by_username'
ROLE_ACTION_VIEW_BY_ROLE = 'view_by_role'
VIEW_METHOD_ROLE = 'role'
VIEW_METHOD_USERNAME = 'username'
QUESTION_BATCH_SIZE = 10
STATE_ANSWER_STATS_MIN_FREQUENCY = 2
RTE_FORMAT_TEXTANGULAR = 'text-angular'
RTE_FORMAT_CKEDITOR = 'ck-editor'
# RTE content specifications according to the type of the editor.
RTE_CONTENT_SPEC = {
'RTE_TYPE_TEXTANGULAR': {
# Valid parent-child relation in TextAngular.
'ALLOWED_PARENT_LIST': {
'p': ['blockquote', 'div', 'pre', '[document]', 'ol', 'ul', 'li'],
'b': ['i', 'li', 'p', 'pre'],
'br': ['b', 'i', 'li', 'p'],
'i': ['b', 'li', 'p', 'pre'],
'li': ['ol', 'ul'],
'ol': ['ol', 'ul', 'blockquote', 'li', 'pre', 'div', '[document]'],
'ul': ['ol', 'ul', 'blockquote', 'li', 'pre', 'div', '[document]'],
'pre': ['ol', 'ul', 'blockquote', '[document]'],
'blockquote': ['blockquote', '[document]'],
'oppia-noninteractive-link': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-math': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-image': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-collapsible': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-video': ['b', 'i', 'li', 'p', 'pre'],
'oppia-noninteractive-tabs': ['b', 'i', 'li', 'p', 'pre']
},
# Valid html tags in TextAngular.
'ALLOWED_TAG_LIST': [
'p',
'b',
'br',
'i',
'li',
'ol',
'ul',
'pre',
'blockquote',
'oppia-noninteractive-link',
'oppia-noninteractive-math',
'oppia-noninteractive-image',
'oppia-noninteractive-collapsible',
'oppia-noninteractive-video',
'oppia-noninteractive-tabs'
]
},
'RTE_TYPE_CKEDITOR': {
# Valid parent-child relation in CKEditor.
'ALLOWED_PARENT_LIST': {
'p': ['blockquote', '[document]', 'li'],
'strong': ['em', 'li', 'p', 'pre'],
'em': ['strong', 'li', 'p', 'pre'],
'br': ['strong', 'em', 'li', 'p'],
'li': ['ol', 'ul'],
'ol': ['li', 'blockquote', 'pre', '[document]'],
'ul': ['li', 'blockquote', 'pre', '[document]'],
'pre': ['ol', 'ul', 'blockquote', 'li', '[document]'],
'blockquote': ['blockquote', '[document]'],
'oppia-noninteractive-link': ['strong', 'em', 'li', 'p', 'pre'],
'oppia-noninteractive-math': ['strong', 'em', 'li', 'p', 'pre'],
'oppia-noninteractive-image': ['blockquote', 'li', '[document]'],
'oppia-noninteractive-collapsible': [
'blockquote', 'li', '[document]'
],
'oppia-noninteractive-video': ['blockquote', 'li', '[document]'],
'oppia-noninteractive-tabs': ['blockquote', 'li', '[document]']
},
# Valid html tags in CKEditor.
'ALLOWED_TAG_LIST': [
'p',
'strong',
'br',
'em',
'li',
'ol',
'ul',
'pre',
'blockquote',
'oppia-noninteractive-link',
'oppia-noninteractive-math',
'oppia-noninteractive-image',
'oppia-noninteractive-collapsible',
'oppia-noninteractive-video',
'oppia-noninteractive-tabs'
]
}
}
# A dict representing available landing pages, having subject as a key and list
# of topics as the value.
# Note: This dict needs to be kept in sync with the frontend TOPIC_LANDING_PAGE_DATA
# oppia constant defined in
# core/templates/dev/head/pages/landing-pages/TopicLandingPage.js file.
AVAILABLE_LANDING_PAGES = {
'maths': ['fractions', 'negative-numbers', 'ratios']
}
| []
| []
| [
"SERVER_SOFTWARE"
]
| [] | ["SERVER_SOFTWARE"] | python | 1 | 0 | |
scripts/ci/check_compliance.py | #!/usr/bin/env python3
# Copyright (c) 2018,2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import collections
import sys
import subprocess
import re
import os
from email.utils import parseaddr
import logging
import argparse
from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error, Failure, Attr
import tempfile
import traceback
import magic
import shlex
from pathlib import Path
# '*' makes it italic
EDIT_TIP = "\n\n*Tip: The bot edits this comment instead of posting a new " \
"one, so you can check the comment's history to see earlier " \
"messages.*"
logger = None
# This ends up as None when we're not running in a Zephyr tree
ZEPHYR_BASE = os.environ.get('ZEPHYR_BASE')
def git(*args, cwd=None):
# Helper for running a Git command. Returns the rstrip()ed stdout output.
# Called like git("diff"). Exits with SystemError (raised by sys.exit()) on
# errors. 'cwd' is the working directory to use (default: current
# directory).
git_cmd = ("git",) + args
try:
git_process = subprocess.Popen(
git_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
except OSError as e:
err(f"failed to run '{cmd2str(git_cmd)}': {e}")
stdout, stderr = git_process.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
if git_process.returncode or stderr:
err(f"""\
'{cmd2str(git_cmd)}' exited with status {git_process.returncode} and/or wrote
to stderr.
==stdout==
{stdout}
==stderr==
{stderr}""")
return stdout.rstrip()
def get_shas(refspec):
"""
Returns the list of Git SHAs for 'refspec'.
:param refspec:
:return:
"""
return git('rev-list',
'--max-count={}'.format(-1 if "." in refspec else 1),
refspec).split()
class MyCase(TestCase):
"""
Custom junitparser.TestCase for our tests that adds some extra <testcase>
XML attributes. These will be preserved when tests are saved and loaded.
"""
classname = Attr()
# Remembers informational messages. These can appear on successful tests
# too, where TestCase.result isn't set.
info_msg = Attr()
class ComplianceTest:
"""
Base class for tests. Inheriting classes should have a run() method and set
these class variables:
name:
Test name
doc:
Link to documentation related to what's being tested
path_hint:
The path the test runs itself in. This is just informative and used in
the message that gets printed when running the test.
The magic string "<git-top>" refers to the top-level repository
directory. This avoids running 'git' to find the top-level directory
before main() runs (class variable assignments run when the 'class ...'
statement runs). That avoids swallowing errors, because main() reports
them to GitHub.
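
    A minimal, purely illustrative subclass (something_is_wrong() stands in
    for whatever condition is actually being checked) might look like:

        class MyCheck(ComplianceTest):
            name = "MyCheck"
            doc = "Link to the relevant documentation"
            path_hint = "<git-top>"

            def run(self):
                if something_is_wrong():
                    self.add_failure("Description of the problem")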
"""
def __init__(self):
self.case = MyCase(self.name)
self.case.classname = "Guidelines"
def error(self, msg):
"""
Signals a problem with running the test, with message 'msg'.
Raises an exception internally, so you do not need to put a 'return'
after error().
Any failures generated prior to the error() are included automatically
in the message. Usually, any failures would indicate problems with the
test code.
"""
if self.case.result:
msg += "\n\nFailures before error: " + self.case.result._elem.text
self.case.result = Error(msg, "error")
raise EndTest
def skip(self, msg):
"""
Signals that the test should be skipped, with message 'msg'.
Raises an exception internally, so you do not need to put a 'return'
after skip().
Any failures generated prior to the skip() are included automatically
in the message. Usually, any failures would indicate problems with the
test code.
"""
if self.case.result:
msg += "\n\nFailures before skip: " + self.case.result._elem.text
self.case.result = Skipped(msg, "skipped")
raise EndTest
def add_failure(self, msg):
"""
Signals that the test failed, with message 'msg'. Can be called many
times within the same test to report multiple failures.
"""
if not self.case.result:
# First reported failure
self.case.result = Failure(self.name + " issues", "failure")
self.case.result._elem.text = msg.rstrip()
else:
# If there are multiple Failures, concatenate their messages
self.case.result._elem.text += "\n\n" + msg.rstrip()
def add_info(self, msg):
"""
Adds an informational message without failing the test. The message is
shown on GitHub, and is shown regardless of whether the test passes or
fails. If the test fails, then both the informational message and the
failure message are shown.
Can be called many times within the same test to add multiple messages.
"""
def escape(s):
# Hack to preserve e.g. newlines and tabs in the attribute when
# tests are saved to .xml and reloaded. junitparser doesn't seem to
# handle it correctly, though it does escape stuff like quotes.
# unicode-escape replaces newlines with \n (two characters), etc.
return s.encode("unicode-escape").decode("utf-8")
if not self.case.info_msg:
self.case.info_msg = escape(msg)
else:
self.case.info_msg += r"\n\n" + escape(msg)
class EndTest(Exception):
"""
Raised by ComplianceTest.error()/skip() to end the test.
Tests can raise EndTest themselves to immediately end the test, e.g. from
within a nested function call.
"""
class CheckPatch(ComplianceTest):
"""
Runs checkpatch and reports found issues
"""
name = "checkpatch"
doc = "See https://docs.zephyrproject.org/latest/contribute/#coding-style for more details."
path_hint = "<git-top>"
def run(self):
# Default to Zephyr's checkpatch if ZEPHYR_BASE is set
checkpatch = os.path.join(ZEPHYR_BASE or GIT_TOP, 'scripts',
'checkpatch.pl')
if not os.path.exists(checkpatch):
self.skip(checkpatch + " not found")
# git diff's output doesn't depend on the current (sub)directory
diff = subprocess.Popen(('git', 'diff', COMMIT_RANGE),
stdout=subprocess.PIPE)
try:
subprocess.check_output((checkpatch, '--mailback', '--no-tree', '-'),
stdin=diff.stdout,
stderr=subprocess.STDOUT,
shell=True, cwd=GIT_TOP)
except subprocess.CalledProcessError as ex:
output = ex.output.decode("utf-8")
self.add_failure(output)
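# For reference, run() above is roughly equivalent to this shell pipeline
# (sketch only; the checkpatch.pl path depends on which tree is being checked):
#
#   git diff <commit-range> | scripts/checkpatch.pl --mailback --no-tree -
#
# A non-zero exit status from checkpatch.pl raises CalledProcessError, and the
# captured output becomes the reported failure.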
class KconfigCheck(ComplianceTest):
"""
Checks if we are introducing any new warnings/errors with Kconfig,
for example by using undefined Kconfig variables.
"""
name = "Kconfig"
doc = "See https://docs.zephyrproject.org/latest/guides/kconfig/index.html for more details."
path_hint = ZEPHYR_BASE
def run(self, full=True):
kconf = self.parse_kconfig()
self.check_top_menu_not_too_long(kconf)
self.check_no_pointless_menuconfigs(kconf)
self.check_no_undef_within_kconfig(kconf)
if full:
self.check_no_undef_outside_kconfig(kconf)
def get_modules(self, modules_file):
"""
Get a list of modules and put them in a file that is parsed by
Kconfig
This is needed to complete Kconfig sanity tests.
"""
# Invoke the script directly using the Python executable since this is
# not a module nor a pip-installed Python utility
zephyr_module_path = os.path.join(ZEPHYR_BASE, "scripts",
"zephyr_module.py")
cmd = [sys.executable, zephyr_module_path,
'--kconfig-out', modules_file]
try:
_ = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
self.error(ex.output.decode("utf-8"))
modules_dir = ZEPHYR_BASE + '/modules'
modules = [name for name in os.listdir(modules_dir) if
os.path.exists(os.path.join(modules_dir, name, 'Kconfig'))]
with open(modules_file, 'r') as fp_module_file:
content = fp_module_file.read()
with open(modules_file, 'w') as fp_module_file:
for module in modules:
fp_module_file.write("ZEPHYR_{}_KCONFIG = {}\n".format(
re.sub('[^a-zA-Z0-9]', '_', module).upper(),
modules_dir + '/' + module + '/Kconfig'
))
fp_module_file.write(content)
def write_kconfig_soc(self):
"""
Write Kconfig SoC files to be sourced during Kconfig parsing
"""
soc_defconfig_file = os.path.join(tempfile.gettempdir(), "Kconfig.soc.defconfig")
soc_file = os.path.join(tempfile.gettempdir(), "Kconfig.soc")
soc_arch_file = os.path.join(tempfile.gettempdir(), "Kconfig.soc.arch")
shield_defconfig_file = os.path.join(tempfile.gettempdir(), "Kconfig.shield.defconfig")
shield_file = os.path.join(tempfile.gettempdir(), "Kconfig.shield")
try:
with open(soc_defconfig_file, 'w', encoding="utf-8") as fp:
fp.write(f'osource "{ZEPHYR_BASE}/soc/$(ARCH)/*/Kconfig.defconfig"\n')
with open(soc_file, 'w', encoding="utf-8") as fp:
fp.write(f'osource "{ZEPHYR_BASE}/soc/$(ARCH)/*/Kconfig.soc"\n')
with open(soc_arch_file, 'w', encoding="utf-8") as fp:
fp.write(f'osource "{ZEPHYR_BASE}/soc/$(ARCH)/Kconfig"\n\
osource "{ZEPHYR_BASE}/soc/$(ARCH)/*/Kconfig"\n')
with open(shield_defconfig_file, 'w', encoding="utf-8") as fp:
fp.write(f'osource "{ZEPHYR_BASE}/boards/shields/*/Kconfig.defconfig"\n')
with open(shield_file, 'w', encoding="utf-8") as fp:
fp.write(f'osource "{ZEPHYR_BASE}/boards/shields/*/Kconfig.shield"\n')
except IOError as ex:
self.error(str(ex))
def parse_kconfig(self):
"""
Returns a kconfiglib.Kconfig object for the Kconfig files. We reuse
this object for all tests to avoid having to reparse for each test.
"""
if not ZEPHYR_BASE:
self.skip("Not a Zephyr tree (ZEPHYR_BASE unset)")
# Put the Kconfiglib path first to make sure no local Kconfiglib version is
# used
kconfig_path = os.path.join(ZEPHYR_BASE, "scripts", "kconfig")
if not os.path.exists(kconfig_path):
self.error(kconfig_path + " not found")
sys.path.insert(0, kconfig_path)
# Import globally so that e.g. kconfiglib.Symbol can be referenced in
# tests
global kconfiglib
import kconfiglib
# Look up Kconfig files relative to ZEPHYR_BASE
os.environ["srctree"] = ZEPHYR_BASE
# Parse the entire Kconfig tree, to make sure we see all symbols
os.environ["SOC_DIR"] = "soc/"
os.environ["ARCH_DIR"] = "arch/"
os.environ["BOARD_DIR"] = "boards/*/*"
os.environ["ARCH"] = "*"
os.environ["KCONFIG_BINARY_DIR"] = tempfile.gettempdir()
os.environ['DEVICETREE_CONF'] = "dummy"
# Older name for DEVICETREE_CONF, for compatibility with older Zephyr
# versions that don't have the renaming
os.environ["GENERATED_DTS_BOARD_CONF"] = "dummy"
# For multi repo support
self.get_modules(os.path.join(tempfile.gettempdir(), "Kconfig.modules"))
# For list of SOC_ROOT support
self.write_kconfig_soc()
# Tells Kconfiglib to generate warnings for all references to undefined
# symbols within Kconfig files
os.environ["KCONFIG_WARN_UNDEF"] = "y"
try:
# Note this will both print warnings to stderr _and_ return
# them: so some warnings might get printed
# twice. "warn_to_stderr=False" could unfortunately cause
# some (other) warnings to never be printed.
return kconfiglib.Kconfig()
except kconfiglib.KconfigError as e:
self.add_failure(str(e))
raise EndTest
def check_top_menu_not_too_long(self, kconf):
"""
Checks that there aren't too many items in the top-level menu (which
might be a sign that stuff accidentally got added there)
"""
max_top_items = 50
n_top_items = 0
node = kconf.top_node.list
while node:
# Only count items with prompts. Other items will never be
# shown in the menuconfig (outside show-all mode).
if node.prompt:
n_top_items += 1
node = node.next
if n_top_items > max_top_items:
self.add_failure("""
Expected no more than {} potentially visible items (items with prompts) in the
top-level Kconfig menu, found {} items. If you're deliberately adding new
entries, then bump the 'max_top_items' variable in {}.
""".format(max_top_items, n_top_items, __file__))
def check_no_pointless_menuconfigs(self, kconf):
# Checks that there are no pointless 'menuconfig' symbols without
# children in the Kconfig files
bad_mconfs = []
for node in kconf.node_iter():
# 'kconfiglib' is global
# pylint: disable=undefined-variable
# Avoid flagging empty regular menus and choices, in case people do
# something with 'osource' (could happen for 'menuconfig' symbols
# too, though it's less likely)
if node.is_menuconfig and not node.list and \
isinstance(node.item, kconfiglib.Symbol):
bad_mconfs.append(node)
if bad_mconfs:
self.add_failure("""\
Found pointless 'menuconfig' symbols without children. Use regular 'config'
symbols instead. See
https://docs.zephyrproject.org/latest/guides/kconfig/tips.html#menuconfig-symbols.
""" + "\n".join(f"{node.item.name:35} {node.filename}:{node.linenr}"
for node in bad_mconfs))
def check_no_undef_within_kconfig(self, kconf):
"""
Checks that there are no references to undefined Kconfig symbols within
the Kconfig files
"""
undef_ref_warnings = "\n\n\n".join(warning for warning in kconf.warnings
if "undefined symbol" in warning)
if undef_ref_warnings:
self.add_failure("Undefined Kconfig symbols:\n\n"
+ undef_ref_warnings)
def check_no_undef_outside_kconfig(self, kconf):
"""
Checks that there are no references to undefined Kconfig symbols
outside Kconfig files (any CONFIG_FOO where no FOO symbol exists)
"""
# Grep for symbol references.
#
# Example output line for a reference to CONFIG_FOO at line 17 of
# foo/bar.c:
#
# foo/bar.c<null>17<null>#ifdef CONFIG_FOO
#
# 'git grep --only-matching' would get rid of the surrounding context
# ('#ifdef '), but it was added fairly recently (second half of 2018),
# so we extract the references from each line ourselves instead.
#
# The regex uses word boundaries (\b) to isolate the reference, and
# negative lookahead to automatically whitelist the following:
#
# - ##, for token pasting (CONFIG_FOO_##X)
#
# - $, e.g. for CMake variable expansion (CONFIG_FOO_${VAR})
#
# - @, e.g. for CMakes's configure_file() (CONFIG_FOO_@VAR@)
#
# - {, e.g. for Python scripts ("CONFIG_FOO_{}_BAR".format(...)")
#
# - *, meant for comments like '#endif /* CONFIG_FOO_* */'
defined_syms = get_defined_syms(kconf)
# Maps each undefined symbol to a list of <filename>:<linenr> strings
undef_to_locs = collections.defaultdict(list)
# Warning: Needs to work with both --perl-regexp and the 're' module
regex = r"\bCONFIG_[A-Z0-9_]+\b(?!\s*##|[$@{*])"
# Skip doc/releases, which often references removed symbols
grep_stdout = git("grep", "--line-number", "-I", "--null",
"--perl-regexp", regex, "--", ":!/doc/releases",
cwd=Path(GIT_TOP))
# splitlines() supports various line terminators
for grep_line in grep_stdout.splitlines():
path, lineno, line = grep_line.split("\0")
# Extract symbol references (might be more than one) within the
# line
for sym_name in re.findall(regex, line):
sym_name = sym_name[7:] # Strip CONFIG_
if sym_name not in defined_syms and \
sym_name not in UNDEF_KCONFIG_WHITELIST:
undef_to_locs[sym_name].append("{}:{}".format(path, lineno))
if not undef_to_locs:
return
# String that describes all referenced but undefined Kconfig symbols,
# in alphabetical order, along with the locations where they're
# referenced. Example:
#
# CONFIG_ALSO_MISSING arch/xtensa/core/fatal.c:273
# CONFIG_MISSING arch/xtensa/core/fatal.c:264, subsys/fb/cfb.c:20
undef_desc = "\n".join(
"CONFIG_{:35} {}".format(sym_name, ", ".join(locs))
for sym_name, locs in sorted(undef_to_locs.items()))
self.add_failure("""
Found references to undefined Kconfig symbols. If any of these are false
positives, then add them to UNDEF_KCONFIG_WHITELIST in {} in the ci-tools repo.
If the reference is for a comment like /* CONFIG_FOO_* */ (or
/* CONFIG_FOO_*_... */), then please use exactly that form (with the '*'). The
CI check knows not to flag it.
More generally, a reference followed by $, @, {{, *, or ## will never be
flagged.
{}""".format(os.path.basename(__file__), undef_desc))
def get_defined_syms(kconf):
# Returns a set() with the names of all defined Kconfig symbols (with no
# 'CONFIG_' prefix). This is complicated by samples and tests defining
# their own Kconfig trees. For those, just grep for 'config FOO' to find
# definitions. Doing it "properly" with Kconfiglib is still useful for the
# main tree, because some symbols are defined using preprocessor macros.
# Warning: Needs to work with both --perl-regexp and the 're' module.
# (?:...) is a non-capturing group.
regex = r"^\s*(?:menu)?config\s*([A-Z0-9_]+)\s*(?:#|$)"
# Grep samples/ and tests/ for symbol definitions
grep_stdout = git("grep", "-I", "-h", "--perl-regexp", regex, "--",
":samples", ":tests", cwd=ZEPHYR_BASE)
# Symbols from the main Kconfig tree + grepped definitions from samples and
# tests
return set([sym.name for sym in kconf.unique_defined_syms]
+ re.findall(regex, grep_stdout, re.MULTILINE))
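# Tiny illustration, not called anywhere, of the definition-grepping regex: it
# picks up 'config FOO' and 'menuconfig BAR' lines, including ones followed by
# a '#' comment, and captures just the symbol name.
def _demo_kconfig_def_regex():
    regex = r"^\s*(?:menu)?config\s*([A-Z0-9_]+)\s*(?:#|$)"
    text = "config FOO\nmenuconfig BAR\n  config BAZ # comment\nsource Kconfig\n"
    assert re.findall(regex, text, re.MULTILINE) == ["FOO", "BAR", "BAZ"]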
# Many of these are symbols used as examples. Note that the list is sorted
# alphabetically, and skips the CONFIG_ prefix.
UNDEF_KCONFIG_WHITELIST = {
"ALSO_MISSING",
"APP_LINK_WITH_",
"CDC_ACM_PORT_NAME_",
"CLOCK_STM32_SYSCLK_SRC_",
"CMU",
"BT_6LOWPAN", # Defined in Linux, mentioned in docs
"COUNTER_RTC_STM32_CLOCK_SRC",
"CRC", # Used in TI CC13x2 / CC26x2 SDK comment
"DEEP_SLEEP", # #defined by RV32M1 in ext/
"DESCRIPTION",
"ERR",
"ESP_DIF_LIBRARY", # Referenced in CMake comment
"EXPERIMENTAL",
"FFT", # Used as an example in cmake/extensions.cmake
"FLAG", # Used as an example
"FOO",
"FOO_LOG_LEVEL",
"FOO_SETTING_1",
"FOO_SETTING_2",
"LIS2DW12_INT_PIN",
"LSM6DSO_INT_PIN",
"MISSING",
"MODULES",
"MYFEATURE",
"MY_DRIVER_0",
"NORMAL_SLEEP", # #defined by RV32M1 in ext/
"OPT",
"OPT_0",
"PEDO_THS_MIN",
"REG1",
"REG2",
"SAMPLE_MODULE_LOG_LEVEL", # Used as an example in samples/subsys/logging
"SAMPLE_MODULE_LOG_LEVEL_DBG", # Used in tests/subsys/logging/log_api
"SEL",
"SHIFT",
"SOC_WATCH", # Issue 13749
"SOME_BOOL",
"SOME_INT",
"SOME_OTHER_BOOL",
"SOME_STRING",
"SRAM2", # Referenced in a comment in samples/application_development
"STACK_SIZE", # Used as an example in the Kconfig docs
"STD_CPP", # Referenced in CMake comment
"TAGOIO_HTTP_POST_LOG_LEVEL", # Used as in samples/net/cloud/tagoio
"TEST1",
"TYPE_BOOLEAN",
"USB_CONSOLE",
"USE_STDC_",
"WHATEVER",
}
class KconfigBasicCheck(KconfigCheck, ComplianceTest):
"""
Checks if we are introducing any new warnings/errors with Kconfig,
for example by using undefined Kconfig variables.
This runs the basic Kconfig test, which is checking only for undefined
references inside the Kconfig tree.
"""
name = "KconfigBasic"
doc = "See https://docs.zephyrproject.org/latest/guides/kconfig/index.html for more details."
path_hint = ZEPHYR_BASE
def run(self):
super().run(full=False)
class Codeowners(ComplianceTest):
"""
Check if added files have an owner.
"""
name = "Codeowners"
doc = "See https://help.github.com/articles/about-code-owners/ for more details."
path_hint = "<git-top>"
def ls_owned_files(self, codeowners):
"""Returns an OrderedDict mapping git patterns from the CODEOWNERS file
to the corresponding list of files found on the filesystem. It
unfortunately does not seem possible to invoke git and re-use
how 'git ignore' and/or 'git attributes' already implement this,
so we must re-invent it.
"""
# TODO: filter out files not in "git ls-files" (e.g.,
# twister-out) _if_ the overhead isn't too high for a clean tree.
#
# pathlib.match() doesn't support **, so it looks like we can't
# recursively glob the output of ls-files directly, only real
# files :-(
pattern2files = collections.OrderedDict()
top_path = Path(GIT_TOP)
with open(codeowners, "r") as codeo:
for lineno, line in enumerate(codeo, start=1):
if line.startswith("#") or not line.strip():
continue
match = re.match(r"^([^\s,]+)\s+[^\s]+", line)
if not match:
self.add_failure(
"Invalid CODEOWNERS line %d\n\t%s" %
(lineno, line))
continue
git_patrn = match.group(1)
glob = self.git_pattern_to_glob(git_patrn)
files = []
for abs_path in top_path.glob(glob):
# comparing strings is much faster later
files.append(str(abs_path.relative_to(top_path)))
if not files:
self.add_failure("Path '{}' not found in the tree but is listed in "
"CODEOWNERS".format(git_patrn))
pattern2files[git_patrn] = files
return pattern2files
def git_pattern_to_glob(self, git_pattern):
"""Appends and prepends '**[/*]' when needed. Result has neither a
leading nor a trailing slash.
"""
if git_pattern.startswith("/"):
ret = git_pattern[1:]
else:
ret = "**/" + git_pattern
if git_pattern.endswith("/"):
ret = ret + "**/*"
elif os.path.isdir(os.path.join(GIT_TOP, ret)):
self.add_failure("Expected '/' after directory '{}' "
"in CODEOWNERS".format(ret))
return ret
def run(self):
# TODO: testing an old self.commit range that doesn't end
# with HEAD is most likely a mistake. Should warn, see
# https://github.com/zephyrproject-rtos/ci-tools/pull/24
codeowners = os.path.join(GIT_TOP, "CODEOWNERS")
if not os.path.exists(codeowners):
self.skip("CODEOWNERS not available in this repo")
name_changes = git("diff", "--name-only", "--diff-filter=ARCD",
COMMIT_RANGE)
owners_changes = git("diff", "--name-only", COMMIT_RANGE,
"--", codeowners)
if not name_changes and not owners_changes:
# TODO: 1. decouple basic and cheap CODEOWNERS syntax
# validation from the expensive ls_owned_files() scanning of
# the entire tree. 2. run the former always.
return
logging.info("If this takes too long then cleanup and try again")
patrn2files = self.ls_owned_files(codeowners)
# The way git finds Renames and Copies is not "exact science",
# however if one is missed then it will always be reported as an
# Addition instead.
new_files = git("diff", "--name-only", "--diff-filter=ARC",
COMMIT_RANGE).splitlines()
logging.debug("New files %s", new_files)
# Convert to pathlib.Path string representation (e.g.,
# backslashes 'dir1\dir2\' on Windows) to be consistent
# with self.ls_owned_files()
new_files = [str(Path(f)) for f in new_files]
new_not_owned = []
for newf in new_files:
f_is_owned = False
for git_pat, owned in patrn2files.items():
logging.debug("Scanning %s for %s", git_pat, newf)
if newf in owned:
logging.info("%s matches new file %s", git_pat, newf)
f_is_owned = True
# Unlike github, we don't care about finding any
# more specific owner.
break
if not f_is_owned:
new_not_owned.append(newf)
if new_not_owned:
self.add_failure("New files added that are not covered in "
"CODEOWNERS:\n\n" + "\n".join(new_not_owned) +
"\n\nPlease add one or more entries in the "
"CODEOWNERS file to cover those files")
class Nits(ComplianceTest):
"""
Checks various nits in added/modified files. Doesn't check stuff that's
already covered by e.g. checkpatch.pl and pylint.
"""
name = "Nits"
doc = "See https://docs.zephyrproject.org/latest/contribute/#coding-style for more details."
path_hint = "<git-top>"
def run(self):
# Loop through added/modified files
for fname in git("diff", "--name-only", "--diff-filter=d",
COMMIT_RANGE).splitlines():
if "Kconfig" in fname:
self.check_kconfig_header(fname)
self.check_redundant_zephyr_source(fname)
if fname.startswith("dts/bindings/"):
self.check_redundant_document_separator(fname)
if fname.endswith((".c", ".conf", ".cpp", ".dts", ".overlay",
".h", ".ld", ".py", ".rst", ".txt", ".yaml",
".yml")) or \
"Kconfig" in fname or \
"defconfig" in fname or \
fname == "README":
self.check_source_file(fname)
def check_kconfig_header(self, fname):
# Checks for a spammy copy-pasted header format
with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
contents = f.read()
# 'Kconfig - yada yada' has a copy-pasted redundant filename at the
# top. This probably means all of the header was copy-pasted.
if re.match(r"\s*#\s*(K|k)config[\w.-]*\s*-", contents):
self.add_failure("""
Please use this format for the header in '{}' (see
https://docs.zephyrproject.org/latest/guides/kconfig/index.html#header-comments-and-other-nits):
# <Overview of symbols defined in the file, preferably in plain English>
(Blank line)
# Copyright (c) 2019 ...
# SPDX-License-Identifier: <License>
(Blank line)
(Kconfig definitions)
Skip the "Kconfig - " part of the first line, since it's clear that the comment
is about Kconfig from context. The "# Kconfig - " is what triggers this
failure.
""".format(fname))
def check_redundant_zephyr_source(self, fname):
# Checks for 'source "$(ZEPHYR_BASE)/Kconfig[.zephyr]"', which can be
# simplified to 'source "Kconfig[.zephyr]"'
with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
# Look for e.g. rsource as well, for completeness
match = re.search(
r'^\s*(?:o|r|or)?source\s*"\$\(?ZEPHYR_BASE\)?/(Kconfig(?:\.zephyr)?)"',
f.read(), re.MULTILINE)
if match:
self.add_failure("""
Redundant 'source "$(ZEPHYR_BASE)/{0}" in '{1}'. Just do 'source "{0}"'
instead. The $srctree environment variable already points to the Zephyr root,
and all 'source's are relative to it.""".format(match.group(1), fname))
def check_redundant_document_separator(self, fname):
# Looks for redundant '...' document separators in bindings
with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
if re.search(r"^\.\.\.", f.read(), re.MULTILINE):
self.add_failure(f"""\
Redundant '...' document separator in {fname}. Binding YAML files are never
concatenated together, so no document separators are needed.""")
def check_source_file(self, fname):
# Generic nits related to various source files
with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
contents = f.read()
if not contents.endswith("\n"):
self.add_failure("Missing newline at end of '{}'. Check your text "
"editor settings.".format(fname))
if contents.startswith("\n"):
self.add_failure("Please remove blank lines at start of '{}'"
.format(fname))
if contents.endswith("\n\n"):
self.add_failure("Please remove blank lines at end of '{}'"
.format(fname))
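# Small sketch, not called anywhere, of the redundant-source regex used by
# check_redundant_zephyr_source(): the $(ZEPHYR_BASE) form is flagged, while
# the recommended plain form is not.
def _demo_redundant_source_regex():
    regex = (r'^\s*(?:o|r|or)?source\s*'
             r'"\$\(?ZEPHYR_BASE\)?/(Kconfig(?:\.zephyr)?)"')
    flagged = re.search(regex, 'rsource "$(ZEPHYR_BASE)/Kconfig.zephyr"')
    assert flagged and flagged.group(1) == "Kconfig.zephyr"
    assert re.search(regex, 'source "Kconfig.zephyr"') is None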
class GitLint(ComplianceTest):
"""
Runs gitlint on the commits and finds issues with style and syntax
"""
name = "Gitlint"
doc = "See https://docs.zephyrproject.org/latest/contribute/#commit-guidelines for more details"
path_hint = "<git-top>"
def run(self):
# By default gitlint looks for .gitlint configuration only in
# the current directory
proc = subprocess.Popen('gitlint --commits ' + COMMIT_RANGE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True, cwd=GIT_TOP)
msg = ""
if proc.wait() != 0:
msg = proc.stdout.read()
if msg != "":
self.add_failure(msg.decode("utf-8"))
class PyLint(ComplianceTest):
"""
Runs pylint on all .py files, with a limited set of checks enabled. The
configuration is in the pylintrc file.
"""
name = "pylint"
doc = "See https://www.pylint.org/ for more details"
path_hint = "<git-top>"
def run(self):
# Path to pylint configuration file
pylintrc = os.path.abspath(os.path.join(os.path.dirname(__file__),
"pylintrc"))
# List of files added/modified by the commit(s).
files = git(
"diff", "--name-only", "--diff-filter=d", COMMIT_RANGE, "--",
# Skip to work around crash in pylint 2.2.2:
# https://github.com/PyCQA/pylint/issues/2906
":!boards/xtensa/intel_s1000_crb/support/create_board_img.py") \
.splitlines()
# Filter out everything but Python files. Keep filenames
# relative (to GIT_TOP) to stay farther from any command line
# limit.
py_files = filter_py(GIT_TOP, files)
if not py_files:
return
pylintcmd = ["pylint", "--rcfile=" + pylintrc] + py_files
logger.info(cmd2str(pylintcmd))
try:
# Run pylint on added/modified Python files
process = subprocess.Popen(
pylintcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=GIT_TOP)
except OSError as e:
self.error(f"Failed to run {cmd2str(pylintcmd)}: {e}")
stdout, stderr = process.communicate()
if process.returncode or stderr:
# Issues found, or a problem with pylint itself
self.add_failure(stdout.decode("utf-8") + stderr.decode("utf-8"))
def filter_py(root, fnames):
# PyLint check helper. Returns all Python script filenames among the
# filenames in 'fnames', relative to directory 'root'. Uses the
# python-magic library, so that we can detect Python files that
# don't end in .py as well. python-magic is a frontend to libmagic,
# which is also used by 'file'.
return [fname for fname in fnames
if fname.endswith(".py") or
magic.from_file(os.path.join(root, fname),
mime=True) == "text/x-python"]
class Identity(ComplianceTest):
"""
Checks if the author email and the Signed-off-by entries are consistent.
"""
name = "Identity"
doc = "See https://docs.zephyrproject.org/latest/contribute/#commit-guidelines for more details"
# git rev-list and git log don't depend on the current (sub)directory
# unless explicitly told otherwise
path_hint = "<git-top>"
def run(self):
for shaidx in get_shas(COMMIT_RANGE):
commit = git("log", "--decorate=short", "-n 1", shaidx)
signed = []
author = ""
sha = ""
parsed_addr = None
for line in commit.split("\n"):
match = re.search(r"^commit\s([^\s]*)", line)
if match:
sha = match.group(1)
match = re.search(r"^Author:\s(.*)", line)
if match:
author = match.group(1)
parsed_addr = parseaddr(author)
match = re.search(r"signed-off-by:\s(.*)", line, re.IGNORECASE)
if match:
signed.append(match.group(1))
error1 = "%s: author email (%s) needs to match one of the signed-off-by entries." % (
sha, author)
error2 = "%s: author email (%s) does not follow the syntax: First Last <email>." % (
sha, author)
error3 = "%s: author email (%s) must be a real email and cannot end in @users.noreply.github.com" % (
sha, author)
failure = None
if author not in signed:
failure = error1
if not parsed_addr or len(parsed_addr[0].split(" ")) < 2:
if not failure:
failure = error2
else:
failure = failure + "\n" + error2
elif parsed_addr[1].endswith("@users.noreply.github.com"):
failure = error3
if failure:
self.add_failure(failure)
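# Small illustration, not called anywhere, of the parseaddr() split that the
# Identity check relies on: a well-formed "First Last <email>" author yields a
# two-word real name plus the bare address, which is then checked against the
# @users.noreply.github.com rule. The address below is made up.
def _demo_parseaddr():
    name, addr = parseaddr("Jane Doe <[email protected]>")
    assert name == "Jane Doe" and addr == "[email protected]"
    assert len(name.split(" ")) >= 2
    assert not addr.endswith("@users.noreply.github.com")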
def init_logs(cli_arg):
# Initializes logging
# TODO: there may be a shorter version thanks to:
# logging.basicConfig(...)
global logger
level = os.environ.get('LOG_LEVEL', "WARN")
console = logging.StreamHandler()
console.setFormatter(logging.Formatter('%(levelname)-8s: %(message)s'))
logger = logging.getLogger('')
logger.addHandler(console)
logger.setLevel(cli_arg if cli_arg else level)
logging.info("Log init completed, level=%s",
logging.getLevelName(logger.getEffectiveLevel()))
def parse_args():
parser = argparse.ArgumentParser(
description="Check for coding style and documentation warnings.")
parser.add_argument('-c', '--commits', default="HEAD~1..",
help='''Commit range in the form: a..[b], default is
HEAD~1..HEAD''')
parser.add_argument('-r', '--repo', default=None,
help="GitHub repository")
parser.add_argument('-p', '--pull-request', default=0, type=int,
help="Pull request number")
parser.add_argument('-S', '--sha', default=None, help="Commit SHA")
parser.add_argument('-o', '--output', default="compliance.xml",
help='''Name of outfile in JUnit format,
default is ./compliance.xml''')
parser.add_argument('-l', '--list', action="store_true",
help="List all checks and exit")
parser.add_argument("-v", "--loglevel", help="python logging level")
parser.add_argument('-m', '--module', action="append", default=[],
help="Checks to run. All checks by default.")
parser.add_argument('-e', '--exclude-module', action="append", default=[],
help="Do not run the specified checks")
parser.add_argument('-j', '--previous-run', default=None,
help='''Pre-load JUnit results in XML format
from a previous run and combine with new results.''')
return parser.parse_args()
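# Typical invocations (sketch; the script name is assumed from context):
#
#   ./check_compliance.py -c HEAD~3..          # check the last three commits
#   ./check_compliance.py -m Gitlint -m Nits   # run only the selected checks
#   ./check_compliance.py -l                   # list available checks and exit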
def _main(args):
# The "real" main(), which is wrapped to catch exceptions and report them
# to GitHub. Returns the number of test failures.
# The absolute path of the top-level git directory. Initialize it here so
# that issues running Git can be reported to GitHub.
global GIT_TOP
GIT_TOP = git("rev-parse", "--show-toplevel")
# The commit range passed in --commit, e.g. "HEAD~3"
global COMMIT_RANGE
COMMIT_RANGE = args.commits
init_logs(args.loglevel)
if args.list:
for testcase in ComplianceTest.__subclasses__():
print(testcase.name)
return 0
# Load saved test results from an earlier run, if requested
if args.previous_run:
if not os.path.exists(args.previous_run):
# This probably means that an earlier pass had an internal error
# (the script is currently run multiple times by the ci-pipelines
# repo). Since that earlier pass might've posted an error to
# GitHub, avoid generating a GitHub comment here, by avoiding
# sys.exit() (which gets caught in main()).
print("error: '{}' not found".format(args.previous_run),
file=sys.stderr)
return 1
logging.info("Loading previous results from " + args.previous_run)
for loaded_suite in JUnitXml.fromfile(args.previous_run):
suite = loaded_suite
break
else:
suite = TestSuite("Compliance")
for testcase in ComplianceTest.__subclasses__():
# "Modules" and "testcases" are the same thing. Better flags would have
# been --tests and --exclude-tests or the like, but it's awkward to
# change now.
if args.module and testcase.name not in args.module:
continue
if testcase.name in args.exclude_module:
print("Skipping " + testcase.name)
continue
test = testcase()
try:
print(f"Running {test.name:16} tests in "
f"{GIT_TOP if test.path_hint == '<git-top>' else test.path_hint} ...")
test.run()
except EndTest:
pass
suite.add_testcase(test.case)
xml = JUnitXml()
xml.add_testsuite(suite)
xml.update_statistics()
xml.write(args.output, pretty=True)
failed_cases = []
name2doc = {testcase.name: testcase.doc
for testcase in ComplianceTest.__subclasses__()}
for case in suite:
if case.result:
if case.result.type == 'skipped':
logging.warning("Skipped %s, %s", case.name, case.result.message)
else:
failed_cases.append(case)
else:
# Some checks like codeowners can produce no .result
logging.info("No JUnit result for %s", case.name)
n_fails = len(failed_cases)
if n_fails:
print("{} checks failed".format(n_fails))
for case in failed_cases:
# not clear why junitxml doesn't clearly expose the most
# important part of its underlying etree.Element
errmsg = case.result._elem.text
logging.error("Test %s failed: %s", case.name,
errmsg.strip() if errmsg else case.result.message)
with open(f"{case.name}.txt", "w") as f:
docs = name2doc.get(case.name)
f.write(f"{docs}\n\n")
f.write(errmsg.strip() if errmsg else case.result.message)
print("\nComplete results in " + args.output)
return n_fails
def main():
args = parse_args()
try:
n_fails = _main(args)
except BaseException:
# Catch BaseException instead of Exception to include stuff like
# SystemExit (raised by sys.exit())
print("Python exception in `{}`:\n\n"
"```\n{}\n```".format(__file__, traceback.format_exc()))
raise
sys.exit(n_fails)
def cmd2str(cmd):
# Formats the command-line arguments in the iterable 'cmd' into a string,
# for error messages and the like
return " ".join(shlex.quote(word) for word in cmd)
def err(msg):
cmd = sys.argv[0] # Empty if missing
if cmd:
cmd += ": "
sys.exit(cmd + "error: " + msg)
if __name__ == "__main__":
main()
| []
| []
| [
"SOC_DIR",
"DEVICETREE_CONF",
"GENERATED_DTS_BOARD_CONF",
"LOG_LEVEL",
"BOARD_DIR",
"ZEPHYR_BASE",
"srctree",
"KCONFIG_WARN_UNDEF",
"ARCH",
"ARCH_DIR",
"KCONFIG_BINARY_DIR"
]
| [] | ["SOC_DIR", "DEVICETREE_CONF", "GENERATED_DTS_BOARD_CONF", "LOG_LEVEL", "BOARD_DIR", "ZEPHYR_BASE", "srctree", "KCONFIG_WARN_UNDEF", "ARCH", "ARCH_DIR", "KCONFIG_BINARY_DIR"] | python | 11 | 0 | |
util/build_docs.py | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
#
# Usage:
# run './build_docs.py' to generate the documentation and keep it updated
# open 'http://localhost:1313/' to check live updates (this opens the top
# level index page). You can also access a specific document directly at
# 'http://localhost:1313/path/to/doc',
# e.g. http://localhost:1313/hw/ip/uart/doc
import argparse
import logging
import os
import platform
import re
import subprocess
import sys
import textwrap
from pathlib import Path
import hjson
import dashboard.gen_dashboard_entry as gen_dashboard_entry
import difgen.gen_dif_listing as gen_dif_listing
import reggen.gen_cfg_html as gen_cfg_html
import reggen.gen_html as gen_html
import reggen.validate as validate
import reggen.gen_selfdoc as reggen_selfdoc
import dvsim.testplanner.testplan_utils as testplan_utils
import tlgen
USAGE = """
build_docs [options]
"""
# Version of hugo extended to be used to build the docs
HUGO_EXTENDED_VERSION = "0.60.0"
# Configurations
# TODO: Move to config.yaml
SRCTREE_TOP = Path(__file__).parent.joinpath('..').resolve()
config = {
# Toplevel source directory
"topdir":
SRCTREE_TOP,
# Pre-generate register and hwcfg fragments from these files.
"hardware_definitions": [
"hw/ip/aes/data/aes.hjson",
"hw/top_earlgrey/ip/alert_handler/data/autogen/alert_handler.hjson",
"hw/ip/entropy_src/data/entropy_src.hjson",
"hw/ip/flash_ctrl/data/flash_ctrl.hjson",
"hw/ip/gpio/data/gpio.hjson",
"hw/ip/hmac/data/hmac.hjson",
"hw/ip/i2c/data/i2c.hjson",
"hw/ip/keymgr/data/keymgr.hjson",
"hw/ip/nmi_gen/data/nmi_gen.hjson",
"hw/ip/otbn/data/otbn.hjson",
"hw/ip/otp_ctrl/data/otp_ctrl.hjson",
"hw/ip/padctrl/data/padctrl.hjson",
"hw/top_earlgrey/ip/pinmux/data/autogen/pinmux.hjson",
"hw/top_earlgrey/ip/rv_plic/data/autogen/rv_plic.hjson",
"hw/top_earlgrey/ip/pwrmgr/data/autogen/pwrmgr.hjson",
"hw/ip/rv_timer/data/rv_timer.hjson",
"hw/ip/spi_device/data/spi_device.hjson",
"hw/ip/uart/data/uart.hjson",
"hw/ip/usbdev/data/usbdev.hjson",
"hw/ip/usbuart/data/usbuart.hjson",
],
# Pre-generate dashboard fragments from these directories.
"dashboard_definitions": [
"hw/ip",
],
# Pre-generate testplan fragments from these files.
"testplan_definitions": [
"hw/ip/aes/data/aes_testplan.hjson",
"hw/ip/alert_handler/data/alert_handler_testplan.hjson",
"hw/ip/entropy_src/data/entropy_src_testplan.hjson",
"hw/ip/flash_ctrl/data/flash_ctrl_testplan.hjson",
"hw/ip/gpio/data/gpio_testplan.hjson",
"hw/ip/hmac/data/hmac_testplan.hjson",
"hw/ip/i2c/data/i2c_testplan.hjson",
"hw/ip/keymgr/data/keymgr_testplan.hjson",
"hw/ip/padctrl/data/padctrl_fpv_testplan.hjson",
"hw/ip/pinmux/data/pinmux_fpv_testplan.hjson",
"hw/ip/rv_plic/data/rv_plic_fpv_testplan.hjson",
"hw/ip/rv_timer/data/rv_timer_testplan.hjson",
"hw/ip/spi_device/data/spi_device_testplan.hjson",
"hw/ip/uart/data/uart_testplan.hjson",
"hw/ip/usbdev/data/usbdev_testplan.hjson",
"hw/ip/tlul/data/tlul_testplan.hjson",
"hw/top_earlgrey/data/standalone_sw_testplan.hjson",
"util/dvsim/testplanner/examples/foo_testplan.hjson",
],
# Pre-generated utility selfdoc
"selfdoc_tools": ["tlgen", "reggen"],
# DIF Docs
"difs-directory": "sw/device/lib/dif",
# Output directory for documents
"outdir":
SRCTREE_TOP.joinpath('build', 'docs'),
"outdir-generated":
SRCTREE_TOP.joinpath('build', 'docs-generated'),
"verbose":
False,
}
def generate_dashboards():
for dashboard in config["dashboard_definitions"]:
hjson_paths = []
hjson_paths.extend(
sorted(SRCTREE_TOP.joinpath(dashboard).rglob('*.prj.hjson')))
dashboard_path = config["outdir-generated"].joinpath(
dashboard, 'dashboard')
dashboard_html = open(str(dashboard_path), mode='w')
for hjson_path in hjson_paths:
gen_dashboard_entry.gen_dashboard_html(hjson_path, dashboard_html)
dashboard_html.close()
def generate_hardware_blocks():
for hardware in config["hardware_definitions"]:
hardware_file = open(str(SRCTREE_TOP.joinpath(hardware)))
regs = hjson.load(hardware_file,
use_decimal=True,
object_pairs_hook=validate.checking_dict)
if validate.validate(regs) == 0:
logging.info("Parsed %s" % (hardware))
else:
logging.fatal("Failed to parse %s" % (hardware))
base_path = config["outdir-generated"].joinpath(hardware)
base_path.parent.mkdir(parents=True, exist_ok=True)
regs_html = open(str(base_path.parent.joinpath(base_path.name +
'.registers')),
mode='w')
gen_html.gen_html(regs, regs_html)
regs_html.close()
hwcfg_html = open(str(base_path.parent.joinpath(base_path.name + '.hwcfg')),
mode='w')
gen_cfg_html.gen_cfg_html(regs, hwcfg_html)
hwcfg_html.close()
def generate_testplans():
for testplan in config["testplan_definitions"]:
plan = testplan_utils.parse_testplan(SRCTREE_TOP.joinpath(testplan))
plan_path = config["outdir-generated"].joinpath(testplan + '.testplan')
plan_path.parent.mkdir(parents=True, exist_ok=True)
testplan_html = open(str(plan_path), mode='w')
testplan_utils.gen_html_testplan_table(plan, testplan_html)
testplan_html.close()
def generate_selfdocs():
"""Generate documents for the tools in `util/` if `--doc` option exists.
Each tool creates selfdoc differently. Manually invoked.
"""
for tool in config["selfdoc_tools"]:
selfdoc_path = config["outdir-generated"].joinpath(tool + '.selfdoc')
selfdoc_path.parent.mkdir(parents=True, exist_ok=True)
with open(str(selfdoc_path), mode='w') as fout:
if tool == "reggen":
reggen_selfdoc.document(fout)
elif tool == "tlgen":
fout.write(tlgen.selfdoc(heading=3, cmd='tlgen.py --doc'))
def generate_apt_reqs():
"""Generate an apt-get command line invocation from apt-requirements.txt
This will be saved in outdir-generated/apt_cmd.txt
"""
# Read the apt-requirements.txt
apt_requirements = []
requirements_file = open(str(SRCTREE_TOP.joinpath("apt-requirements.txt")))
for package_line in requirements_file.readlines():
# Ignore everything after `#` on each line, and strip whitespace
package = package_line.split('#', 1)[0].strip()
if package:
# only add non-empty lines to packages
apt_requirements.append(package)
apt_cmd = "$ sudo apt-get install " + " ".join(apt_requirements)
apt_cmd_lines = textwrap.wrap(apt_cmd,
width=78,
replace_whitespace=True,
subsequent_indent=' ')
# Newlines need to be escaped
apt_cmd = " \\\n".join(apt_cmd_lines)
# Then write the generated string directly to the file.
apt_cmd_path = config["outdir-generated"].joinpath('apt_cmd.txt')
apt_cmd_path.parent.mkdir(parents=True, exist_ok=True)
with open(str(apt_cmd_path), mode='w') as fout:
fout.write(apt_cmd)
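# Worked example, not called anywhere, of the wrapping done above: a long
# apt-get command line is wrapped at 78 columns and joined with ' \' + newline
# so the result can be pasted into a shell as one command. The package names
# below are just an illustrative subset.
def _demo_apt_cmd_wrapping():
    packages = ["build-essential", "clang-format", "curl", "doxygen", "flex",
                "g++", "git", "libelf1", "libelf-dev", "libssl-dev", "ninja-build"]
    cmd = "$ sudo apt-get install " + " ".join(packages)
    lines = textwrap.wrap(cmd, width=78, replace_whitespace=True,
                          subsequent_indent='  ')
    return " \\\n".join(lines)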
def generate_tool_versions():
"""Generate an tool version number requirement from tool_requirements.py
The version number per tool will be saved in outdir-generated/version_$TOOL_NAME.txt
"""
# Populate __TOOL_REQUIREMENTS__
requirements_file = str(SRCTREE_TOP.joinpath("tool_requirements.py"))
exec(open(requirements_file).read(), globals())
# And then write a version file for every tool.
for tool in __TOOL_REQUIREMENTS__: # noqa: F821
version_path = config["outdir-generated"].joinpath('version_' + tool + '.txt')
version_path.parent.mkdir(parents=True, exist_ok=True)
with open(str(version_path), mode='w') as fout:
fout.write(__TOOL_REQUIREMENTS__[tool]) # noqa: F821
def generate_dif_docs():
"""Generate doxygen documentation and DIF listings from DIF source comments.
This invokes Doxygen, and a few other things. Be careful of changing any
paths here, some correspond to paths in other configuration files.
"""
logging.info("Generating Software API Documentation (Doxygen)...")
doxygen_out_path = config["outdir-generated"].joinpath("sw")
# The next two paths correspond to relative paths specified in the Doxyfile
doxygen_xml_path = doxygen_out_path.joinpath("api-xml")
# We need to prepare this path because doxygen won't `mkdir -p`
doxygen_sw_path = doxygen_out_path.joinpath("public-api/sw/apis")
doxygen_sw_path.mkdir(parents=True, exist_ok=True)
# This is where warnings will be generated
doxygen_warnings_path = doxygen_out_path.joinpath("doxygen_warnings.log")
if doxygen_warnings_path.exists():
doxygen_warnings_path.unlink()
doxygen_args = [
"doxygen",
str(SRCTREE_TOP.joinpath("util/doxygen/Doxyfile")),
]
doxygen_results = subprocess.run(doxygen_args, check=True,
cwd=str(SRCTREE_TOP), stdout=subprocess.PIPE,
env=dict(os.environ,
SRCTREE_TOP=str(SRCTREE_TOP),
DOXYGEN_OUT=str(doxygen_out_path),
))
logging.info("Generated Software API Documentation (Doxygen)")
if doxygen_warnings_path.exists():
logging.warning("Doxygen Generated Warnings (saved in {})".format(str(doxygen_warnings_path)))
combined_xml = gen_dif_listing.get_combined_xml(doxygen_xml_path)
dif_paths = []
dif_paths.extend(sorted(SRCTREE_TOP.joinpath(config["difs-directory"]).glob("dif_*.h")))
dif_listings_root_path = config["outdir-generated"].joinpath("sw/difs_listings")
difrefs_root_path = config["outdir-generated"].joinpath("sw/difref")
for dif_header_path in dif_paths:
dif_header = str(dif_header_path.relative_to(SRCTREE_TOP))
dif_listings_filename = dif_listings_root_path.joinpath(dif_header + ".html")
dif_listings_filename.parent.mkdir(parents=True, exist_ok=True)
with open(str(dif_listings_filename), mode='w') as dif_listings_html:
gen_dif_listing.gen_listing_html(combined_xml, dif_header,
dif_listings_html)
difref_functions = gen_dif_listing.get_difref_info(combined_xml, dif_header)
for function in difref_functions:
difref_filename = difrefs_root_path.joinpath(function["name"] + '.html')
difref_filename.parent.mkdir(parents=True, exist_ok=True)
with open(str(difref_filename), mode='w') as difref_html:
gen_dif_listing.gen_difref_html(function, difref_html)
logging.info("Generated DIF Listing for {}".format(dif_header))
def generate_otbn_isa():
'''Generate the OTBN ISA documentation fragment
The result is in Markdown format and is written under
outdir-generated/otbn-isa/
'''
otbn_dir = SRCTREE_TOP / 'hw/ip/otbn'
script = otbn_dir / 'util/yaml_to_doc.py'
yaml_file = otbn_dir / 'data/insns.yml'
out_dir = config['outdir-generated'].joinpath('otbn-isa')
subprocess.run([str(script), str(yaml_file), str(out_dir)], check=True)
def hugo_match_version(hugo_bin_path, version):
logging.info("Hugo binary path: %s", hugo_bin_path)
args = [str(hugo_bin_path), "version"]
process = subprocess.run(args,
universal_newlines=True,
stdout=subprocess.PIPE,
check=True,
cwd=str(SRCTREE_TOP))
logging.info("Checking for correct Hugo version: %s", version)
# Hugo version string example:
# "Hugo Static Site Generator v0.59.0-1DD0C69C/extended linux/amd64 BuildDate: 2019-10-21T09:45:38Z" # noqa: E501
return bool(re.search("v" + version + ".*/extended", process.stdout))
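# Quick check, not called anywhere, of the version regex against the example
# string quoted above: only an '/extended' build of the requested version
# passes.
def _demo_hugo_version_regex():
    out = ("Hugo Static Site Generator v0.60.0-1DD0C69C/extended "
           "linux/amd64 BuildDate: 2019-10-21T09:45:38Z")
    assert re.search("v" + "0.60.0" + ".*/extended", out)
    assert not re.search("v" + "0.59.0" + ".*/extended", out)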
def install_hugo(install_dir):
"""Download and "install" hugo into |install_dir|
install_dir is created if it doesn't exist yet.
Limitations:
Currently only 64 bit Linux is supported."""
# TODO: Support more configurations
if platform.system() != 'Linux' or platform.machine() != 'x86_64':
logging.fatal(
"Auto-install of hugo only supported for 64 bit Linux "
"currently. Manually install hugo and re-run this script.")
return False
download_url = ('https://github.com/gohugoio/hugo/releases/download/v{version}'
'/hugo_extended_{version}_Linux-64bit.tar.gz').format(
version=HUGO_EXTENDED_VERSION)
install_dir.mkdir(exist_ok=True, parents=True)
hugo_bin_path = install_dir / 'hugo'
try:
if hugo_match_version(hugo_bin_path, HUGO_EXTENDED_VERSION):
return hugo_bin_path
except PermissionError:
# If there is an error checking the version just continue to download
logging.info("Hugo version could not be verified. Continue to download.")
except FileNotFoundError:
pass
# TODO: Investigate the use of Python builtins for downloading. Extracting
# the archive will probably be a call to tar.
cmd = 'curl -sL {download_url} | tar -xzO --overwrite hugo > {hugo_bin_file}'.format(
hugo_bin_file=str(hugo_bin_path), download_url=download_url)
logging.info("Calling %s to download hugo.", cmd)
subprocess.run(cmd, shell=True, check=True, cwd=str(SRCTREE_TOP))
hugo_bin_path.chmod(0o755)
return hugo_bin_path
def invoke_hugo(preview, hugo_bin_path):
site_docs = SRCTREE_TOP.joinpath('site', 'docs')
config_file = str(site_docs.joinpath('config.toml'))
layout_dir = str(site_docs.joinpath('layouts'))
args = [
str(hugo_bin_path),
"--config",
config_file,
"--destination",
str(config["outdir"]),
"--contentDir",
str(SRCTREE_TOP),
"--layoutDir",
layout_dir,
]
if preview:
args += ["server"]
subprocess.run(args, check=True, cwd=str(SRCTREE_TOP))
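# The invocation above corresponds roughly to this command line, run from the
# repository root (sketch only):
#
#   hugo --config site/docs/config.toml --destination build/docs \
#        --contentDir . --layoutDir site/docs/layouts
#
# with 'server' appended in --preview mode.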
def main():
logging.basicConfig(level=logging.INFO,
format="%(asctime)s - %(message)s",
datefmt="%Y-%m-%d %H:%M")
parser = argparse.ArgumentParser(
prog="build_docs",
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE)
parser.add_argument(
'--preview',
action='store_true',
help="""Starts a local server with live reload (updates triggered upon
changes in the documentation files). This feature is intended
to preview the documentation locally.""")
parser.add_argument(
'--force-global',
action='store_true',
help="""Use a global installation of Hugo. This skips the version
check and relies on Hugo to be available from the environment.""")
parser.add_argument('--hugo', help="""TODO""")
args = parser.parse_args()
generate_hardware_blocks()
generate_dashboards()
generate_testplans()
generate_selfdocs()
generate_apt_reqs()
generate_tool_versions()
generate_dif_docs()
generate_otbn_isa()
hugo_localinstall_dir = SRCTREE_TOP / 'build' / 'docs-hugo'
os.environ["PATH"] += os.pathsep + str(hugo_localinstall_dir)
hugo_bin_path = "hugo"
if not args.force_global:
try:
hugo_bin_path = install_hugo(hugo_localinstall_dir)
except KeyboardInterrupt:
pass
try:
invoke_hugo(args.preview, hugo_bin_path)
except subprocess.CalledProcessError:
sys.exit("Error building site")
except PermissionError:
sys.exit("Error running Hugo")
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
test/functional/test_framework/test_framework.py | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
"""Base class for a bitcoinflex test script.
Individual bitcoinflex test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinflexds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinflexds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing bitcoinflexd/bitcoinflex-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoinflex-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: bitcoinflexds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a bitcoinflexd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinflexds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a bitcoinflexd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple bitcoinflexd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'bitcoinflexd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "bitcoinflexd should have exited with an error"
else:
assert_msg = "bitcoinflexd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1388534400 + (201 * 10 * 60)
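# 1388534400 is the Unix timestamp for Jan 1 2014 00:00:00 UTC, and
# 201 * 10 * 60 = 120600 seconds covers the 201 cached blocks generated
# 10 minutes apart.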
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stdout. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was passed in as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microsecond precision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BITCOINFLEXD", "bitcoinflexd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallets', 'chainstate', 'blocks']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some bitcoinflexd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOINFLEXD", "bitcoinflexd"),
help="bitcoinflexd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOINFLEXD", "bitcoinflexd"),
help="bitcoinflexd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
| []
| []
| [
"BITCOINFLEXD",
"PATH"
]
| [] | ["BITCOINFLEXD", "PATH"] | python | 2 | 0 | |
provider/cloudflare/cloudflare.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudflare
import (
"context"
"fmt"
"os"
"strconv"
"strings"
cloudflare "github.com/cloudflare/cloudflare-go"
log "github.com/sirupsen/logrus"
"sigs.k8s.io/external-dns/endpoint"
"sigs.k8s.io/external-dns/plan"
"sigs.k8s.io/external-dns/provider"
"sigs.k8s.io/external-dns/source"
)
const (
// cloudFlareCreate is a ChangeAction enum value
cloudFlareCreate = "CREATE"
// cloudFlareDelete is a ChangeAction enum value
cloudFlareDelete = "DELETE"
// cloudFlareUpdate is a ChangeAction enum value
cloudFlareUpdate = "UPDATE"
// defaultCloudFlareRecordTTL 1 = automatic
defaultCloudFlareRecordTTL = 1
)
var cloudFlareTypeNotSupported = map[string]bool{
"LOC": true,
"MX": true,
"NS": true,
"SPF": true,
"TXT": true,
"SRV": true,
}
// cloudFlareDNS is the subset of the CloudFlare API that we actually use. Add methods as required. Signatures must match exactly.
type cloudFlareDNS interface {
UserDetails() (cloudflare.User, error)
ZoneIDByName(zoneName string) (string, error)
ListZones(zoneID ...string) ([]cloudflare.Zone, error)
ListZonesContext(ctx context.Context, opts ...cloudflare.ReqOption) (cloudflare.ZonesResponse, error)
ZoneDetails(zoneID string) (cloudflare.Zone, error)
DNSRecords(zoneID string, rr cloudflare.DNSRecord) ([]cloudflare.DNSRecord, error)
CreateDNSRecord(zoneID string, rr cloudflare.DNSRecord) (*cloudflare.DNSRecordResponse, error)
DeleteDNSRecord(zoneID, recordID string) error
UpdateDNSRecord(zoneID, recordID string, rr cloudflare.DNSRecord) error
}
type zoneService struct {
service *cloudflare.API
}
func (z zoneService) UserDetails() (cloudflare.User, error) {
return z.service.UserDetails()
}
func (z zoneService) ListZones(zoneID ...string) ([]cloudflare.Zone, error) {
return z.service.ListZones(zoneID...)
}
func (z zoneService) ZoneIDByName(zoneName string) (string, error) {
return z.service.ZoneIDByName(zoneName)
}
func (z zoneService) CreateDNSRecord(zoneID string, rr cloudflare.DNSRecord) (*cloudflare.DNSRecordResponse, error) {
return z.service.CreateDNSRecord(zoneID, rr)
}
func (z zoneService) DNSRecords(zoneID string, rr cloudflare.DNSRecord) ([]cloudflare.DNSRecord, error) {
return z.service.DNSRecords(zoneID, rr)
}
func (z zoneService) UpdateDNSRecord(zoneID, recordID string, rr cloudflare.DNSRecord) error {
return z.service.UpdateDNSRecord(zoneID, recordID, rr)
}
func (z zoneService) DeleteDNSRecord(zoneID, recordID string) error {
return z.service.DeleteDNSRecord(zoneID, recordID)
}
func (z zoneService) ListZonesContext(ctx context.Context, opts ...cloudflare.ReqOption) (cloudflare.ZonesResponse, error) {
return z.service.ListZonesContext(ctx, opts...)
}
func (z zoneService) ZoneDetails(zoneID string) (cloudflare.Zone, error) {
return z.service.ZoneDetails(zoneID)
}
// CloudFlareProvider is an implementation of Provider for CloudFlare DNS.
type CloudFlareProvider struct {
provider.BaseProvider
Client cloudFlareDNS
// only consider hosted zones managing domains ending in this suffix
domainFilter endpoint.DomainFilter
zoneIDFilter provider.ZoneIDFilter
proxiedByDefault bool
DryRun bool
PaginationOptions cloudflare.PaginationOptions
}
// cloudFlareChange differentiates between ChangeActions
type cloudFlareChange struct {
Action string
ResourceRecord cloudflare.DNSRecord
}
// NewCloudFlareProvider initializes a new CloudFlare DNS based Provider.
func NewCloudFlareProvider(domainFilter endpoint.DomainFilter, zoneIDFilter provider.ZoneIDFilter, zonesPerPage int, proxiedByDefault bool, dryRun bool) (*CloudFlareProvider, error) {
// initialize via the chosen auth method and return a new API object
var (
config *cloudflare.API
err error
)
if os.Getenv("CF_API_TOKEN") != "" {
config, err = cloudflare.NewWithAPIToken(os.Getenv("CF_API_TOKEN"))
} else {
config, err = cloudflare.New(os.Getenv("CF_API_KEY"), os.Getenv("CF_API_EMAIL"))
}
if err != nil {
return nil, fmt.Errorf("failed to initialize cloudflare provider: %v", err)
}
provider := &CloudFlareProvider{
//Client: config,
Client: zoneService{config},
domainFilter: domainFilter,
zoneIDFilter: zoneIDFilter,
proxiedByDefault: proxiedByDefault,
DryRun: dryRun,
PaginationOptions: cloudflare.PaginationOptions{
PerPage: zonesPerPage,
Page: 1,
},
}
return provider, nil
}
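// Illustrative sketch (not part of the original file): constructing the provider relies on
// the CF_API_TOKEN (or CF_API_KEY/CF_API_EMAIL) environment variables read above. The token
// value and the zero-value filters below are assumptions for demonstration only; with an
// empty zone ID filter, Zones lists every zone visible to the credentials, as shown below.
//
//	os.Setenv("CF_API_TOKEN", "<your-api-token>")
//	p, err := NewCloudFlareProvider(endpoint.DomainFilter{}, provider.ZoneIDFilter{}, 50, false, true)
//	if err != nil {
//		log.Fatal(err)
//	}
//	records, err := p.Records(context.Background()) // list records from all matching zones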
// Zones returns the list of hosted zones.
func (p *CloudFlareProvider) Zones(ctx context.Context) ([]cloudflare.Zone, error) {
result := []cloudflare.Zone{}
p.PaginationOptions.Page = 1
// if there is a zoneIDFilter configured
// && if the filter isn't just a blank string (used in tests)
if len(p.zoneIDFilter.ZoneIDs) > 0 && p.zoneIDFilter.ZoneIDs[0] != "" {
log.Debugln("zoneIDFilter configured. only looking up zone IDs defined")
for _, zoneID := range p.zoneIDFilter.ZoneIDs {
log.Debugf("looking up zone %s", zoneID)
detailResponse, err := p.Client.ZoneDetails(zoneID)
if err != nil {
log.Errorf("zone %s lookup failed, %v", zoneID, err)
continue
}
log.WithFields(log.Fields{
"zoneName": detailResponse.Name,
"zoneID": detailResponse.ID,
}).Debugln("adding zone for consideration")
result = append(result, detailResponse)
}
return result, nil
}
log.Debugln("no zoneIDFilter configured, looking at all zones")
for {
zonesResponse, err := p.Client.ListZonesContext(ctx, cloudflare.WithPagination(p.PaginationOptions))
if err != nil {
return nil, err
}
for _, zone := range zonesResponse.Result {
if !p.domainFilter.Match(zone.Name) {
log.Debugf("zone %s not in domain filter", zone.Name)
continue
}
result = append(result, zone)
}
if p.PaginationOptions.Page == zonesResponse.ResultInfo.TotalPages {
break
}
p.PaginationOptions.Page++
}
return result, nil
}
// Records returns the list of records.
func (p *CloudFlareProvider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) {
zones, err := p.Zones(ctx)
if err != nil {
return nil, err
}
endpoints := []*endpoint.Endpoint{}
for _, zone := range zones {
records, err := p.Client.DNSRecords(zone.ID, cloudflare.DNSRecord{})
if err != nil {
return nil, err
}
// As CloudFlare does not support "sets" of targets, but instead returns
// a single entry for each name/type/target, we have to group by name
// and type to allow the planner to calculate the correct plan. See #992.
endpoints = append(endpoints, groupByNameAndType(records)...)
}
return endpoints, nil
}
// ApplyChanges applies a given set of changes in a given zone.
func (p *CloudFlareProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
cloudflareChanges := []*cloudFlareChange{}
for _, endpoint := range changes.Create {
for _, target := range endpoint.Targets {
cloudflareChanges = append(cloudflareChanges, p.newCloudFlareChange(cloudFlareCreate, endpoint, target))
}
}
for i, desired := range changes.UpdateNew {
current := changes.UpdateOld[i]
add, remove, leave := provider.Difference(current.Targets, desired.Targets)
for _, a := range add {
cloudflareChanges = append(cloudflareChanges, p.newCloudFlareChange(cloudFlareCreate, desired, a))
}
for _, a := range leave {
cloudflareChanges = append(cloudflareChanges, p.newCloudFlareChange(cloudFlareUpdate, desired, a))
}
for _, a := range remove {
cloudflareChanges = append(cloudflareChanges, p.newCloudFlareChange(cloudFlareDelete, current, a))
}
}
for _, endpoint := range changes.Delete {
for _, target := range endpoint.Targets {
cloudflareChanges = append(cloudflareChanges, p.newCloudFlareChange(cloudFlareDelete, endpoint, target))
}
}
return p.submitChanges(ctx, cloudflareChanges)
}
func (p *CloudFlareProvider) PropertyValuesEqual(name string, previous string, current string) bool {
if name == source.CloudflareProxiedKey {
return plan.CompareBoolean(p.proxiedByDefault, name, previous, current)
}
return p.BaseProvider.PropertyValuesEqual(name, previous, current)
}
// submitChanges takes a zone and a collection of Changes and sends them as a single transaction.
func (p *CloudFlareProvider) submitChanges(ctx context.Context, changes []*cloudFlareChange) error {
// return early if there is nothing to change
if len(changes) == 0 {
return nil
}
zones, err := p.Zones(ctx)
if err != nil {
return err
}
// separate into per-zone change sets to be passed to the API.
changesByZone := p.changesByZone(zones, changes)
for zoneID, changes := range changesByZone {
records, err := p.Client.DNSRecords(zoneID, cloudflare.DNSRecord{})
if err != nil {
return fmt.Errorf("could not fetch records from zone, %v", err)
}
for _, change := range changes {
logFields := log.Fields{
"record": change.ResourceRecord.Name,
"type": change.ResourceRecord.Type,
"ttl": change.ResourceRecord.TTL,
"action": change.Action,
"zone": zoneID,
}
log.WithFields(logFields).Info("Changing record.")
if p.DryRun {
continue
}
if change.Action == cloudFlareUpdate {
recordID := p.getRecordID(records, change.ResourceRecord)
if recordID == "" {
log.WithFields(logFields).Errorf("failed to find previous record: %v", change.ResourceRecord)
continue
}
err := p.Client.UpdateDNSRecord(zoneID, recordID, change.ResourceRecord)
if err != nil {
log.WithFields(logFields).Errorf("failed to update record: %v", err)
}
} else if change.Action == cloudFlareDelete {
recordID := p.getRecordID(records, change.ResourceRecord)
if recordID == "" {
log.WithFields(logFields).Errorf("failed to find previous record: %v", change.ResourceRecord)
continue
}
err := p.Client.DeleteDNSRecord(zoneID, recordID)
if err != nil {
log.WithFields(logFields).Errorf("failed to delete record: %v", err)
}
} else if change.Action == cloudFlareCreate {
_, err := p.Client.CreateDNSRecord(zoneID, change.ResourceRecord)
if err != nil {
log.WithFields(logFields).Errorf("failed to create record: %v", err)
}
}
}
}
return nil
}
// AdjustEndpoints modifies the endpoints as needed by the specific provider
func (p *CloudFlareProvider) AdjustEndpoints(endpoints []*endpoint.Endpoint) []*endpoint.Endpoint {
adjustedEndpoints := []*endpoint.Endpoint{}
for _, e := range endpoints {
if shouldBeProxied(e, p.proxiedByDefault) {
e.RecordTTL = 0
}
adjustedEndpoints = append(adjustedEndpoints, e)
}
return adjustedEndpoints
}
// changesByZone separates a multi-zone change into a single change per zone.
func (p *CloudFlareProvider) changesByZone(zones []cloudflare.Zone, changeSet []*cloudFlareChange) map[string][]*cloudFlareChange {
changes := make(map[string][]*cloudFlareChange)
zoneNameIDMapper := provider.ZoneIDName{}
for _, z := range zones {
zoneNameIDMapper.Add(z.ID, z.Name)
changes[z.ID] = []*cloudFlareChange{}
}
for _, c := range changeSet {
zoneID, _ := zoneNameIDMapper.FindZone(c.ResourceRecord.Name)
if zoneID == "" {
log.Debugf("Skipping record %s because no hosted zone matching record DNS Name was detected", c.ResourceRecord.Name)
continue
}
changes[zoneID] = append(changes[zoneID], c)
}
return changes
}
func (p *CloudFlareProvider) getRecordID(records []cloudflare.DNSRecord, record cloudflare.DNSRecord) string {
for _, zoneRecord := range records {
if zoneRecord.Name == record.Name && zoneRecord.Type == record.Type && zoneRecord.Content == record.Content {
return zoneRecord.ID
}
}
return ""
}
func (p *CloudFlareProvider) newCloudFlareChange(action string, endpoint *endpoint.Endpoint, target string) *cloudFlareChange {
ttl := defaultCloudFlareRecordTTL
proxied := shouldBeProxied(endpoint, p.proxiedByDefault)
if endpoint.RecordTTL.IsConfigured() {
ttl = int(endpoint.RecordTTL)
}
if len(endpoint.Targets) > 1 {
log.Errorf("Updates should have just one target")
}
return &cloudFlareChange{
Action: action,
ResourceRecord: cloudflare.DNSRecord{
Name: endpoint.DNSName,
TTL: ttl,
Proxied: proxied,
Type: endpoint.RecordType,
Content: target,
},
}
}
func shouldBeProxied(endpoint *endpoint.Endpoint, proxiedByDefault bool) bool {
proxied := proxiedByDefault
for _, v := range endpoint.ProviderSpecific {
if v.Name == source.CloudflareProxiedKey {
b, err := strconv.ParseBool(v.Value)
if err != nil {
log.Errorf("Failed to parse annotation [%s]: %v", source.CloudflareProxiedKey, err)
} else {
proxied = b
}
break
}
}
if cloudFlareTypeNotSupported[endpoint.RecordType] || strings.Contains(endpoint.DNSName, "*") {
proxied = false
}
return proxied
}
func groupByNameAndType(records []cloudflare.DNSRecord) []*endpoint.Endpoint {
endpoints := []*endpoint.Endpoint{}
// group supported records by name and type
groups := map[string][]cloudflare.DNSRecord{}
for _, r := range records {
if !provider.SupportedRecordType(r.Type) {
continue
}
groupBy := r.Name + r.Type
if _, ok := groups[groupBy]; !ok {
groups[groupBy] = []cloudflare.DNSRecord{}
}
groups[groupBy] = append(groups[groupBy], r)
}
// create single endpoint with all the targets for each name/type
for _, records := range groups {
targets := make([]string, len(records))
for i, record := range records {
targets[i] = record.Content
}
endpoints = append(endpoints,
endpoint.NewEndpointWithTTL(
records[0].Name,
records[0].Type,
endpoint.TTL(records[0].TTL),
targets...).
WithProviderSpecific(source.CloudflareProxiedKey, strconv.FormatBool(records[0].Proxied)))
}
return endpoints
}
| [
"\"CF_API_TOKEN\"",
"\"CF_API_TOKEN\"",
"\"CF_API_KEY\"",
"\"CF_API_EMAIL\""
]
| []
| [
"CF_API_EMAIL",
"CF_API_KEY",
"CF_API_TOKEN"
]
| [] | ["CF_API_EMAIL", "CF_API_KEY", "CF_API_TOKEN"] | go | 3 | 0 | |
ztools/xssh/agent/example_test.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package agent_test
import (
"log"
"net"
"os"
"github.com/kwang40/zgrab/ztools/xssh"
"github.com/kwang40/zgrab/ztools/xssh/agent"
)
func ExampleClientAgent() {
// ssh-agent has a UNIX socket under $SSH_AUTH_SOCK
socket := os.Getenv("SSH_AUTH_SOCK")
conn, err := net.Dial("unix", socket)
if err != nil {
log.Fatalf("net.Dial: %v", err)
}
agentClient := agent.NewClient(conn)
config := &xssh.ClientConfig{
User: "username",
Auth: []xssh.AuthMethod{
// Use a callback rather than PublicKeys
// so we only consult the agent once the remote server
// wants it.
xssh.PublicKeysCallback(agentClient.Signers),
},
}
sshc, err := xssh.Dial("tcp", "localhost:22", config)
if err != nil {
log.Fatalf("Dial: %v", err)
}
// .. use sshc
sshc.Close()
}
| [
"\"SSH_AUTH_SOCK\""
]
| []
| [
"SSH_AUTH_SOCK"
]
| [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 | |
cmd/govim/internal/golang_org_x_tools/imports/imports.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run mkstdlib.go
// Package imports implements a Go pretty-printer (like package "go/format")
// that also adds or removes import statements as necessary.
package imports
import (
"bufio"
"bytes"
"context"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/parser"
"go/printer"
"go/token"
"io"
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
"golang.org/x/tools/go/ast/astutil"
"github.com/govim/govim/cmd/govim/internal/golang_org_x_tools/gocommand"
)
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
type Options struct {
Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state.
Fragment bool // Accept fragment of a source file (no package statement)
AllErrors bool // Report all errors (not just the first 10 on different lines)
Comments bool // Print comments (true if nil *Options provided)
TabIndent bool // Use tabs for indent (true if nil *Options provided)
TabWidth int // Tab width (8 if nil *Options provided)
FormatOnly bool // Disable the insertion and deletion of imports
}
// Process implements golang.org/x/tools/imports.Process with explicit context in env.
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
src, opt, err = initialize(filename, src, opt)
if err != nil {
return nil, err
}
fileSet := token.NewFileSet()
file, adjust, err := parse(fileSet, filename, src, opt)
if err != nil {
return nil, err
}
if !opt.FormatOnly {
if err := fixImports(fileSet, file, filename, opt.Env); err != nil {
return nil, err
}
}
return formatFile(fileSet, file, src, adjust, opt)
}
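// Illustrative sketch (not part of the original file): a typical call to Process. The file
// name and source snippet are hypothetical; passing a nil *Options uses the defaults set up
// in initialize below.
//
//	src := []byte("package main\n\nfunc main() { fmt.Println(\"hello\") }\n")
//	out, err := Process("main.go", src, nil)
//	if err != nil {
//		// handle the error
//	}
//	// out now holds the gofmt-ed source with the missing "fmt" import inserted.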
// FixImports returns a list of fixes to the imports that, when applied,
// will leave the imports in the same state as Process.
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
src, opt, err = initialize(filename, src, opt)
if err != nil {
return nil, err
}
fileSet := token.NewFileSet()
file, _, err := parse(fileSet, filename, src, opt)
if err != nil {
return nil, err
}
return getFixes(fileSet, file, filename, opt.Env)
}
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
// is added in when parsing the file.
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
src, opt, err = initialize(filename, src, opt)
if err != nil {
return nil, err
}
// Don't use parse() -- we don't care about fragments or statement lists
// here, and we need to work with unparseable files.
fileSet := token.NewFileSet()
parserMode := parser.Mode(0)
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
parserMode |= extraMode
file, err := parser.ParseFile(fileSet, filename, src, parserMode)
if file == nil {
return nil, err
}
// Apply the fixes to the file.
apply(fileSet, file, fixes)
return formatFile(fileSet, file, src, nil, opt)
}
// GetAllCandidates gets all of the packages starting with prefix that can be
// imported by filename, sorted by import path.
func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error {
_, opt, err := initialize(filename, []byte{}, opt)
if err != nil {
return err
}
return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env)
}
// GetPackageExports returns all known packages with name pkg and their exports.
func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error {
_, opt, err := initialize(filename, []byte{}, opt)
if err != nil {
return err
}
return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env)
}
// initialize sets the values for opt and src.
// If they are provided, they are not changed. Otherwise opt is set to the
// default values and src is read from the file system.
func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, error) {
// Use defaults if opt is nil.
if opt == nil {
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
}
// Set the env if the user has not provided it.
if opt.Env == nil {
opt.Env = &ProcessEnv{
GOPATH: build.Default.GOPATH,
GOROOT: build.Default.GOROOT,
GOFLAGS: os.Getenv("GOFLAGS"),
GO111MODULE: os.Getenv("GO111MODULE"),
GOPROXY: os.Getenv("GOPROXY"),
GOSUMDB: os.Getenv("GOSUMDB"),
}
}
// Set the gocmdRunner if the user has not provided it.
if opt.Env.GocmdRunner == nil {
opt.Env.GocmdRunner = &gocommand.Runner{}
}
if src == nil {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, nil, err
}
src = b
}
return src, opt, nil
}
func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
mergeImports(opt.Env, fileSet, file)
sortImports(opt.Env, fileSet, file)
imps := astutil.Imports(fileSet, file)
var spacesBefore []string // import paths we need spaces before
for _, impSection := range imps {
// Within each block of contiguous imports, see if any
// import lines are in different group numbers. If so,
// we'll need to put a space between them so it's
// compatible with gofmt.
lastGroup := -1
for _, importSpec := range impSection {
importPath, _ := strconv.Unquote(importSpec.Path.Value)
groupNum := importGroup(opt.Env, importPath)
if groupNum != lastGroup && lastGroup != -1 {
spacesBefore = append(spacesBefore, importPath)
}
lastGroup = groupNum
}
}
printerMode := printer.UseSpaces
if opt.TabIndent {
printerMode |= printer.TabIndent
}
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
var buf bytes.Buffer
err := printConfig.Fprint(&buf, fileSet, file)
if err != nil {
return nil, err
}
out := buf.Bytes()
if adjust != nil {
out = adjust(src, out)
}
if len(spacesBefore) > 0 {
out, err = addImportSpaces(bytes.NewReader(out), spacesBefore)
if err != nil {
return nil, err
}
}
out, err = format.Source(out)
if err != nil {
return nil, err
}
return out, nil
}
// parse parses src, which was read from filename,
// as a Go source file or statement list.
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
parserMode := parser.Mode(0)
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
// Try as whole source file.
file, err := parser.ParseFile(fset, filename, src, parserMode)
if err == nil {
return file, nil, nil
}
// If the error is that the source file didn't begin with a
// package line and we accept fragmented input, fall through to
// try as a source fragment. Stop and return on any other error.
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
return nil, nil, err
}
// If this is a declaration list, make it a source file
// by inserting a package clause.
// Insert using a ;, not a newline, so that parse errors are on
// the correct line.
const prefix = "package main;"
psrc := append([]byte(prefix), src...)
file, err = parser.ParseFile(fset, filename, psrc, parserMode)
if err == nil {
// Gofmt will turn the ; into a \n.
// Do that ourselves now and update the file contents,
// so that positions and line numbers are correct going forward.
psrc[len(prefix)-1] = '\n'
fset.File(file.Package).SetLinesForContent(psrc)
// If a main function exists, we will assume this is a main
// package and leave the file.
if containsMainFunc(file) {
return file, nil, nil
}
adjust := func(orig, src []byte) []byte {
// Remove the package clause.
src = src[len(prefix):]
return matchSpace(orig, src)
}
return file, adjust, nil
}
// If the error is that the source file didn't begin with a
// declaration, fall through to try as a statement list.
// Stop and return on any other error.
if !strings.Contains(err.Error(), "expected declaration") {
return nil, nil, err
}
// If this is a statement list, make it a source file
// by inserting a package clause and turning the list
// into a function body. This handles expressions too.
// Insert using a ;, not a newline, so that the line numbers
// in fsrc match the ones in src.
fsrc := append(append([]byte("package p; func _() {"), src...), '}')
file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
if err == nil {
adjust := func(orig, src []byte) []byte {
// Remove the wrapping.
// Gofmt has turned the ; into a \n\n.
src = src[len("package p\n\nfunc _() {"):]
src = src[:len(src)-len("}\n")]
// Gofmt has also indented the function body one level.
// Remove that indent.
src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
return matchSpace(orig, src)
}
return file, adjust, nil
}
// Failed, and out of options.
return nil, nil, err
}
// containsMainFunc checks if a file contains a function declaration with the
// function signature 'func main()'
func containsMainFunc(file *ast.File) bool {
for _, decl := range file.Decls {
if f, ok := decl.(*ast.FuncDecl); ok {
if f.Name.Name != "main" {
continue
}
if len(f.Type.Params.List) != 0 {
continue
}
if f.Type.Results != nil && len(f.Type.Results.List) != 0 {
continue
}
return true
}
}
return false
}
func cutSpace(b []byte) (before, middle, after []byte) {
i := 0
for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') {
i++
}
j := len(b)
for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') {
j--
}
if i <= j {
return b[:i], b[i:j], b[j:]
}
return nil, nil, b[j:]
}
// matchSpace reformats src to use the same space context as orig.
// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
// 2) matchSpace copies the indentation of the first non-blank line in orig
// to every non-blank line in src.
// 3) matchSpace copies the trailing space from orig and uses it in place
// of src's trailing space.
func matchSpace(orig []byte, src []byte) []byte {
before, _, after := cutSpace(orig)
i := bytes.LastIndex(before, []byte{'\n'})
before, indent := before[:i+1], before[i+1:]
_, src, _ = cutSpace(src)
var b bytes.Buffer
b.Write(before)
for len(src) > 0 {
line := src
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, src = line[:i+1], line[i+1:]
} else {
src = nil
}
if len(line) > 0 && line[0] != '\n' { // not blank
b.Write(indent)
}
b.Write(line)
}
b.Write(after)
return b.Bytes()
}
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) {
var out bytes.Buffer
in := bufio.NewReader(r)
inImports := false
done := false
for {
s, err := in.ReadString('\n')
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
if !inImports && !done && strings.HasPrefix(s, "import") {
inImports = true
}
if inImports && (strings.HasPrefix(s, "var") ||
strings.HasPrefix(s, "func") ||
strings.HasPrefix(s, "const") ||
strings.HasPrefix(s, "type")) {
done = true
inImports = false
}
if inImports && len(breaks) > 0 {
if m := impLine.FindStringSubmatch(s); m != nil {
if m[1] == breaks[0] {
out.WriteByte('\n')
breaks = breaks[1:]
}
}
}
fmt.Fprint(&out, s)
}
return out.Bytes(), nil
}
| [
"\"GOFLAGS\"",
"\"GO111MODULE\"",
"\"GOPROXY\"",
"\"GOSUMDB\""
]
| []
| [
"GO111MODULE",
"GOSUMDB",
"GOFLAGS",
"GOPROXY"
]
| [] | ["GO111MODULE", "GOSUMDB", "GOFLAGS", "GOPROXY"] | go | 4 | 0 | |
dj/db_settings.py | import os
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv("DB_NAME"),
'USER': os.getenv("DB_USER"),
'PASSWORD': os.getenv("DB_PASSWORD"),
'HOST': os.getenv("DB_HOST"),
'PORT': os.getenv("DB_PORT"),
}
}
# DRY: Don't repeat yourself
SECRET_KEY = os.getenv("SK")
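# Illustrative sketch (not part of the original file): the settings above expect these
# environment variables to be present before Django starts. The values are placeholders,
# not real credentials.
#
#   export DB_NAME=mydb
#   export DB_USER=myuser
#   export DB_PASSWORD=changeme
#   export DB_HOST=127.0.0.1
#   export DB_PORT=5432
#   export SK="a-long-random-secret-key"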
| []
| []
| [
"DB_PASSWORD",
"DB_HOST",
"DB_PORT",
"DB_NAME",
"SK",
"DB_USER"
]
| [] | ["DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "SK", "DB_USER"] | python | 6 | 0 | |
config/config.go | package config
import (
"encoding/json"
"fmt"
"os"
"reflect"
"strconv"
"strings"
"sync"
"goyave.dev/goyave/v3/helper"
)
type object map[string]interface{}
// Entry is the internal representation of a config entry.
// It contains the entry value, its expected type (for validation)
// and a slice of authorized values (for validation too). If this slice
// is empty, it means any value can be used, provided it is of the correct type.
type Entry struct {
Value interface{}
Type reflect.Kind
IsSlice bool
AuthorizedValues []interface{} // Leave empty for "any"
}
type readFunc func(string) (object, error)
var config object
var configDefaults object = object{
"app": object{
"name": &Entry{"goyave", reflect.String, false, []interface{}{}},
"environment": &Entry{"localhost", reflect.String, false, []interface{}{}},
"debug": &Entry{true, reflect.Bool, false, []interface{}{}},
"defaultLanguage": &Entry{"en-US", reflect.String, false, []interface{}{}},
},
"server": object{
"host": &Entry{"127.0.0.1", reflect.String, false, []interface{}{}},
"domain": &Entry{"", reflect.String, false, []interface{}{}},
"protocol": &Entry{"http", reflect.String, false, []interface{}{"http", "https"}},
"port": &Entry{8080, reflect.Int, false, []interface{}{}},
"httpsPort": &Entry{8081, reflect.Int, false, []interface{}{}},
"timeout": &Entry{10, reflect.Int, false, []interface{}{}},
"maxUploadSize": &Entry{10.0, reflect.Float64, false, []interface{}{}},
"maintenance": &Entry{false, reflect.Bool, false, []interface{}{}},
"tls": object{
"cert": &Entry{nil, reflect.String, false, []interface{}{}},
"key": &Entry{nil, reflect.String, false, []interface{}{}},
},
},
"database": object{
"connection": &Entry{"none", reflect.String, false, []interface{}{}},
"host": &Entry{"127.0.0.1", reflect.String, false, []interface{}{}},
"port": &Entry{3306, reflect.Int, false, []interface{}{}},
"name": &Entry{"goyave", reflect.String, false, []interface{}{}},
"username": &Entry{"root", reflect.String, false, []interface{}{}},
"password": &Entry{"root", reflect.String, false, []interface{}{}},
"options": &Entry{"charset=utf8mb4&collation=utf8mb4_general_ci&parseTime=true&loc=Local", reflect.String, false, []interface{}{}},
"maxOpenConnections": &Entry{20, reflect.Int, false, []interface{}{}},
"maxIdleConnections": &Entry{20, reflect.Int, false, []interface{}{}},
"maxLifetime": &Entry{300, reflect.Int, false, []interface{}{}},
"autoMigrate": &Entry{false, reflect.Bool, false, []interface{}{}},
},
}
var mutex = &sync.RWMutex{}
// Register a new config entry and its validation.
//
// Each module should register its config entries in an "init()"
// function, even if they don't have a default value, in order to
// ensure they will be validated.
// Each module should use its own category and use a name both expressive
// and unique to avoid collisions.
// For example, the "auth" package registers, among others, "auth.basic.username"
// and "auth.jwt.expiry", thus creating a category for its package, and two subcategories
// for its features.
//
// To register an entry without a default value (only specify how it
// will be validated), set "Entry.Value" to "nil".
//
// Panics if an entry already exists for this key and is not identical to the
// one passed as parameter of this function. On the other hand, if the entries
// are identical, no conflict is expected so the configuration is left in its
// current state.
func Register(key string, entry Entry) {
mutex.Lock()
defer mutex.Unlock()
category, entryKey, exists := walk(configDefaults, key)
if exists {
if !reflect.DeepEqual(&entry, category[entryKey].(*Entry)) {
panic(fmt.Sprintf("Attempted to override registered config entry %q", key))
}
} else {
category[entryKey] = &entry
}
}
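// Illustrative sketch (not part of the original file): a module would typically register
// its entries in an init function. The key "mail.smtp.port" and its default value are
// hypothetical and only show the shape of a call.
//
//	func init() {
//		config.Register("mail.smtp.port", config.Entry{
//			Value:            587,
//			Type:             reflect.Int,
//			IsSlice:          false,
//			AuthorizedValues: []interface{}{},
//		})
//	}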
// Load loads the config.json file in the current working directory.
// If the "GOYAVE_ENV" env variable is set, the config file will be picked like so:
// - "production": "config.production.json"
// - "test": "config.test.json"
// - By default: "config.json"
func Load() error {
return LoadFrom(getConfigFilePath())
}
// LoadFrom loads a config file from the given path.
func LoadFrom(path string) error {
return load(readConfigFile, path)
}
// LoadJSON loads a configuration file from raw JSON. Can be used in combination with
// Go's 1.16 embed directive.
//
// var (
// //go:embed config.json
// cfg string
// )
//
// func main() {
// if err := config.LoadJSON(cfg); err != nil {
// goyave.ErrLogger.Println(err)
// os.Exit(goyave.ExitInvalidConfig)
// }
//
// if err := goyave.Start(route.Register); err != nil {
// os.Exit(err.(*goyave.Error).ExitCode)
// }
// }
func LoadJSON(cfg string) error {
return load(readString, cfg)
}
func load(readFunc readFunc, source string) error {
mutex.Lock()
defer mutex.Unlock()
config = make(object, len(configDefaults))
loadDefaults(configDefaults, config)
conf, err := readFunc(source)
if err != nil {
config = nil
return err
}
if err := override(conf, config); err != nil {
config = nil
return err
}
if err := config.validate(""); err != nil {
config = nil
return fmt.Errorf("Invalid config:%s", err.Error())
}
return nil
}
// IsLoaded returns true if the config has been loaded.
func IsLoaded() bool {
mutex.RLock()
defer mutex.RUnlock()
return config != nil
}
// Clear unloads the config.
// DANGEROUS, should only be used for testing.
func Clear() {
mutex.Lock()
config = nil
mutex.Unlock()
}
// Get a config entry. Panics if the entry doesn't exist.
func Get(key string) interface{} {
if val, ok := get(key); ok {
return val
}
panic(fmt.Sprintf("Config entry \"%s\" doesn't exist", key))
}
func get(key string) (interface{}, bool) {
mutex.RLock()
defer mutex.RUnlock()
if config == nil {
panic("Config is not loaded")
}
currentCategory := config
b := 0
e := strings.Index(key, ".")
if e == -1 {
e = len(key)
}
for path := key[b:e]; ; path = key[b:e] {
entry, ok := currentCategory[path]
if !ok {
break
}
if category, ok := entry.(object); ok {
currentCategory = category
} else {
val := entry.(*Entry).Value
return val, val != nil // nil means unset
}
if e+1 <= len(key) {
b = e + 1
newE := strings.Index(key[b:], ".")
if newE == -1 {
e = len(key)
} else {
e = newE + b
}
}
}
return nil, false
}
// GetString a config entry as string.
// Panics if entry is not a string or if it doesn't exist.
func GetString(key string) string {
str, ok := Get(key).(string)
if !ok {
panic(fmt.Sprintf("Config entry \"%s\" is not a string", key))
}
return str
}
// GetBool a config entry as bool.
// Panics if entry is not a bool or if it doesn't exist.
func GetBool(key string) bool {
val, ok := Get(key).(bool)
if !ok {
panic(fmt.Sprintf("Config entry \"%s\" is not a bool", key))
}
return val
}
// GetInt a config entry as int.
// Panics if entry is not an int or if it doesn't exist.
func GetInt(key string) int {
val, ok := Get(key).(int)
if !ok {
panic(fmt.Sprintf("Config entry \"%s\" is not an int", key))
}
return val
}
// GetFloat a config entry as float64.
// Panics if entry is not a float64 or if it doesn't exist.
func GetFloat(key string) float64 {
val, ok := Get(key).(float64)
if !ok {
panic(fmt.Sprintf("Config entry \"%s\" is not a float64", key))
}
return val
}
// GetStringSlice a config entry as []string.
// Panics if entry is not a string slice or if it doesn't exist.
func GetStringSlice(key string) []string {
str, ok := Get(key).([]string)
if !ok {
panic(fmt.Sprintf("Config entry \"%s\" is not a string slice", key))
}
return str
}
// GetBoolSlice a config entry as []bool.
// Panics if entry is not a bool slice or if it doesn't exist.
func GetBoolSlice(key string) []bool {
str, ok := Get(key).([]bool)
if !ok {
panic(fmt.Sprintf("Config entry \"%s\" is not a bool slice", key))
}
return str
}
// GetIntSlice a config entry as []int.
// Panics if entry is not an int slice or if it doesn't exist.
func GetIntSlice(key string) []int {
str, ok := Get(key).([]int)
if !ok {
panic(fmt.Sprintf("Config entry \"%s\" is not an int slice", key))
}
return str
}
// GetFloatSlice a config entry as []float64.
// Panics if entry is not a float slice or if it doesn't exist.
func GetFloatSlice(key string) []float64 {
str, ok := Get(key).([]float64)
if !ok {
panic(fmt.Sprintf("Config entry \"%s\" is not a float64 slice", key))
}
return str
}
// Has checks if a config entry exists.
func Has(key string) bool {
_, ok := get(key)
return ok
}
// Set a config entry.
// The change is temporary and will not be saved for next boot.
// Use "nil" to unset a value.
//
// - A category cannot be replaced with an entry.
// - An entry cannot be replaced with a category.
// - New categories can be created if they don't already exist.
// - New entries can be created if they don't already exist. This new entry
// will be subsequently validated using the type of its initial value and
// have an empty slice as authorized values (meaning it can have any value of its type)
//
// Panics and reverts changes in case of error.
func Set(key string, value interface{}) {
mutex.Lock()
defer mutex.Unlock()
if config == nil {
panic("Config is not loaded")
}
category, entryKey, exists := walk(config, key)
if exists {
entry := category[entryKey].(*Entry)
previous := entry.Value
entry.Value = value
if err := entry.validate(key); err != nil {
entry.Value = previous
panic(err)
}
category[entryKey] = entry
} else {
category[entryKey] = makeEntryFromValue(value)
}
}
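// Illustrative sketch (not part of the original file): overriding entries at runtime,
// e.g. from a test. The keys exist in configDefaults above; the values are hypothetical.
//
//	config.Set("app.name", "my-test-app") // replace an existing string entry
//	config.Set("server.port", 1337)       // validated against the registered type (int)
//	config.Set("app.name", nil)           // unset the value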
// walk the config using the key. Returns the deepest category, the entry key
// with its path stripped ("app.name" -> "name") and true if the entry already
// exists, false if it's not registered.
func walk(currentCategory object, key string) (object, string, bool) {
if key == "" {
panic("Empty key is not allowed")
}
if key[len(key)-1:] == "." {
panic("Keys ending with a dot are not allowed")
}
b := 0
e := strings.Index(key, ".")
if e == -1 {
e = len(key)
}
for catKey := key[b:e]; ; catKey = key[b:e] {
entry, ok := currentCategory[catKey]
if !ok {
// If categories are missing, create them
currentCategory = createMissingCategories(currentCategory, key[b:])
i := strings.LastIndex(key, ".")
if i == -1 {
catKey = key
} else {
catKey = key[i+1:]
}
// Entry doesn't exist and is not registered
return currentCategory, catKey, false
}
if category, ok := entry.(object); ok {
currentCategory = category
} else {
if e < len(key) {
panic(fmt.Sprintf("Attempted to add an entry to non-category %q", key[:e]))
}
// Entry exists
return currentCategory, catKey, true
}
if e+1 <= len(key) {
b = e + 1
newE := strings.Index(key[b:], ".")
if newE == -1 {
e = len(key)
} else {
e = newE + b
}
} else {
break
}
}
panic(fmt.Sprintf("Attempted to replace the %q category with an entry", key))
}
// createMissingCategories based on the key path, starting at the given index.
// Doesn't create anything if not needed.
// Returns the deepest category created, or the provided object if nothing has
// been created.
func createMissingCategories(currentCategory object, path string) object {
b := 0
e := strings.Index(path, ".")
if e == -1 {
return currentCategory
}
for catKey := path[b:e]; ; catKey = path[b:e] {
newCategory := object{}
currentCategory[catKey] = newCategory
currentCategory = newCategory
if e+1 <= len(path) {
b = e + 1
newE := strings.Index(path[b:], ".")
if newE == -1 {
return currentCategory
}
e = newE + b
}
}
}
func loadDefaults(src object, dst object) {
for k, v := range src {
if obj, ok := v.(object); ok {
sub := make(object, len(obj))
loadDefaults(obj, sub)
dst[k] = sub
} else {
entry := v.(*Entry)
value := entry.Value
t := reflect.TypeOf(value)
if t != nil && t.Kind() == reflect.Slice {
list := reflect.ValueOf(value)
length := list.Len()
slice := reflect.MakeSlice(reflect.SliceOf(t.Elem()), 0, length)
for i := 0; i < length; i++ {
slice = reflect.Append(slice, list.Index(i))
}
value = slice.Interface()
}
dst[k] = &Entry{value, entry.Type, entry.IsSlice, entry.AuthorizedValues}
}
}
}
func override(src object, dst object) error {
for k, v := range src {
if obj, ok := v.(map[string]interface{}); ok {
if dstObj, ok := dst[k]; !ok {
dst[k] = make(object, len(obj))
} else if _, ok := dstObj.(object); !ok {
// Conflict: destination is not a category
return fmt.Errorf("Invalid config:\n\t- Cannot override entry %q with a category", k)
}
if err := override(obj, dst[k].(object)); err != nil {
return err
}
} else if entry, ok := dst[k]; ok {
e, ok := entry.(*Entry)
if !ok {
// Conflict: override category with an entry
return fmt.Errorf("Invalid config:\n\t- Cannot override category %q with an entry", k)
}
e.Value = v
} else {
// If entry doesn't exist (and is not registered),
// register it with the type of the value given here
// and "any" authorized values.
dst[k] = makeEntryFromValue(v)
}
}
return nil
}
func makeEntryFromValue(value interface{}) *Entry {
isSlice := false
t := reflect.TypeOf(value)
kind := t.Kind()
if kind == reflect.Slice {
kind = t.Elem().Kind()
isSlice = true
}
return &Entry{value, kind, isSlice, []interface{}{}}
}
func readConfigFile(file string) (object, error) {
conf := make(object, len(configDefaults))
configFile, err := os.Open(file)
if err == nil {
defer configFile.Close()
jsonParser := json.NewDecoder(configFile)
err = jsonParser.Decode(&conf)
}
return conf, err
}
func readString(str string) (object, error) {
conf := make(object, len(configDefaults))
if err := json.NewDecoder(strings.NewReader(str)).Decode(&conf); err != nil {
return nil, err
}
return conf, nil
}
func getConfigFilePath() string {
env := strings.ToLower(os.Getenv("GOYAVE_ENV"))
if env == "local" || env == "localhost" || env == "" {
return "config.json"
}
return "config." + env + ".json"
}
func (o object) validate(key string) error {
message := ""
valid := true
for k, entry := range o {
var subKey string
if key == "" {
subKey = k
} else {
subKey = key + "." + k
}
if category, ok := entry.(object); ok {
if err := category.validate(subKey); err != nil {
message += err.Error()
valid = false
}
} else if err := entry.(*Entry).validate(subKey); err != nil {
message += "\n\t- " + err.Error()
valid = false
}
}
if !valid {
return fmt.Errorf(message)
}
return nil
}
func (e *Entry) validate(key string) error {
if e.Value == nil { // nil values means unset
return nil
}
if err := e.tryEnvVarConversion(key); err != nil {
return err
}
t := reflect.TypeOf(e.Value)
kind := t.Kind()
if e.IsSlice && kind == reflect.Slice {
kind = t.Elem().Kind()
}
if kind != e.Type {
if !e.tryIntConversion(kind) {
var message string
if e.IsSlice {
message = "%q must be a slice of %s"
} else {
message = "%q type must be %s"
}
return fmt.Errorf(message, key, e.Type)
}
return nil
}
if len(e.AuthorizedValues) > 0 {
if e.IsSlice {
// Accepted values for slices define the values that can be used inside the slice
// It doesn't represent the value of the slice itself (content and order)
list := reflect.ValueOf(e.Value)
length := list.Len()
authorizedValuesList := reflect.ValueOf(e.AuthorizedValues)
for i := 0; i < length; i++ {
if !e.authorizedValuesContains(authorizedValuesList, list.Index(i).Interface()) {
return fmt.Errorf("%q elements must have one of the following values: %v", key, e.AuthorizedValues)
}
}
} else if !helper.Contains(e.AuthorizedValues, e.Value) {
return fmt.Errorf("%q must have one of the following values: %v", key, e.AuthorizedValues)
}
}
return nil
}
// authorizedValuesContains avoids to recreate the reflect.Value of the list for every check
func (e *Entry) authorizedValuesContains(list reflect.Value, value interface{}) bool {
length := list.Len()
for i := 0; i < length; i++ {
if list.Index(i).Interface() == value {
return true
}
}
return false
}
func (e *Entry) tryIntConversion(kind reflect.Kind) bool {
if kind == reflect.Float64 && e.Type == reflect.Int {
if e.IsSlice {
return e.convertIntSlice()
}
intVal, ok := e.convertInt(e.Value.(float64))
if ok {
e.Value = intVal
return true
}
}
return false
}
func (e *Entry) convertInt(value float64) (int, bool) {
intVal := int(value)
if value == float64(intVal) {
return intVal, true
}
return 0, false
}
func (e *Entry) convertIntSlice() bool {
original := e.Value.([]float64)
slice := make([]int, len(original))
for k, v := range original {
intVal, ok := e.convertInt(v)
if !ok {
return false
}
slice[k] = intVal
}
e.Value = slice
return true
}
func (e *Entry) tryEnvVarConversion(key string) error {
str, ok := e.Value.(string)
if ok {
val, err := e.convertEnvVar(str, key)
if err == nil && val != nil {
e.Value = val
}
return err
}
return nil
}
func (e *Entry) convertEnvVar(str, key string) (interface{}, error) {
if strings.HasPrefix(str, "${") && strings.HasSuffix(str, "}") {
varName := str[2 : len(str)-1]
value, set := os.LookupEnv(varName)
if !set {
return nil, fmt.Errorf("%q: %q environment variable is not set", key, varName)
}
switch e.Type {
case reflect.Int:
if i, err := strconv.Atoi(value); err == nil {
return i, nil
}
return nil, fmt.Errorf("%q could not be converted to int from environment variable %q of value %q", key, varName, value)
case reflect.Float64:
if f, err := strconv.ParseFloat(value, 64); err == nil {
return f, nil
}
return nil, fmt.Errorf("%q could not be converted to float64 from environment variable %q of value %q", key, varName, value)
case reflect.Bool:
if b, err := strconv.ParseBool(value); err == nil {
return b, nil
}
return nil, fmt.Errorf("%q could not be converted to bool from environment variable %q of value %q", key, varName, value)
default:
// Keep value as string if type is not supported and let validation do its job
return value, nil
}
}
return nil, nil
}
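// Illustrative sketch (not part of the original file): the "${VAR}" syntax handled above
// lets config.json values be resolved from environment variables at load time. The entry
// and variable name below are hypothetical.
//
//	{
//	  "database": {
//	    "password": "${DB_PASSWORD}"
//	  }
//	}
//
// With DB_PASSWORD set in the environment, GetString("database.password") returns its
// value; int, float64 and bool entries are converted with strconv as shown above.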
| [
"\"GOYAVE_ENV\""
]
| []
| [
"GOYAVE_ENV"
]
| [] | ["GOYAVE_ENV"] | go | 1 | 0 | |
sdk/translation/azure-ai-translation-document/samples/sample_check_document_statuses.py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_check_document_statuses.py
DESCRIPTION:
This sample demonstrates how to begin translation and then monitor each document's status
and progress.
To set up your containers for translation and generate SAS tokens to your containers (or files)
with the appropriate permissions, see the README.
USAGE:
python sample_check_document_statuses.py
Set the environment variables with your own values before running the sample:
1) AZURE_DOCUMENT_TRANSLATION_ENDPOINT - the endpoint to your Document Translation resource.
2) AZURE_DOCUMENT_TRANSLATION_KEY - your Document Translation API key.
3) AZURE_SOURCE_CONTAINER_URL - the container SAS URL to your source container which has the documents
to be translated.
4) AZURE_TARGET_CONTAINER_URL - the container SAS URL to your target container where the translated documents
will be written.
"""
def sample_document_status_checks():
# [START list_document_statuses]
import os
import time
from azure.core.credentials import AzureKeyCredential
from azure.ai.translation.document import DocumentTranslationClient
endpoint = os.environ["AZURE_DOCUMENT_TRANSLATION_ENDPOINT"]
key = os.environ["AZURE_DOCUMENT_TRANSLATION_KEY"]
source_container_url = os.environ["AZURE_SOURCE_CONTAINER_URL"]
target_container_url = os.environ["AZURE_TARGET_CONTAINER_URL"]
client = DocumentTranslationClient(endpoint, AzureKeyCredential(key))
poller = client.begin_translation(source_container_url, target_container_url, "es")
completed_docs = []
while not poller.done():
time.sleep(30)
doc_statuses = client.list_document_statuses(poller.id)
for document in doc_statuses:
if document.id not in completed_docs:
if document.status == "Succeeded":
print(f"Document at {document.source_document_url} was translated to {document.translated_to} "
f"language. You can find translated document at {document.translated_document_url}")
completed_docs.append(document.id)
if document.status == "Failed":
print(f"Document at {document.source_document_url} failed translation. "
f"Error Code: {document.error.code}, Message: {document.error.message}")
completed_docs.append(document.id)
if document.status == "Running":
print(f"Document ID: {document.id}, translation progress is "
f"{document.translation_progress * 100} percent")
print("\nTranslation completed.")
# [END list_document_statuses]
if __name__ == '__main__':
sample_document_status_checks()
| []
| []
| [
"AZURE_DOCUMENT_TRANSLATION_KEY",
"AZURE_SOURCE_CONTAINER_URL",
"AZURE_DOCUMENT_TRANSLATION_ENDPOINT",
"AZURE_TARGET_CONTAINER_URL"
]
| [] | ["AZURE_DOCUMENT_TRANSLATION_KEY", "AZURE_SOURCE_CONTAINER_URL", "AZURE_DOCUMENT_TRANSLATION_ENDPOINT", "AZURE_TARGET_CONTAINER_URL"] | python | 4 | 0 | |
src/bindings/python/tests_compatibility/test_inference_engine/test_InferRequest.py | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import os
import pytest
import threading
from datetime import datetime
import time
from openvino.inference_engine import ie_api as ie
from tests_compatibility.conftest import model_path, image_path, create_encoder
import ngraph as ng
from ngraph.impl import Function, Type
is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)
path_to_img = image_path()
def create_function_with_memory(input_shape, data_type):
input_data = ng.parameter(input_shape, name="input_data", dtype=data_type)
rv = ng.read_value(input_data, "var_id_667")
add = ng.add(rv, input_data, name="MemoryAdd")
node = ng.assign(add, "var_id_667")
res = ng.result(add, "res")
func = Function(results=[res], sinks=[node], parameters=[input_data], name="name")
caps = Function.to_capsule(func)
return caps
def read_image():
import cv2
n, c, h, w = (1, 3, 32, 32)
image = cv2.imread(path_to_img)
if image is None:
raise FileNotFoundError("Input image not found")
image = cv2.resize(image, (h, w)) / 255
image = image.transpose((2, 0, 1)).astype(np.float32)
image = image.reshape((n, c, h, w))
return image
def load_sample_model(device, num_requests=1):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=num_requests)
return executable_network
def test_input_blobs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
td = ie.TensorDesc("FP32", (1, 3, 32, 32), "NCHW")
assert executable_network.requests[0].input_blobs['data'].tensor_desc == td
def test_output_blobs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
td = ie.TensorDesc("FP32", (1, 10), "NC")
assert executable_network.requests[0].output_blobs['fc_out'].tensor_desc == td
def test_inputs_list(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
for req in executable_network.requests:
assert len(req._inputs_list) == 1
assert "data" in req._inputs_list
del ie_core
def test_outputs_list(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=2)
for req in executable_network.requests:
assert len(req._outputs_list) == 1
assert "fc_out" in req._outputs_list
del ie_core
def test_access_input_buffer(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
buffer = executable_network.requests[0]._get_blob_buffer("data".encode()).to_numpy()
assert buffer.shape == (1, 3, 32, 32)
assert buffer.strides == (12288, 4096, 128, 4)
assert buffer.dtype == np.float32
del executable_network
del ie_core
del net
def test_access_output_buffer(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
buffer = executable_network.requests[0]._get_blob_buffer("fc_out".encode()).to_numpy()
assert buffer.shape == (1, 10)
assert buffer.strides == (40, 4)
assert buffer.dtype == np.float32
del executable_network
del ie_core
del net
def test_write_to_input_blobs_directly(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = executable_network.requests[0]
input_data = request.input_blobs["data"]
input_data.buffer[:] = img
assert np.array_equal(executable_network.requests[0].input_blobs["data"].buffer, img)
del executable_network
del ie_core
del net
def test_write_to_input_blobs_copy(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
executable_network = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = executable_network.requests[0]
request.input_blobs["data"].buffer[:] = img
assert np.allclose(executable_network.requests[0].input_blobs["data"].buffer, img)
del executable_network
del ie_core
del net
def test_infer(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.infer({'data': img})
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
del exec_net
del ie_core
del net
def test_async_infer_default_timeout(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
request.wait()
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
del exec_net
del ie_core
del net
def test_async_infer_wait_finish(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
request.wait(ie.WaitMode.RESULT_READY)
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
del exec_net
del ie_core
del net
def test_async_infer_wait_time(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=2)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
start_time = datetime.utcnow()
status = request.wait(ie.WaitMode.RESULT_READY)
assert status == ie.StatusCode.OK
time_delta = datetime.utcnow() - start_time
latency_ms = (time_delta.microseconds / 1000) + (time_delta.seconds * 1000)
timeout = max(100, latency_ms)
request = exec_net.requests[1]
request.async_infer({'data': img})
max_repeat = 10
status = ie.StatusCode.REQUEST_BUSY
i = 0
while i < max_repeat and status != ie.StatusCode.OK:
status = request.wait(timeout)
i += 1
assert status == ie.StatusCode.OK
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
del exec_net
del ie_core
del net
def test_async_infer_wait_status(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.async_infer({'data': img})
request.wait(ie.WaitMode.RESULT_READY)
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
status = request.wait(ie.WaitMode.STATUS_ONLY)
assert status == ie.StatusCode.OK
del exec_net
del ie_core
del net
def test_async_infer_fill_inputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.input_blobs['data'].buffer[:] = img
request.async_infer()
status_end = request.wait()
assert status_end == ie.StatusCode.OK
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res[0]) == 2
del exec_net
del ie_core
del net
def test_infer_modify_outputs(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
outputs0 = exec_net.infer({'data': img})
status_end = request.wait()
assert status_end == ie.StatusCode.OK
assert np.argmax(outputs0['fc_out']) == 2
outputs0['fc_out'][:] = np.zeros(shape=(1, 10), dtype=np.float32)
outputs1 = request.output_blobs
assert np.argmax(outputs1['fc_out'].buffer) == 2
outputs1['fc_out'].buffer[:] = np.ones(shape=(1, 10), dtype=np.float32)
outputs2 = request.output_blobs
assert np.argmax(outputs2['fc_out'].buffer) == 2
del exec_net
del ie_core
del net
def test_async_infer_callback(device):
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
@static_vars(callback_called=0)
def callback(self, status):
callback.callback_called = 1
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.set_completion_callback(callback)
request.async_infer({'data': img})
status = request.wait()
assert status == ie.StatusCode.OK
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
assert callback.callback_called == 1
del exec_net
del ie_core
def test_async_infer_callback_wait_before_start(device):
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
@static_vars(callback_called=0)
def callback(self, status):
callback.callback_called = 1
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request = exec_net.requests[0]
request.set_completion_callback(callback)
status = request.wait()
assert status == ie.StatusCode.INFER_NOT_STARTED
request.async_infer({'data': img})
status = request.wait()
assert status == ie.StatusCode.OK
res = request.output_blobs['fc_out'].buffer
assert np.argmax(res) == 2
assert callback.callback_called == 1
del exec_net
del ie_core
def test_async_infer_callback_wait_in_callback(device):
class InferReqWrap:
def __init__(self, request):
self.request = request
self.cv = threading.Condition()
self.request.set_completion_callback(self.callback)
self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY)
assert self.status_code == ie.StatusCode.INFER_NOT_STARTED
def callback(self, statusCode, userdata):
self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY)
self.cv.acquire()
self.cv.notify()
self.cv.release()
def execute(self, input_data):
self.request.async_infer(input_data)
self.cv.acquire()
self.cv.wait()
self.cv.release()
status = self.request.wait(ie.WaitMode.RESULT_READY)
assert status == ie.StatusCode.OK
assert self.status_code == ie.StatusCode.RESULT_NOT_READY
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
img = read_image()
request_wrap = InferReqWrap(exec_net.requests[0])
request_wrap.execute({'data': img})
del exec_net
del ie_core
def test_async_infer_wait_while_callback_will_not_finish(device):
def callback(status, callback_status):
time.sleep(0.01)
callback_status['finished'] = True
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
callback_status = {}
callback_status['finished'] = False
request = exec_net.requests[0]
request.set_completion_callback(callback, py_data=callback_status)
img = read_image()
request.async_infer({'data': img})
request.wait()
assert callback_status['finished'] == True
def test_get_perf_counts(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
ie_core.set_config({"PERF_COUNT": "YES"}, device)
exec_net = ie_core.load_network(net, device)
img = read_image()
request = exec_net.requests[0]
request.infer({'data': img})
pc = request.get_perf_counts()
assert pc['29']["status"] == "EXECUTED"
del exec_net
del ie_core
del net
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason=f"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, "
"Dynamic batch fully supported only on CPU")
def test_set_batch_size(device):
ie_core = ie.IECore()
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to dynamic batch isn't supported")
ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device)
net = ie_core.read_network(test_net_xml, test_net_bin)
net.batch_size = 10
data = np.zeros(shape=net.input_info['data'].input_data.shape)
exec_net = ie_core.load_network(net, device)
data[0] = read_image()[0]
request = exec_net.requests[0]
request.set_batch(1)
request.infer({'data': data})
assert np.allclose(int(round(request.output_blobs['fc_out'].buffer[0][2])), 1), "Incorrect data for 1st batch"
del exec_net
del ie_core
del net
def test_set_zero_batch_size(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
request = exec_net.requests[0]
with pytest.raises(ValueError) as e:
request.set_batch(0)
assert "Batch size should be positive integer number but 0 specified" in str(e.value)
del exec_net
del ie_core
del net
def test_set_negative_batch_size(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device, num_requests=1)
request = exec_net.requests[0]
with pytest.raises(ValueError) as e:
request.set_batch(-1)
assert "Batch size should be positive integer number but -1 specified" in str(e.value)
del exec_net
del ie_core
del net
def test_blob_setter(device):
ie_core = ie.IECore()
if device == "CPU":
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin")
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net_1 = ie_core.load_network(network=net, device_name=device, num_requests=1)
net.input_info['data'].layout = "NHWC"
exec_net_2 = ie_core.load_network(network=net, device_name=device, num_requests=1)
img = read_image()
res_1 = np.sort(exec_net_1.infer({"data": img})['fc_out'])
img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32)
tensor_desc = ie.TensorDesc("FP32", [1, 3, 32, 32], "NHWC")
img_blob = ie.Blob(tensor_desc, img)
request = exec_net_2.requests[0]
request.set_blob('data', img_blob)
request.infer()
res_2 = np.sort(request.output_blobs['fc_out'].buffer)
assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)
def test_blob_setter_with_preprocess(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
img = read_image()
tensor_desc = ie.TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
img_blob = ie.Blob(tensor_desc, img)
preprocess_info = ie.PreProcessInfo()
preprocess_info.mean_variant = ie.MeanVariant.MEAN_IMAGE
request = exec_net.requests[0]
request.set_blob('data', img_blob, preprocess_info)
pp = request.preprocess_info["data"]
assert pp.mean_variant == ie.MeanVariant.MEAN_IMAGE
def test_getting_preprocess(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
request = exec_net.requests[0]
preprocess_info = request.preprocess_info["data"]
assert isinstance(preprocess_info, ie.PreProcessInfo)
assert preprocess_info.mean_variant == ie.MeanVariant.NONE
def test_resize_algorithm_work(device):
ie_core = ie.IECore()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net_1 = ie_core.load_network(network=net, device_name=device, num_requests=1)
img = read_image()
res_1 = np.sort(exec_net_1.infer({"data": img})['fc_out'])
net.input_info['data'].preprocess_info.resize_algorithm = ie.ResizeAlgorithm.RESIZE_BILINEAR
exec_net_2 = ie_core.load_network(net, device)
import cv2
image = cv2.imread(path_to_img)
if image is None:
raise FileNotFoundError("Input image not found")
image = image / 255
image = image.transpose((2, 0, 1)).astype(np.float32)
image = np.expand_dims(image, 0)
tensor_desc = ie.TensorDesc("FP32", [1, 3, image.shape[2], image.shape[3]], "NCHW")
img_blob = ie.Blob(tensor_desc, image)
request = exec_net_2.requests[0]
assert request.preprocess_info["data"].resize_algorithm == ie.ResizeAlgorithm.RESIZE_BILINEAR
request.set_blob('data', img_blob)
request.infer()
res_2 = np.sort(request.output_blobs['fc_out'].buffer)
assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)
@pytest.mark.parametrize("mode", ["set_init_memory_state", "reset_memory_state", "normal"])
@pytest.mark.parametrize("data_type", ["FP32", "FP16", "I32"])
@pytest.mark.parametrize("input_shape", [[10], [10, 10], [10, 10, 10], [2, 10, 10, 10]])
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason=f"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, "
"Memory layers fully supported only on CPU")
def test_query_state_write_buffer(device, input_shape, data_type, mode):
ie_core = ie.IECore()
if device == "CPU":
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin")
layout = ["C", "HW", "CHW", "NCHW"]
from openvino.inference_engine import TensorDesc, Blob, format_map
net = ie.IENetwork(create_function_with_memory(input_shape, format_map[data_type]))
ie_core = ie.IECore()
exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
request = exec_net.requests[0]
mem_states = request.query_state()
mem_state = mem_states[0]
assert mem_state.name == 'var_id_667'
# todo: Uncomment after fix 45611,
# CPU plugin returns outputs and memory state in FP32 in case of FP16 original precision
#assert mem_state.state.tensor_desc.precision == data_type
for i in range(1, 10):
if mode == "set_init_memory_state":
# create initial value
const_init = 5
init_array = np.full(input_shape, const_init, dtype=format_map[mem_state.state.tensor_desc.precision])
tensor_desc = TensorDesc(mem_state.state.tensor_desc.precision, input_shape, layout[len(input_shape) - 1])
blob = Blob(tensor_desc, init_array)
mem_state.state = blob
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])})
expected_res = np.full(input_shape, 1 + const_init, dtype=format_map[data_type])
elif mode == "reset_memory_state":
# reset initial state of ReadValue to zero
mem_state.reset()
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])})
# always ones
expected_res = np.full(input_shape, 1, dtype=format_map[data_type])
else:
res = exec_net.infer({"input_data": np.full(input_shape, 1, dtype=format_map[data_type])})
expected_res = np.full(input_shape, i, dtype=format_map[data_type])
assert np.allclose(res['MemoryAdd'], expected_res, atol=1e-6), \
"Expected values: {} \n Actual values: {} \n".format(expected_res, res)
@pytest.mark.template_plugin
def test_set_blob_with_incorrect_name():
function = create_encoder([4, 4, 20, 20])
net = ng.function_to_cnn(function)
ie_core = ie.IECore()
ie_core.register_plugin("openvino_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE")
tensor_desc = exec_net.requests[0].input_blobs["data"].tensor_desc
tensor_desc.dims = [4, 4, 20, 20]
blob = ie.Blob(tensor_desc)
with pytest.raises(RuntimeError) as e:
exec_net.requests[0].set_blob("incorrect_name", blob)
assert f"Failed to find input or output with name: 'incorrect_name'" in str(e.value)
@pytest.mark.template_plugin
def test_set_blob_with_incorrect_size():
function = create_encoder([4, 4, 20, 20])
net = ng.function_to_cnn(function)
ie_core = ie.IECore()
ie_core.register_plugin("openvino_template_plugin", "TEMPLATE")
exec_net = ie_core.load_network(net, "TEMPLATE")
tensor_desc = exec_net.requests[0].input_blobs["data"].tensor_desc
tensor_desc.dims = [tensor_desc.dims[0]*2, 4, 20, 20]
blob = ie.Blob(tensor_desc)
print(exec_net.requests[0].output_blobs)
with pytest.raises(RuntimeError) as e:
exec_net.requests[0].set_blob("data", blob)
assert f"Input blob size is not equal network input size" in str(e.value)
with pytest.raises(RuntimeError) as e:
exec_net.requests[0].set_blob("out", blob)
assert f"Output blob size is not equal network output size" in str(e.value)
| [] | [] | ["TEST_DEVICE"] | [] | ["TEST_DEVICE"] | python | 1 | 0 |
vissl/trainer/trainer_main.py | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import itertools
import logging
import os
import socket
import time
from typing import Any, Dict, List, Tuple
import numpy as np
import torch
import torch.distributed as dist
from classy_vision.generic.distributed_util import (
barrier,
is_primary,
set_cpu_device,
set_cuda_device_index,
)
from classy_vision.generic.util import copy_model_to_gpu
from classy_vision.hooks.classy_hook import ClassyHook
from classy_vision.tasks import TASK_REGISTRY, ClassyTask
from vissl.config import AttrDict
from vissl.hooks import SSLClassyHookFunctions
from vissl.models.model_helpers import get_trunk_output_feature_names
from vissl.trainer.train_steps import get_train_step
from vissl.utils.distributed_utils import all_gather_heterogeneous, all_gather_sizes
from vissl.utils.env import get_machine_local_and_dist_rank
from vissl.utils.io import save_file
def build_task(config):
"""Builds a ClassyTask from a config.
This assumes a 'name' key in the config which is used to determine what
task class to instantiate. For instance, a config `{"name": "my_task",
"foo": "bar"}` will find a class that was registered as "my_task"
(see :func:`register_task`) and call .from_config on it."""
task = TASK_REGISTRY[config.TRAINER.TASK_NAME].from_config(config)
return task
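# A minimal usage sketch (illustrative only): the task name below is an
# assumption about what is registered in TASK_REGISTRY, not something defined
# in this file.
#
#   cfg.TRAINER.TASK_NAME = "self_supervision_task"
#   task = build_task(cfg)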
class SelfSupervisionTrainer(object):
"""
The main entry point for any training or feature extraction workflows in VISSL.
The trainer constructs a train_task which prepares all the components of the
training (optimizer, loss, meters, model etc) using the settings specified by user
in the yaml config file. See the vissl/trainer/train_task.py for more details.
Args:
cfg (AttrDict): user specified input config that has optimizer, loss, meters etc
settings relevant to the training
dist_run_id (str): For multi-gpu training with PyTorch, we have to specify
how the gpus are going to rendezvous. This requires specifying
the communication method: file, tcp and the unique rendezvous
run_id that is specific to 1 run.
We recommend:
1) for 1node: use init_method=tcp and run_id=auto
2) for multi-node, use init_method=tcp and specify
run_id={master_node}:{port}
checkpoint_path (str): if the training is being resumed from a checkpoint, path to
the checkpoint. The tools/run_distributed_engines.py automatically
looks for the checkpoint in the checkpoint directory.
checkpoint_folder (str): what directory to use for checkpointing. The
tools/run_distributed_engines.py creates the directory based on user
input in the yaml config file.
hooks (List[ClassyHooks]): the list of hooks to use during the training. The hooks
vissl/engines/{train, extract_features}.py determine the hooks.
"""
def __init__(
self,
cfg: AttrDict,
dist_run_id: str,
checkpoint_path: str = None,
checkpoint_folder: str = None,
hooks: List[ClassyHook] = None,
):
self.cfg = cfg
self.dist_run_id = dist_run_id
self.local_rank, self.distributed_rank = get_machine_local_and_dist_rank()
self.setup_distributed(self.cfg.MACHINE.DEVICE == "gpu")
# now we should build the task. The task will also have the State attached
# to it. It will have information about phases (train, test) both. It will
# also contain all the other information like optimizers, etc
self.task = build_task(self.cfg)
self.task.set_checkpoint_path(checkpoint_path)
self.task.set_checkpoint_folder(checkpoint_folder)
if hooks is None:
hooks = []
self.task.set_hooks(hooks)
def setup_distributed(self, use_gpu: bool):
"""
Setup the distributed training. VISSL support both GPU and CPU only training.
(1) Initialize the torch.distributed.init_process_group if the distributed is
not already initialized. The init_method, backend are specified by user in the
yaml config file. See vissl/defaults.yaml file for description on how to set
init_method, backend.
(2) We also set the global cuda device index using torch.cuda.set_device or
cpu device
"""
# we overwrite the distributed trainer setup here with our config options
distributed_world_size = int(os.environ["WORLD_SIZE"])
assert distributed_world_size % self.cfg.DISTRIBUTED.NUM_NODES == 0
init_method = f"{self.cfg.DISTRIBUTED.INIT_METHOD}://{self.dist_run_id}"
logging.info(
f"Using Distributed init method: {init_method}, "
f"world_size: {distributed_world_size}, rank: {self.distributed_rank}"
)
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(
backend=self.cfg.DISTRIBUTED.BACKEND,
init_method=init_method,
world_size=distributed_world_size,
rank=self.distributed_rank,
)
else:
logging.warning(
"Torch distributed has already been initialized, \
reusing existing configuration"
)
logging.info(
"| initialized host {} as rank {} ({})".format(
socket.gethostname(),
self.distributed_rank,
torch.distributed.get_rank(),
)
)
if use_gpu:
set_cuda_device_index(self.local_rank)
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available() and (self.cfg.DISTRIBUTED.BACKEND == "nccl"):
dist.all_reduce(torch.zeros(1).cuda())
else:
set_cpu_device()
def train(self):
"""
The train workflow. We get the training loop to use (vissl default is
standard_train_step) but the user can create their own training loop
and specify the name TRAINER.TRAIN_STEP_NAME
The training happens:
1. Execute any hooks at the start of training (mostly resets the variable like
iteration num phase_num etc)
2. For each epoch (train or test), run the hooks at the start of an epoch. Mostly
involves setting things like timer, setting dataloader epoch etc
3. Execute the training loop (1 training iteration) involving forward, loss, backward,
optimizer update, metrics collection etc.
4. At the end of epoch, sync meters and execute hooks at the end of phase. Involves
things like checkpointing model, logging timers, logging to tensorboard etc
"""
train_step_fn = get_train_step(self.cfg["TRAINER"]["TRAIN_STEP_NAME"])
self.task.prepare(pin_memory=self.cfg.DATA.PIN_MEMORY)
self.task.init_distributed_data_parallel_model()
# Find what phase, train_phase_idx, local_iteration_num we are starting from.
# Recover it from the checkpoint (if available)
task, phase_idx, iteration_num = self._init_training_state(self.cfg, self.task)
# Good to go, (re) start training
task.run_hooks(SSLClassyHookFunctions.on_start.name)
if is_primary():
logging.info("Model is:\n {}".format(task.model))
logging.info("Loss is: {}".format(task.loss))
logging.info("Starting training....")
while phase_idx + 1 < len(task.phases):
self._advance_phase(task) # advances task.phase_idx
phase_idx += 1
iteration_num += 1
task.local_iteration_num = iteration_num # iteration_num=0 at this step
task.run_hooks(SSLClassyHookFunctions.on_phase_start.name)
while True:
try:
if self.cfg.MODEL.CUDA_CACHE.CLEAR_CUDA_CACHE and (
iteration_num % self.cfg.MODEL.CUDA_CACHE.CLEAR_FREQ == 0
):
logging.info(
f"Emptying CUDA cache at step count: {iteration_num}"
)
torch.cuda.empty_cache()
logging.info("CUDA cache cleared")
task = train_step_fn(task)
iteration_num += 1
task.local_iteration_num = iteration_num
# Book-keeping: update the training iteration number (only updated
# if it's a training phase).
task.iteration += 1 if task.train else 0
# Book-keeping. Track how many forward passes have been done.
# aka how many batches have been seen by the trainer irrespective of
# the train or test phase.
task.batches += 1
# update the batch time aka the training time for the current iteration.
task.batch_time.append(time.time() - task.start_time)
task.start_time = time.time()
task.run_hooks(SSLClassyHookFunctions.on_step.name)
except StopIteration:
break
except Exception as e:
task.run_hooks(SSLClassyHookFunctions.on_exception.name)
raise e
for meter in task.meters:
meter.sync_state()
logging.info("Meters synced")
barrier()
task.run_hooks(SSLClassyHookFunctions.on_phase_end.name)
task.run_hooks(SSLClassyHookFunctions.on_end.name)
if hasattr(task, "data_iterator"):
del task.data_iterator
gc.collect()
if hasattr(task, "dataloaders"):
del task.dataloaders
gc.collect()
@staticmethod
def _init_training_state(cfg, task: ClassyTask) -> Tuple[ClassyTask, int, int]:
"""
If a checkpoint is present, recover the current training status.
If not initialize everything properly
Args:
task {ClassyTask}: object consisting of all components a training requires
(meters, optimizers, model, loss etc.)
Returns:
task {ClassyTask}: updated task
phase_idx {int}: phase index
iteration_num: iteration number
"""
phase_idx, iteration_num = -1, -1
# Ensure that train loader exists. Will NOT exist if config.TEST_ONLY is True
if "train" in task.dataloaders.keys():
loader_key = "train"
else:
loader_key = "test"
task.max_iteration = task.num_train_phases * len(task.dataloaders[loader_key])
if task.checkpoint is not None:
phase_idx = task.checkpoint["phase_idx"]
task.train_phase_idx = task.checkpoint["train_phase_idx"]
task.local_iteration_num = task.checkpoint["iteration_num"]
task.iteration = task.checkpoint["iteration"]
else:
task.iteration = 0
task.local_iteration_num = iteration_num
num_iter_in_phase = len(task.dataloaders[loader_key])
num_iter_in_epoch = num_iter_in_phase * task.num_train_phases_per_epoch
num_samples = task.num_phase_samples(loader_key)
task.start_time = time.time()
task.batch_time = []
task.metrics = {}
logging.info(f"Training {task.num_epochs} epochs")
logging.info(f"One epoch = {num_iter_in_epoch} iterations.")
logging.info(f"Total {num_samples} samples in one epoch")
if task.num_epochs != task.num_train_phases:
logging.info(f"Training a total of {task.num_train_phases} train phases.")
logging.info(f"One phase = {num_iter_in_phase} iterations.")
logging.info(f"Total {task.max_iteration} iterations for training")
return task, phase_idx, task.local_iteration_num
def _advance_phase(self, task: ClassyTask):
"""
Advance the training phase to the next phase.
- Updates the phase number,
- resets the meters,
- reset losses,
- recreates the data iterator and destroys previous iterator
- set the model to be in train or eval phase depending on what phase we are in
- execute any optimizer update (normally learning rate updates etc at the end of
an epoch)
"""
# reset the meters at the beginning of the epoch
for meter in task.meters:
meter.reset()
# reset the loss history for this epoch
task.losses = []
# advance the epoch num to be current
task.phase_idx += 1
phase = task.phases[task.phase_idx]
task.train = True if phase["train"] else False
if task.train:
task.train_phase_idx += 1
# get a new data iterator - delete the iterator at the beginning explicitly
# so that all dataloader processes are cleaned up
phase_type = "train" if phase["train"] else "test"
# we are advancing to next epoch, so no need to compute start_iter,
        # just let it be 0 inside recreate_data_iterator. However, if we are just
# starting from the resumed training, we want to compute_start_iter
# again (if applicable) since we recreate the data iterator and delete
# the old ones.
compute_start_iter = False
if task.checkpoint is not None and task.checkpoint["train_phase_idx"] == (
task.train_phase_idx - 1
):
compute_start_iter = True
task.recreate_data_iterator(
phase_type,
epoch=task.phase_idx,
compute_start_iter=compute_start_iter,
train_phase_idx=task.train_phase_idx,
)
# set the model to train or eval depending on what phase we are in
task.model.train(phase["train"])
if task.train and task.train_phase_idx >= 0:
task.optimizer.on_epoch(task.where)
local_rank, _ = get_machine_local_and_dist_rank()
logging.info(f"Phase advanced. Rank: {local_rank}")
def extract(
self,
output_folder: str,
extract_features: bool = True,
extract_predictions: bool = False,
) -> None:
"""
Extract workflow supports multi-gpu feature extraction and also extracting
predicted labels. Since we are only extracting features or label predictions,
only the model is built (and initialized from some model weights file
if specified by user). Optionally the meters are built if the labels
are being extracted. The model is set to the eval mode fully.
The features / labels are extracted for whatever data splits (train, val, test)
the user wants.
"""
# support feature extraction on gpu only.
assert self.task.device.type == "cuda", "Set MACHINE.DEVICE = gpu"
self.task.prepare_extraction(pin_memory=self.cfg.DATA.PIN_MEMORY)
# Create distributed model
self._add_dummy_layer()
self.task.init_distributed_data_parallel_model()
if is_primary():
logging.info(f"Model is:\n {self.task.model}")
# Get the names of the features that we are extracting. If user doesn't
# specify the features to evaluate, we get the full model output and freeze
# head/trunk both as caution.
feat_names = get_trunk_output_feature_names(self.cfg.MODEL)
if len(feat_names) == 0:
feat_names = ["heads"]
for split in self.task.available_splits:
logging.info(f"============== Split: {split} =======================")
self.task.data_iterator = iter(self.task.dataloaders[split.lower()])
if extract_features:
logging.info(f"Extracting features for partition: {split.lower()}")
self._extract_split_features(
feat_names, self.task, split, output_folder
)
logging.info(f"Done getting features for partition: {split.lower()}")
if extract_predictions:
logging.info(f"Extracting predictions for partition: {split.lower()}")
self._extract_split_label_predictions(
feat_names, self.task, split, output_folder
)
logging.info(f"Done getting predictions for partition: {split.lower()}")
self._cleanup_task()
def _to_unique_feature_names(self, feat_names: List[str]) -> List[str]:
"""
We may have multiple head with different average pooling for
the same features. In case of export, we want to make sure to
export the outputs of these heads with different names.
This function will rename the features in the following way:
["res4", "res4", "res5"] -> ["res4", "res4_1", "res5"]
No effect if there are no duplicate feature names.
"""
counter = {}
new_feat_names = []
for feat_name in feat_names:
index = counter.get(feat_name, 0)
if index > 0:
new_feat_names.append(f"{feat_name}_{index}")
else:
new_feat_names.append(feat_name)
counter[feat_name] = index + 1
return new_feat_names
def _extract_split_label_predictions(
self,
feat_names: List[str],
task: ClassyTask,
split_name: str,
output_folder: str,
):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
dist_rank = torch.distributed.get_rank()
feat_names = self._to_unique_feature_names(feat_names)
out_predictions, out_targets, out_scores = {}, {}, {}
for feat_name in feat_names:
out_predictions[feat_name] = {}
out_scores[feat_name] = {}
out_targets[feat_name] = {}
assert len(task.meters) > 0, "Please specify one meter to extract predictions"
assert len(task.meters) == 1, "Please use only one meter to extract predictions"
for meter in task.meters:
assert hasattr(
meter, "get_predictions"
), f"Meter {meter.name} doesn't implement get_predictions function"
for count in itertools.count(start=0, step=1):
try:
if count % 100 == 0:
logging.info(f"Label prediction extraction iteration: {count}")
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"input": torch.cat(sample["data"]).cuda(non_blocking=True),
"target": torch.cat(sample["label"]).cpu().numpy(),
"inds": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
model_output = task.model(input_sample["input"])
# get the model predictions using the meter
if isinstance(model_output, list):
model_output_cpu = [x.cpu() for x in model_output]
else:
model_output_cpu = model_output.cpu()
for meter in task.meters:
meter.update(
model_output_cpu, sample["label"][0].detach().cpu()
)
predictions, pred_scores = task.meters[0].get_predictions(
model_output_cpu
)
num_images = input_sample["inds"].shape[0]
for num, layer_name in enumerate(feat_names):
pred = predictions[num]
score = pred_scores[num]
targets = input_sample["target"]
for idx in range(num_images):
index = input_sample["inds"][idx]
if not (index in out_predictions[layer_name]):
out_targets[layer_name][index] = targets[idx].reshape(
-1
)
out_predictions[layer_name][index] = pred[idx]
out_scores[layer_name][index] = score[idx]
except StopIteration:
break
# print the meters results. This can offer a validation
# of the extracted predictions.
self._sync_and_print_meters(task)
# save the predictions, targets and image indices now
self._save_extracted_label_predictions(
predictions=out_predictions,
confidence_scores=out_scores,
targets=out_targets,
dist_rank=dist_rank,
split=split_name,
output_folder=output_folder,
)
@staticmethod
def _save_extracted_label_predictions(
predictions,
confidence_scores,
targets,
dist_rank: int,
split: str,
output_folder: str,
):
output = {}
for layer_name in predictions.keys():
predictions[layer_name] = dict(sorted(predictions[layer_name].items()))
targets[layer_name] = dict(sorted(targets[layer_name].items()))
confidence_scores[layer_name] = dict(
sorted(confidence_scores[layer_name].items())
)
preds = np.array(torch.stack(list(predictions[layer_name].values())))
scores = np.array(torch.stack(list(confidence_scores[layer_name].values())))
N = preds.shape[0]
output[layer_name] = {
"predictions": preds.reshape(N, -1),
"confidence_scores": scores.reshape(N, -1),
"targets": np.array(list(targets[layer_name].values())),
"inds": np.array(list(predictions[layer_name].keys())),
}
split = split.lower()
for layer_name, layer_prediction in output.items():
out_pred_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_predictions.npy"
)
out_scores_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_conf_scores.npy"
)
out_target_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_targets.npy"
)
out_inds_file = (
f"{output_folder}/rank{dist_rank}_{split}_{layer_name}_inds.npy"
)
logging.info(
f"For {layer_name}, "
f"saving predictions: {layer_prediction['predictions'].shape}, "
f"saving scores: {layer_prediction['confidence_scores'].shape}, "
f"targets: {layer_prediction['targets'].shape}, "
f"inds: {layer_prediction['inds'].shape}"
)
save_file(layer_prediction["predictions"], out_pred_file)
save_file(layer_prediction["confidence_scores"], out_scores_file)
save_file(layer_prediction["targets"], out_target_file)
save_file(layer_prediction["inds"], out_inds_file)
def _sync_and_print_meters(self, task):
for meter in task.meters:
meter.sync_state()
logging.info("Meters synced")
if is_primary():
rank, _ = get_machine_local_and_dist_rank()
for meter in task.meters:
if len(task.meters) > 0 and (
(task.train and task.config["METERS"]["enable_training_meter"])
or (not task.train)
):
meter_value = meter.value
metric_key = f"{meter.name}"
if metric_key not in task.metrics:
task.metrics[metric_key] = []
task.metrics[metric_key].append(meter_value)
logging.info(
f"Rank: {rank}, name: {metric_key}, value: {meter_value}"
)
@staticmethod
def _flatten_features_list(features: Dict[str, Any]):
assert isinstance(features, list), "features must be of type list"
is_nested = isinstance(features[0], list)
if is_nested:
flat_features_list = [item for sublist in features for item in sublist]
return flat_features_list
return features
@staticmethod
def _save_extracted_features(
features,
targets,
dist_rank: int,
chunk_index: int,
split: str,
output_folder: str,
):
output = {}
for layer_name in features.keys():
indices = sorted(features[layer_name].keys())
if len(indices) > 0:
output[layer_name] = {
"inds": np.array(indices),
"features": np.array([features[layer_name][i] for i in indices]),
"targets": np.array([targets[layer_name][i] for i in indices]),
}
for layer_name, layer_features in output.items():
out_feat_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_features.npy",
)
out_target_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_targets.npy",
)
out_inds_file = os.path.join(
output_folder,
f"rank{dist_rank}_chunk{chunk_index}_{split.lower()}_{layer_name}_inds.npy",
)
save_file(layer_features["features"], out_feat_file)
save_file(layer_features["targets"], out_target_file)
save_file(layer_features["inds"], out_inds_file)
def _extract_split_features(
self,
feat_names: List[str],
task: ClassyTask,
split_name: str,
output_folder: str,
):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
dist_rank = torch.distributed.get_rank()
out_features, out_targets = {}, {}
for feat_name in feat_names:
out_features[feat_name], out_targets[feat_name] = {}, {}
chunk_index = 0
feature_buffer_size = 0
while True:
try:
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"input": torch.cat(sample["data"]).cuda(non_blocking=True),
"target": torch.cat(sample["label"]).cpu().numpy(),
"inds": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
features = task.model(input_sample["input"])
flat_features_list = self._flatten_features_list(features)
num_images = input_sample["inds"].shape[0]
feature_buffer_size += num_images
for num, feat_name in enumerate(feat_names):
feature = flat_features_list[num].cpu().numpy()
targets = input_sample["target"]
for idx in range(num_images):
index = input_sample["inds"][idx]
out_features[feat_name][index] = feature[idx]
out_targets[feat_name][index] = targets[idx].reshape(-1)
if (
feature_buffer_size
>= self.cfg.EXTRACT_FEATURES.CHUNK_THRESHOLD
>= 0
):
self._save_extracted_features(
features=out_features,
targets=out_targets,
dist_rank=dist_rank,
chunk_index=chunk_index,
split=split_name,
output_folder=output_folder,
)
for layer_name in out_features.keys():
out_features[layer_name].clear()
chunk_index += 1
feature_buffer_size = 0
except StopIteration:
self._save_extracted_features(
features=out_features,
targets=out_targets,
dist_rank=dist_rank,
chunk_index=chunk_index,
split=split_name,
output_folder=output_folder,
)
break
def _add_dummy_layer(self):
"""
        In feature evaluation mode, if both the trunk and the head are frozen,
        DDP won't work because the model has no trainable parameters. Adding a
        dummy head would change the extracted features, so we instead add a
        dummy linear layer to the model and use DDP. We copy the model to gpu
        (if using gpus) after the new dummy layer is added.
"""
fully_frozen_model = self.task.base_model.is_fully_frozen_model()
if fully_frozen_model:
self.task.base_model.dummy_layer = torch.nn.Linear(4, 4)
if self.task.device.type == "cuda":
self.task.base_model = copy_model_to_gpu(self.task.base_model)
def _cleanup_task(self):
if hasattr(self.task, "data_iterator"):
del self.task.data_iterator
gc.collect()
if hasattr(self.task, "dataloaders"):
del self.task.dataloaders
gc.collect()
def extract_clusters(self) -> Dict[str, Dict[int, int]]:
"""
Workflow to extract multi-gpu cluster extraction for pre-trained models
based on clusterization (SwAV, DeepCluster, etc).
The function returns a map from image index to cluster index for the
whole dataset for each of the different splits.
"""
# Support feature extraction on gpu only.
assert self.task.device.type == "cuda", "Set MACHINE.DEVICE = gpu"
self.task.prepare_extraction(pin_memory=self.cfg.DATA.PIN_MEMORY)
# Assert that the model support extract of clusters
error_message = "Extracting clusters is only available for pre-training methods based on clusters" # NOQA
assert self.task.base_model.is_clustering_model(), error_message
# Create distributed model
self._add_dummy_layer()
self.task.init_distributed_data_parallel_model()
if is_primary():
logging.info("Model is:\n {}".format(self.task.model))
# Compute the cluster assignment on each worker in parallel
cluster_assignment = {}
for split in self.task.available_splits:
msg = f"Extracting cluster assignment for partition: {split}"
logging.info(msg)
cluster_assignment[split] = self._get_cluster_assignment_for_split(
self.task, split
)
logging.info("Done: " + msg)
self._cleanup_task()
# Merge the cluster assignments and group by cluster
return self._merge_cluster_assignments(cluster_assignment)
def _get_cluster_assignment_for_split(self, task: ClassyTask, split: str):
task.model.eval()
logging.info("Model set to eval mode during feature extraction...")
cluster_assignments = {}
task.data_iterator = iter(self.task.dataloaders[split.lower()])
while True:
try:
sample = next(task.data_iterator)
assert isinstance(sample, dict)
assert "data_idx" in sample, "Indices not passed"
input_sample = {
"images": torch.cat(sample["data"]).cuda(non_blocking=True),
"indices": torch.cat(sample["data_idx"]).cpu().numpy(),
}
with torch.no_grad():
features = task.model(input_sample["images"])
features = features[0]
prototype_score = features[1]
prototype_index = prototype_score.argmax(dim=-1)
num_images = input_sample["indices"].shape[0]
for idx in range(num_images):
image_index = input_sample["indices"][idx]
cluster_assignments[image_index] = prototype_index[idx].item()
except StopIteration:
break
return cluster_assignments
@staticmethod
def _merge_cluster_assignments(
rank_cluster_assignment: Dict[str, Dict[int, int]]
) -> Dict[str, Dict[int, int]]:
"""
All gather all the cluster assignments computed by the different workers on
separate parts of the dataset and merge them in a single map
"""
merged_cluster_assignments = {}
for split in rank_cluster_assignment.keys():
split_assignments = list(rank_cluster_assignment[split].items())
image_indices = [assignment[0] for assignment in split_assignments]
image_indices = torch.LongTensor(image_indices).cuda(
torch.cuda.current_device()
)
cluster_indices = [assignment[1] for assignment in split_assignments]
cluster_indices = torch.LongTensor(cluster_indices).cuda(
torch.cuda.current_device()
)
sizes = all_gather_sizes(image_indices)
all_image_indices = all_gather_heterogeneous(sizes, image_indices)
all_cluster_indices = all_gather_heterogeneous(sizes, cluster_indices)
merged_cluster_assignments[split] = {}
for image_indices, cluster_indices in zip(
all_image_indices, all_cluster_indices
):
for image_id, cluster_id in zip(image_indices, cluster_indices):
merged_cluster_assignments[split][
image_id.item()
] = cluster_id.item()
return merged_cluster_assignments
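        # The merged result has the form (illustrative):
        #   {"TRAIN": {image_index: cluster_id, ...}, "TEST": {...}}
        # with one entry per image across all workers.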
| [] | [] | ["WORLD_SIZE"] | [] | ["WORLD_SIZE"] | python | 1 | 0 |
src/wsgi.py | """
WSGI config for src project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
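# A minimal pass-through middleware sketch (illustrative only; any
# WSGI-compliant callable can wrap `application` the same way):
#
# class PassThroughMiddleware:
#     def __init__(self, app):
#         self.app = app
#
#     def __call__(self, environ, start_response):
#         return self.app(environ, start_response)
#
# application = PassThroughMiddleware(application)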
| [] | [] | [] | [] | [] | python | 0 | 0 |
bccsp/pkcs11/impl.go | /*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package pkcs11
import (
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
"os"
"github.com/Matrix-Zhang/fabric-gm/bccsp"
"github.com/Matrix-Zhang/fabric-gm/bccsp/sw"
"github.com/Matrix-Zhang/fabric-gm/common/flogging"
"github.com/miekg/pkcs11"
"github.com/pkg/errors"
)
var (
logger = flogging.MustGetLogger("bccsp_p11")
sessionCacheSize = 10
)
// New returns a new instance of the PKCS11-based BCCSP
// set at the passed security level, hash family and KeyStore.
func New(opts PKCS11Opts, keyStore bccsp.KeyStore) (bccsp.BCCSP, error) {
// Init config
conf := &config{}
err := conf.setSecurityLevel(opts.SecLevel, opts.HashFamily)
if err != nil {
return nil, errors.Wrapf(err, "Failed initializing configuration")
}
swCSP, err := sw.NewWithParams(opts.SecLevel, opts.HashFamily, keyStore)
if err != nil {
return nil, errors.Wrapf(err, "Failed initializing fallback SW BCCSP")
}
// Check KeyStore
if keyStore == nil {
return nil, errors.New("Invalid bccsp.KeyStore instance. It must be different from nil")
}
lib := opts.Library
pin := opts.Pin
label := opts.Label
ctx, slot, session, err := loadLib(lib, pin, label)
if err != nil {
return nil, errors.Wrapf(err, "Failed initializing PKCS11 library %s %s",
lib, label)
}
sessions := make(chan pkcs11.SessionHandle, sessionCacheSize)
csp := &impl{swCSP, conf, keyStore, ctx, sessions, slot, lib, opts.SoftVerify, opts.Immutable, opts.AltId}
csp.returnSession(*session)
return csp, nil
}
type impl struct {
bccsp.BCCSP
conf *config
ks bccsp.KeyStore
ctx *pkcs11.Ctx
sessions chan pkcs11.SessionHandle
slot uint
lib string
softVerify bool
//Immutable flag makes object immutable
immutable bool
// Alternate identifier of the private key
altId string
}
// KeyGen generates a key using opts.
func (csp *impl) KeyGen(opts bccsp.KeyGenOpts) (k bccsp.Key, err error) {
// Validate arguments
if opts == nil {
return nil, errors.New("Invalid Opts parameter. It must not be nil")
}
// Parse algorithm
switch opts.(type) {
case *bccsp.ECDSAKeyGenOpts:
ski, pub, err := csp.generateECKey(csp.conf.ellipticCurve, opts.Ephemeral())
if err != nil {
return nil, errors.Wrapf(err, "Failed generating ECDSA key")
}
k = &ecdsaPrivateKey{ski, ecdsaPublicKey{ski, pub}}
case *bccsp.ECDSAP256KeyGenOpts:
ski, pub, err := csp.generateECKey(oidNamedCurveP256, opts.Ephemeral())
if err != nil {
return nil, errors.Wrapf(err, "Failed generating ECDSA P256 key")
}
k = &ecdsaPrivateKey{ski, ecdsaPublicKey{ski, pub}}
case *bccsp.ECDSAP384KeyGenOpts:
ski, pub, err := csp.generateECKey(oidNamedCurveP384, opts.Ephemeral())
if err != nil {
return nil, errors.Wrapf(err, "Failed generating ECDSA P384 key")
}
k = &ecdsaPrivateKey{ski, ecdsaPublicKey{ski, pub}}
default:
return csp.BCCSP.KeyGen(opts)
}
return k, nil
}
// KeyImport imports a key from its raw representation using opts.
// The opts argument should be appropriate for the primitive used.
func (csp *impl) KeyImport(raw interface{}, opts bccsp.KeyImportOpts) (k bccsp.Key, err error) {
// Validate arguments
if raw == nil {
return nil, errors.New("Invalid raw. Cannot be nil")
}
if opts == nil {
return nil, errors.New("Invalid Opts parameter. It must not be nil")
}
switch opts.(type) {
case *bccsp.X509PublicKeyImportOpts:
x509Cert, ok := raw.(*x509.Certificate)
if !ok {
return nil, errors.New("[X509PublicKeyImportOpts] Invalid raw material. Expected *x509.Certificate")
}
pk := x509Cert.PublicKey
switch pk.(type) {
case *ecdsa.PublicKey:
return csp.KeyImport(pk, &bccsp.ECDSAGoPublicKeyImportOpts{Temporary: opts.Ephemeral()})
case *rsa.PublicKey:
return csp.KeyImport(pk, &bccsp.RSAGoPublicKeyImportOpts{Temporary: opts.Ephemeral()})
default:
return nil, errors.New("Certificate's public key type not recognized. Supported keys: [ECDSA, RSA]")
}
default:
return csp.BCCSP.KeyImport(raw, opts)
}
}
// GetKey returns the key this CSP associates to
// the Subject Key Identifier ski.
func (csp *impl) GetKey(ski []byte) (bccsp.Key, error) {
pubKey, isPriv, err := csp.getECKey(ski)
if err == nil {
if isPriv {
return &ecdsaPrivateKey{ski, ecdsaPublicKey{ski, pubKey}}, nil
}
return &ecdsaPublicKey{ski, pubKey}, nil
}
return csp.BCCSP.GetKey(ski)
}
// Sign signs digest using key k.
// The opts argument should be appropriate for the primitive used.
//
// Note that when a signature of a hash of a larger message is needed,
// the caller is responsible for hashing the larger message and passing
// the hash (as digest).
func (csp *impl) Sign(k bccsp.Key, digest []byte, opts bccsp.SignerOpts) ([]byte, error) {
// Validate arguments
if k == nil {
return nil, errors.New("Invalid Key. It must not be nil")
}
if len(digest) == 0 {
return nil, errors.New("Invalid digest. Cannot be empty")
}
// Check key type
switch k.(type) {
case *ecdsaPrivateKey:
return csp.signECDSA(*k.(*ecdsaPrivateKey), digest, opts)
default:
return csp.BCCSP.Sign(k, digest, opts)
}
}
// Verify verifies signature against key k and digest
func (csp *impl) Verify(k bccsp.Key, signature, digest []byte, opts bccsp.SignerOpts) (bool, error) {
// Validate arguments
if k == nil {
return false, errors.New("Invalid Key. It must not be nil")
}
if len(signature) == 0 {
return false, errors.New("Invalid signature. Cannot be empty")
}
if len(digest) == 0 {
return false, errors.New("Invalid digest. Cannot be empty")
}
// Check key type
switch k.(type) {
case *ecdsaPrivateKey:
return csp.verifyECDSA(k.(*ecdsaPrivateKey).pub, signature, digest, opts)
case *ecdsaPublicKey:
return csp.verifyECDSA(*k.(*ecdsaPublicKey), signature, digest, opts)
default:
return csp.BCCSP.Verify(k, signature, digest, opts)
}
}
// Encrypt encrypts plaintext using key k.
// The opts argument should be appropriate for the primitive used.
func (csp *impl) Encrypt(k bccsp.Key, plaintext []byte, opts bccsp.EncrypterOpts) ([]byte, error) {
// TODO: Add PKCS11 support for encryption, when fabric starts requiring it
return csp.BCCSP.Encrypt(k, plaintext, opts)
}
// Decrypt decrypts ciphertext using key k.
// The opts argument should be appropriate for the primitive used.
func (csp *impl) Decrypt(k bccsp.Key, ciphertext []byte, opts bccsp.DecrypterOpts) ([]byte, error) {
return csp.BCCSP.Decrypt(k, ciphertext, opts)
}
// FindPKCS11Lib IS ONLY USED FOR TESTING
// This is a convenience function. Useful to self-configure, for tests where usual configuration is not
// available
func FindPKCS11Lib() (lib, pin, label string) {
//FIXME: Till we workout the configuration piece, look for the libraries in the familiar places
lib = os.Getenv("PKCS11_LIB")
if lib == "" {
pin = "98765432"
label = "ForFabric"
possibilities := []string{
"/usr/lib/softhsm/libsofthsm2.so", //Debian
"/usr/lib/x86_64-linux-gnu/softhsm/libsofthsm2.so", //Ubuntu
"/usr/lib/s390x-linux-gnu/softhsm/libsofthsm2.so", //Ubuntu
"/usr/lib/powerpc64le-linux-gnu/softhsm/libsofthsm2.so", //Power
"/usr/local/Cellar/softhsm/2.1.0/lib/softhsm/libsofthsm2.so", //MacOS
}
for _, path := range possibilities {
if _, err := os.Stat(path); !os.IsNotExist(err) {
lib = path
break
}
}
} else {
pin = os.Getenv("PKCS11_PIN")
label = os.Getenv("PKCS11_LABEL")
}
return lib, pin, label
}
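// Illustrative SoftHSM setup for tests (values are assumptions that mirror the
// fallbacks above, not requirements):
//
//	export PKCS11_LIB=/usr/lib/softhsm/libsofthsm2.so
//	export PKCS11_PIN=98765432
//	export PKCS11_LABEL=ForFabric
//
// With PKCS11_LIB unset, FindPKCS11Lib falls back to the hard-coded pin/label
// and searches the well-known library paths listed above.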
| ["\"PKCS11_LIB\"", "\"PKCS11_PIN\"", "\"PKCS11_LABEL\""] | [] | ["PKCS11_PIN", "PKCS11_LIB", "PKCS11_LABEL"] | [] | ["PKCS11_PIN", "PKCS11_LIB", "PKCS11_LABEL"] | go | 3 | 0 |
pyzoo/zoo/orca/learn/mxnet/mxnet_runner.py | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import logging
import subprocess
import ray.services
import mxnet as mx
import numpy as np
from mxnet import gluon
from functools import reduce
from zoo.ray.utils import to_list
class MXNetRunner(object):
"""Manages a MXNet model for training."""
def setup_distributed(self, env, config, model_creator, loss_creator=None,
validation_metrics_creator=None, eval_metrics_creator=None):
logging.basicConfig(level=logging.INFO) # This can print log messages to console.
self.logger = logging.getLogger()
assert isinstance(config, dict), "config must be a dict"
for param in ["batch_size", "optimizer", "optimizer_params", "log_interval"]:
assert param in config, param + " must be specified in config"
self.config = config
self.model_creator = model_creator
self.loss_creator = loss_creator
self.validation_metrics_creator = validation_metrics_creator
        self.eval_metrics_creator = eval_metrics_creator
self.is_worker = False
env["DMLC_NODE_HOST"] = self.get_node_ip()
if env["DMLC_ROLE"] == "worker":
self.is_worker = True
if self.is_worker:
os.environ.update(env)
self.kv = mx.kv.create("dist_sync")
# Set seed so that the model on each worker is initialized with the same weights
if "seed" in self.config:
mx.random.seed(self.config["seed"])
self.model = self.model_creator(self.config)
self.loss = self.loss_creator(self.config) if self.loss_creator else None
            self.eval_metrics = self.eval_metrics_creator(self.config) \
                if self.eval_metrics_creator else None
self.val_metrics = self.validation_metrics_creator(self.config) \
if self.validation_metrics_creator else None
# For BaseModule, use symbolic API. Otherwise, use imperative API.
# TODO: change Gluon Trainer to Estimator API?
if not isinstance(self.model, mx.module.BaseModule):
assert self.loss, "Loss not defined for gluon model, please specify loss_creator"
self.trainer = gluon.Trainer(self.model.collect_params(), self.config["optimizer"],
optimizer_params=self.config["optimizer_params"],
kvstore=self.kv)
else: # Trainer is not needed for symbolic API.
self.trainer = None
else: # server
# Need to use the environment on each raylet process for the correct python environment.
# TODO: Need to kill this process manually?
modified_env = os.environ.copy()
modified_env.update(env)
# For servers, just import mxnet and no need to do anything else
subprocess.Popen("python -c 'import mxnet'", shell=True, env=modified_env)
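    # Illustrative setup of a worker (values are assumptions, not taken from
    # this file):
    #
    #   env = {"DMLC_ROLE": "worker", "DMLC_PS_ROOT_URI": "10.0.0.1",
    #          "DMLC_PS_ROOT_PORT": "9000", "DMLC_NUM_SERVER": "1",
    #          "DMLC_NUM_WORKER": "2"}
    #   config = {"batch_size": 64, "optimizer": "sgd",
    #             "optimizer_params": {"learning_rate": 0.01},
    #             "log_interval": 10, "seed": 42}
    #   runner.setup_distributed(env, config, model_creator, loss_creator)
    #
    # DMLC_* are the standard MXNet distributed kvstore variables; only
    # DMLC_NODE_HOST and DMLC_ROLE are referenced explicitly in this file.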
def train(self, train_data, val_data=None, nb_epoch=1, train_resize_batch_num=None):
"""Train the model and update the model parameters."""
stats = dict()
if self.is_worker:
from zoo.orca.data.shard import RayPartition
if isinstance(train_data, RayPartition):
data, label = get_data_label(train_data.get_data())
train_data_iter = mx.io.NDArrayIter(data=data, label=label,
batch_size=self.config["batch_size"],
shuffle=True)
if train_resize_batch_num is not None:
train_data_iter = mx.io.ResizeIter(train_data_iter, train_resize_batch_num)
if val_data:
data_val, label_val = get_data_label(val_data.get_data())
val_data_iter = mx.io.NDArrayIter(data=data_val, label=label_val,
batch_size=self.config["batch_size"],
shuffle=True)
else:
val_data_iter = None
else: # data_creator functions; should return Iter or DataLoader
train_data_iter = train_data(self.config, self.kv)
val_data_iter = val_data(self.config, self.kv) if val_data else None
start_time = time.time()
if self.trainer: # Imperative API
for epoch in range(nb_epoch):
train_data_iter.reset()
if self.eval_metrics:
self.eval_metrics.reset() # metrics will accumulate for one batch
batch_start_time = time.time()
epoch_start_time = time.time()
for i, batch in enumerate(train_data_iter):
data = gluon.utils.split_and_load(
batch.data[0].astype("float32"), ctx_list=[mx.cpu()], batch_axis=0)
label = gluon.utils.split_and_load(
batch.label[0].astype("float32"), ctx_list=[mx.cpu()], batch_axis=0)
outputs = []
Ls = []
from mxnet import autograd as ag
with ag.record():
for x, y in zip(data, label):
z = self.model(x) # forward
L = self.loss(z, y)
# store the loss and do backward on a batch for better speed
Ls.append(L)
outputs.append(z)
ag.backward(Ls)
self.trainer.step(batch.data[0].shape[0])
if self.eval_metrics:
self.eval_metrics.update(label, outputs)
if not (i + 1) % self.config["log_interval"]:
# This would be logged on driver for each worker process.
iteration_log = \
"Epoch[%d] Batch[%d] Speed: %f samples/sec %s=%f" \
% (epoch, i,
self.config["batch_size"] / (time.time() - batch_start_time),
"loss", Ls[0].asnumpy().mean())
if self.eval_metrics:
names, accs = self.eval_metrics.get()
names, accs = to_list(names), to_list(accs)
for name, acc in zip(names, accs):
iteration_log += " %s=%f" % (name, acc)
self.logger.info(iteration_log)
batch_start_time = time.time()
# Epoch time log
self.logger.info("[Epoch %d] time cost: %f" %
(epoch, time.time() - epoch_start_time))
# Epoch metrics log on train data
if self.eval_metrics:
epoch_train_log = "[Epoch %d] training: " % epoch
names, accs = self.eval_metrics.get()
names, accs = to_list(names), to_list(accs)
for name, acc in zip(names, accs):
epoch_train_log += "%s=%f " % (name, acc)
self.logger.info(epoch_train_log)
# Epoch metrics log on validation data if any:
if val_data_iter:
self.val_metrics.reset()
val_data_iter.reset()
for batch in val_data_iter:
data = gluon.utils.split_and_load(
batch.data[0].astype("float32", copy=False),
ctx_list=[mx.cpu()], batch_axis=0)
label = gluon.utils.split_and_load(
batch.label[0].astype("float32", copy=False),
ctx_list=[mx.cpu()], batch_axis=0)
outputs = [self.model(X) for X in data]
self.val_metrics.update(label, outputs)
epoch_val_log = "[Epoch %d] validation: " % epoch
names, accs = self.val_metrics.get()
names, accs = to_list(names), to_list(accs)
for name, acc in zip(names, accs):
epoch_val_log += "%s=%f " % (name, acc)
self.logger.info(epoch_val_log)
# TODO: save checkpoints
if self.eval_metrics:
names, accs = self.eval_metrics.get()
names, accs = to_list(names), to_list(accs)
for name, acc in zip(names, accs):
stats[name] = acc
else: # Symbolic API
# TODO: seems no history (i.e. validation accuracy) returned by fit?
if "init" not in self.config:
from mxnet.initializer import Uniform
self.config["init"] = Uniform(0.01) # This is the default value for MXNet
if self.eval_metrics is None:
self.eval_metrics = 'acc'
self.model.fit(train_data=train_data_iter,
num_epoch=nb_epoch,
initializer=self.config["init"],
kvstore=self.kv,
optimizer=self.config["optimizer"],
optimizer_params=self.config["optimizer_params"],
eval_data=val_data_iter,
eval_metric=self.eval_metrics,
validation_metric=self.val_metrics,
batch_end_callback=mx.callback.Speedometer(
self.config["batch_size"], self.config["log_interval"]),
epoch_end_callback=None if "model" not in self.config
else mx.callback.do_checkpoint(self.config["model"]))
epoch_time = time.time() - start_time
stats["epoch_time"] = epoch_time
if isinstance(train_data, RayPartition):
del train_data
if val_data and isinstance(val_data, RayPartition):
del val_data
return stats
def shutdown(self):
"""Attempts to shut down the runner."""
del self.logger
if self.is_worker:
del self.kv
del self.model
del self.trainer
del self.loss
del self.eval_metrics
del self.val_metrics
def get_node_ip(self):
"""Returns the IP address of the current node."""
if "node_ip" not in self.__dict__:
self.node_ip = ray.services.get_node_ip_address()
return self.node_ip
def find_free_port(self):
"""Finds a free port on the current node."""
if "port" not in self.__dict__:
from zoo.orca.learn.mxnet.utils import find_free_port
self.port = find_free_port()
return self.port
def get_data_label(partition_data):
def combine_dict(dict1, dict2):
return {key: np.concatenate((value, dict2[key]), axis=0)
for (key, value) in dict1.items()}
def combine_list(list1, list2):
return [np.concatenate((list1[index], list2[index]), axis=0)
for index in range(0, len(list1))]
data_list = [data['x'] for data in partition_data]
label_list = [data['y'] for data in partition_data]
if isinstance(partition_data[0]['x'], dict):
data = reduce(lambda dict1, dict2: combine_dict(dict1, dict2), data_list)
elif isinstance(partition_data[0]['x'], np.ndarray):
data = reduce(lambda array1, array2: np.concatenate((array1, array2), axis=0),
data_list)
elif isinstance(partition_data[0]['x'], list):
data = reduce(lambda list1, list2: combine_list(list1, list2), data_list)
if isinstance(partition_data[0]['y'], dict):
label = reduce(lambda dict1, dict2: combine_dict(dict1, dict2), label_list)
elif isinstance(partition_data[0]['y'], np.ndarray):
label = reduce(lambda array1, array2: np.concatenate((array1, array2), axis=0),
label_list)
elif isinstance(partition_data[0]['y'], list):
label = reduce(lambda list1, list2: combine_list(list1, list2), data_list)
return data, label
| []
| [] | [] | []
compute/common_instances.go | package compute
import (
"fmt"
"log"
"os"
"strings"
"sync"
"github.com/databrickslabs/databricks-terraform/common"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
)
var (
oncePool sync.Once
commonInstancePool *InstancePoolAndStats
)
// CommonRuntimeVersion returns the recommended Spark runtime version
func CommonRuntimeVersion() string {
return "6.6.x-scala2.11"
}
// CommonInstanceType returns the smallest recommended instance type
func CommonInstanceType() string {
cloudEnv := os.Getenv("CLOUD_ENV")
if strings.ToLower(cloudEnv) == "azure" {
return "Standard_DS3_v2"
}
// TODO: create a method on ClustersAPI to give
// cloud specific delta-cache enabled instance by default.
return "m4.large"
}
// CommonInstancePoolID returns common instance pool that is supposed to be used for internal testing purposes
func CommonInstancePoolID() string {
if commonInstancePool != nil {
return commonInstancePool.InstancePoolID
}
client := common.CommonEnvironmentClient()
oncePool.Do(func() { // atomic
log.Printf("[INFO] Initializing common instance pool")
instancePools := NewInstancePoolsAPI(client)
clusters := NewClustersAPI(client)
currentUserPool := fmt.Sprintf("Terraform Integration Test by %s", os.Getenv("USER"))
pools, err := instancePools.List()
if err != nil {
log.Printf("[ERROR] Cannot list instance pools: %v", err)
panic(err)
}
for _, existingPool := range pools.InstancePools {
if existingPool.InstancePoolName == currentUserPool {
log.Printf(
"[INFO] Using existing instance pool: %s/#setting/clusters/instance-pools/view/%s",
client.Host, existingPool.InstancePoolID)
commonInstancePool = &existingPool
return
}
}
instancePool := InstancePool{
PreloadedSparkVersions: []string{CommonRuntimeVersion()},
NodeTypeID: clusters.GetSmallestNodeTypeWithStorage(),
InstancePoolName: currentUserPool,
MaxCapacity: 10,
IdleInstanceAutoTerminationMinutes: 15,
}
if !client.IsAzure() {
instancePool.AwsAttributes = &InstancePoolAwsAttributes{
Availability: AwsAvailabilitySpot,
}
}
newPool, err := instancePools.Create(instancePool)
if err != nil {
log.Printf("[ERROR] Cannot create instance pool: %v", err)
panic(err)
}
log.Printf("[INFO] Created common instance pool: %s/#setting/clusters/instance-pools/view/%s",
client.Host, newPool.InstancePoolID)
commonInstancePool = &newPool
})
return commonInstancePool.InstancePoolID
}
// CommonEnvironmentClientWithRealCommandExecutor is good for internal tests
func CommonEnvironmentClientWithRealCommandExecutor() *common.DatabricksClient {
client := common.CommonEnvironmentClient()
client.WithCommandExecutor(NewCommandsAPI(client))
return client
}
// NewTinyClusterInCommonPool creates new cluster for short-lived purposes
func NewTinyClusterInCommonPool() (c ClusterInfo, err error) {
randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
clusters := NewClustersAPI(CommonEnvironmentClientWithRealCommandExecutor())
c, err = clusters.Create(Cluster{
NumWorkers: 1,
ClusterName: "Terraform " + randomName,
SparkVersion: CommonRuntimeVersion(),
InstancePoolID: CommonInstancePoolID(),
IdempotencyToken: "tf-" + randomName,
AutoterminationMinutes: 20,
})
return
}
// NewTinyClusterInCommonPoolPossiblyReused is recommended to be used for testing only
func NewTinyClusterInCommonPoolPossiblyReused() (c ClusterInfo) {
randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
currentCluster := "TerraformIntegrationTest"
clusters := NewClustersAPI(CommonEnvironmentClientWithRealCommandExecutor())
c, err := clusters.GetOrCreateRunningCluster(currentCluster, Cluster{
NumWorkers: 1,
ClusterName: currentCluster,
SparkVersion: CommonRuntimeVersion(),
InstancePoolID: CommonInstancePoolID(),
IdempotencyToken: "tf-" + randomName,
AutoterminationMinutes: 20,
})
if err != nil {
panic(err)
}
return
}
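// Illustrative usage sketch only (not part of this package's API): creating a
// short-lived cluster for an integration test. It assumes valid Databricks
// credentials are available to common.CommonEnvironmentClient().
//
//	c, err := NewTinyClusterInCommonPool()
//	if err != nil {
//		log.Printf("[ERROR] cannot create test cluster: %v", err)
//		return
//	}
//	log.Printf("[INFO] created %s in pool %s", c.ClusterName, CommonInstancePoolID())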
| [
"\"CLOUD_ENV\"",
"\"USER\""
]
| []
| [
"USER",
"CLOUD_ENV"
]
| [] | ["USER", "CLOUD_ENV"] | go | 2 | 0 | |
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
import org.apache.hadoop.yarn.service.api.records.ServiceState;
import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
import org.apache.hadoop.yarn.service.monitor.ServiceMonitor;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.util.Iterator;
import java.util.Map;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConstants.KEYTAB_LOCATION;
public class ServiceMaster extends CompositeService {
private static final Logger LOG =
LoggerFactory.getLogger(ServiceMaster.class);
public static final String YARNFILE_OPTION = "yarnfile";
private static String serviceDefPath;
protected ServiceContext context;
public ServiceMaster(String name) {
super(name);
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
printSystemEnv();
context = new ServiceContext();
Path appDir = getAppDir();
context.serviceHdfsDir = appDir.toString();
SliderFileSystem fs = new SliderFileSystem(conf);
context.fs = fs;
fs.setAppDir(appDir);
loadApplicationJson(context, fs);
context.tokens = recordTokensForContainers();
if (UserGroupInformation.isSecurityEnabled()) {
doSecureLogin();
}
// Take yarn config from YarnFile and merge them into YarnConfiguration
for (Map.Entry<String, String> entry : context.service
.getConfiguration().getProperties().entrySet()) {
conf.set(entry.getKey(), entry.getValue());
}
ContainerId amContainerId = getAMContainerId();
ApplicationAttemptId attemptId = amContainerId.getApplicationAttemptId();
LOG.info("Service AppAttemptId: " + attemptId);
context.attemptId = attemptId;
// configure AM to wait forever for RM
conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1);
conf.unset(YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS);
DefaultMetricsSystem.initialize("ServiceAppMaster");
context.secretManager = new ClientToAMTokenSecretManager(attemptId, null);
ClientAMService clientAMService = new ClientAMService(context);
context.clientAMService = clientAMService;
addService(clientAMService);
ServiceScheduler scheduler = createServiceScheduler(context);
addService(scheduler);
context.scheduler = scheduler;
ServiceMonitor monitor = new ServiceMonitor("Service Monitor", context);
addService(monitor);
super.serviceInit(conf);
}
// Record the tokens and use them for launching containers.
// e.g. localization requires the hdfs delegation tokens
@VisibleForTesting
protected ByteBuffer recordTokensForContainers() throws IOException {
Credentials copy = new Credentials(UserGroupInformation.getCurrentUser()
.getCredentials());
// Now remove the AM->RM token so that task containers cannot access it.
Iterator<Token<?>> iter = copy.getAllTokens().iterator();
while (iter.hasNext()) {
Token<?> token = iter.next();
LOG.info(token.toString());
if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
iter.remove();
}
}
DataOutputBuffer dob = new DataOutputBuffer();
try {
copy.writeTokenStorageToStream(dob);
} finally {
dob.close();
}
return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
}
// 1. First try to use user specified keytabs
// 2. If not specified, then try to use pre-installed keytab at localhost
// 3. strip off hdfs delegation tokens to ensure use keytab to talk to hdfs
private void doSecureLogin()
throws IOException, URISyntaxException {
// read the localized keytab specified by user
File keytab = new File(String.format(KEYTAB_LOCATION,
context.service.getName()));
if (!keytab.exists()) {
LOG.info("No keytab localized at " + keytab);
// Check if there exists a pre-installed keytab at host
String preInstalledKeytab = context.service.getKerberosPrincipal()
.getKeytab();
if (!StringUtils.isEmpty(preInstalledKeytab)) {
URI uri = new URI(preInstalledKeytab);
if (uri.getScheme().equals("file")) {
keytab = new File(uri);
LOG.info("Using pre-installed keytab from localhost: " +
preInstalledKeytab);
}
}
}
if (!keytab.exists()) {
LOG.info("No keytab exists: " + keytab);
return;
}
String principal = context.service.getKerberosPrincipal()
.getPrincipalName();
if (StringUtils.isEmpty((principal))) {
principal = UserGroupInformation.getLoginUser().getShortUserName();
LOG.info("No principal name specified. Will use AM " +
"login identity {} to attempt keytab-based login", principal);
}
Credentials credentials = UserGroupInformation.getCurrentUser()
.getCredentials();
LOG.info("User before logged in is: " + UserGroupInformation
.getCurrentUser());
String principalName = SecurityUtil.getServerPrincipal(principal,
ServiceUtils.getLocalHostName(getConfig()));
UserGroupInformation.loginUserFromKeytab(principalName,
keytab.getAbsolutePath());
// add back the credentials
UserGroupInformation.getCurrentUser().addCredentials(credentials);
LOG.info("User after logged in is: " + UserGroupInformation
.getCurrentUser());
context.principal = principalName;
context.keytab = keytab.getAbsolutePath();
removeHdfsDelegationToken(UserGroupInformation.getLoginUser());
}
// Remove HDFS delegation token from login user and ensure AM to use keytab
// to talk to hdfs
private static void removeHdfsDelegationToken(UserGroupInformation user) {
if (!user.isFromKeytab()) {
LOG.error("AM is not holding on a keytab in a secure deployment:" +
" service will fail when tokens expire");
}
Credentials credentials = user.getCredentials();
Iterator<Token<? extends TokenIdentifier>> iter =
credentials.getAllTokens().iterator();
while (iter.hasNext()) {
Token<? extends TokenIdentifier> token = iter.next();
if (token.getKind().equals(
DelegationTokenIdentifier.HDFS_DELEGATION_KIND)) {
LOG.info("Remove HDFS delegation token {}.", token);
iter.remove();
}
}
}
protected ContainerId getAMContainerId() throws BadClusterStateException {
return ContainerId.fromString(ServiceUtils.mandatoryEnvVariable(
ApplicationConstants.Environment.CONTAINER_ID.name()));
}
protected Path getAppDir() {
return new Path(serviceDefPath).getParent();
}
protected ServiceScheduler createServiceScheduler(ServiceContext context)
throws IOException, YarnException {
return new ServiceScheduler(context);
}
protected void loadApplicationJson(ServiceContext context,
SliderFileSystem fs) throws IOException {
context.service = ServiceApiUtil
.loadServiceFrom(fs, new Path(serviceDefPath));
context.service.setState(ServiceState.ACCEPTED);
LOG.info(context.service.toString());
}
@Override
protected void serviceStart() throws Exception {
LOG.info("Starting service as user " + UserGroupInformation
.getCurrentUser());
UserGroupInformation.getLoginUser().doAs(
(PrivilegedExceptionAction<Void>) () -> {
super.serviceStart();
return null;
}
);
}
@Override
protected void serviceStop() throws Exception {
LOG.info("Stopping app master");
super.serviceStop();
}
// This method should be called whenever there is an increment or decrement
// of a READY state component of a service
public static synchronized void checkAndUpdateServiceState(
ServiceScheduler scheduler, boolean isIncrement) {
ServiceState curState = scheduler.getApp().getState();
if (!isIncrement) {
// set it to STARTED every time a component moves out of STABLE state
scheduler.getApp().setState(ServiceState.STARTED);
} else {
// otherwise check the state of all components
boolean isStable = true;
for (org.apache.hadoop.yarn.service.api.records.Component comp : scheduler
.getApp().getComponents()) {
if (comp.getState() !=
org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE) {
isStable = false;
break;
}
}
if (isStable) {
scheduler.getApp().setState(ServiceState.STABLE);
} else {
// mark new state as started only if current state is stable, otherwise
// leave it as is
if (curState == ServiceState.STABLE) {
scheduler.getApp().setState(ServiceState.STARTED);
}
}
}
if (curState != scheduler.getApp().getState()) {
LOG.info("Service state changed from {} -> {}", curState,
scheduler.getApp().getState());
}
}
private void printSystemEnv() {
for (Map.Entry<String, String> envs : System.getenv().entrySet()) {
LOG.info("{} = {}", envs.getKey(), envs.getValue());
}
}
public static void main(String[] args) throws Exception {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
org.apache.hadoop.util.StringUtils
.startupShutdownMessage(ServiceMaster.class, args, LOG);
try {
ServiceMaster serviceMaster = new ServiceMaster("Service Master");
ShutdownHookManager.get()
.addShutdownHook(new CompositeServiceShutdownHook(serviceMaster), 30);
YarnConfiguration conf = new YarnConfiguration();
Options opts = new Options();
opts.addOption(YARNFILE_OPTION, true, "HDFS path to JSON service " +
"specification");
opts.getOption(YARNFILE_OPTION).setRequired(true);
GenericOptionsParser parser = new GenericOptionsParser(conf, opts, args);
CommandLine cmdLine = parser.getCommandLine();
serviceMaster.serviceDefPath = cmdLine.getOptionValue(YARNFILE_OPTION);
serviceMaster.init(conf);
serviceMaster.start();
} catch (Throwable t) {
LOG.error("Error starting service master", t);
ExitUtil.terminate(1, "Error starting service master");
}
}
}
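// Illustrative launch sketch only: the real command line is assembled by the YARN
// service client rather than typed by hand, but it boils down to invoking this
// class with the path of the service specification (hypothetical path below):
//   java org.apache.hadoop.yarn.service.ServiceMaster -yarnfile hdfs:///services/myapp/myapp.json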
| []
| []
| []
| [] | [] | java | 0 | 0 | |
src/embedding/upload_api-v2.py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, json, time, sys, thread, base64
import argparse
import unicodedata
import shutil
import subprocess
import threading
# import dlib
import math
import time
import os.path
import Queue
from threading import Timer
import requests
from collections import defaultdict
from flask import Flask, request, url_for, make_response, abort, Response, jsonify, send_from_directory, redirect
from flask_sqlalchemy import SQLAlchemy
from migrate_db import People, TrainSet, db, AutoGroupSet, Stranger, Frame
from sqlalchemy import exc
#from flask_script import Server, Manager
#from flask_migrate import Migrate, MigrateCommand
#from werkzeug.utils import secure_filename
from uuid import uuid1
import urllib2
from urllib2 import Request, urlopen, URLError, HTTPError
from PIL import Image
#import tensorflow as tf
import numpy as np
from scipy import misc
from math import hypot
from multiprocessing import Process
from collections import OrderedDict
USE_DEFAULT_DATA=True # Enable to use "groupid_default" for SVM training
import facenet
#import clustering_people
from subprocess import Popen, PIPE
import FaceProcessing
from utilslib.mqttClient import MyMQTTClass
from utilslib.persistentUUID import getUUID
from utilslib.save2gst import save2gst, post2gst_motion, post2gst_video
from utilslib.save2gst import sendMessage2Group
from utilslib.getDeviceInfo import deviceId, get_current_groupid, get_deviceid, save_groupid_to_file, check_groupid_changed
from utilslib.qiniuUpload import qiniu_upload_img, qiniu_upload_video, qiniu_upload_data, SUFFIX
# from utilslib.make_a_gif import load_all_images, build_gif, url_to_image
# from utilslib.timer import Timer
from utilslib.clean_droped_data import clean_droped_embedding
from objects.generate_bottlenecks import resize
from faces import save_embedding
from utilslib.resultqueue import push_resultQueue, get_resultQueue
#deeepeye
from celery import Celery
from celery import Task
from billiard import current_process
from celery.signals import worker_process_init
from celery.signals import celeryd_after_setup
from celery.concurrency import asynpool
BASEDIR = os.getenv('RUNTIME_BASEDIR',os.path.abspath(os.path.dirname(__file__)))
TMP_DIR_PATH = os.path.join(BASEDIR, 'data', 'faces', 'tmp_pic_path')
UPLOAD_FOLDER = os.path.join(BASEDIR, 'image')
DATABASE = 'sqlite:///' + os.path.join(BASEDIR, 'data', 'data.sqlite')
face_tmp_objid = None
obje_tmp_objid = None
EN_OBJECT_DETECTION = False
FACE_DETECTION_WITH_DLIB = False # Disable DLIB at this time
EN_SOFTMAX = False
SOFTMAX_ONLY = False
isUpdatingDataSet = False
webShowFace = False
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif', 'bitmap'])
EXT_IMG='png'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# db = SQLAlchemy(app)
db.init_app(app)
ENABLE_DEBUG_LOG_TO_GROUP = False
DO_NOT_UPLOAD_IMAGE = False
DO_NOT_REPORT_TO_SERVER = False
NEAR_FRONTIAL_ONLY = False
image_size = 112
margin = 6
facenet_model = os.path.join(BASEDIR, 'facenet_models/20170512-110547/20170512-110547.pb')
minsize = 50 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
confident_value = 0.67
mineyedist = 0.3 # Eye distance of width of face bounding box
CONFIDENT_VALUE_THRESHOLD = 0.80 # confidence threshold for a match shown in the client app; only matches above this are displayed when traversing the database
FOR_ARLO = True
# BLURY_THREHOLD = 10 # Blur image if less than it. Reference: http://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/
uploadImg=None
mqttc=None
gbottlenecks=None
trainfromfottlenecks=None
gFlask_port=None
preFrameOnDevice = {}
all_face_index = 0 # incremented for every recognized face; used to tell apart two unknown people appearing in the same picture
#deeepeye
asynpool.PROC_ALIVE_TIMEOUT = 60.0 #set this long enough
CLUSTER_REDIS_ADDRESS = os.getenv('CLUSTER_REDIS_ADDRESS','redis')
CLUSTER_REDIS_PORT = os.getenv('CLUSTER_REDIS_PORT','6379')
deepeye = Celery('upload_api-v2',
broker='redis://'+CLUSTER_REDIS_ADDRESS+':'+CLUSTER_REDIS_PORT+'/0',
backend='redis://'+CLUSTER_REDIS_ADDRESS+':'+CLUSTER_REDIS_PORT+'/0')
deepeye.count = 1
# run as worker only
CLUSTER_WORKERONLY = os.getenv('CLUSTER_WORKERONLY', False)
HAS_OPENCL = os.getenv('HAS_OPENCL', 'true')
SAVE_ORIGINAL_FACE = False
original_face_img_path = os.path.join(BASEDIR, 'data', 'original_face_img')
if not os.path.exists(original_face_img_path):
os.mkdir(original_face_img_path)
SVM_CLASSIFIER_ENABLED=True
SVM_SAVE_TEST_DATASET=True
SVM_TRAIN_WITHOUT_CATEGORY=True
SVM_HIGH_SCORE_WITH_DB_CHECK=True
counter = 0
if HAS_OPENCL == 'false':
from embedding_client import get_remote_embedding
def featureCalculation(imgpath):
img = misc.imread(os.path.expanduser(imgpath))
prewhitened = facenet.prewhiten(img)
embedding = FaceProcessing.FaceProcessingImageData2(img)
return embedding
def allowed_file(filename):
"""
    Check whether the file extension is allowed.
    :param filename:
    :return: True if the extension is allowed
"""
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def insertOneImageIntoPeopleDB(filepath, uuid, group_id, objid, url, notFace=False, style="front"):
if notFace is True:
classId = "notface"
else:
classId = objid
if not os.path.exists(filepath):
print("file not exists %s" %(filepath))
return
embedding = featureCalculation2(filepath)
with app.app_context():
people = People(embed=embedding, uuid=uuid, group_id=group_id,
objId=objid, aliyun_url=url, classId=classId, style=style)
db.session.add(people)
db.session.commit()
os.remove(filepath)
return embedding
#For AutoGroup
#AutogroupFilesList = {}
#AutogroupDatasetFilesList = {}
AutogroupDB = None
AutogroupDatasetDB = None
isSyncAutogroupDataset = True
isStartAutogroup = False
AUTOGROUP_UNKNOWNFACES_DB = os.path.join(BASEDIR, 'autogroup_unknownfaces_db.json')
AUTOGROUP_DATASET_DB = os.path.join(BASEDIR, 'autogroup_dataset_db.json')
class MyDB:
def __init__(self, dbpath, isSave=False):
print("MyDB: __init__")
self.isSave = isSave
self.collection = {}
if (os.path.isfile(dbpath)):
with open(dbpath) as fJson:
self.collection = json.load(fJson)
self.dbpath = dbpath
def fetch(self):
return self.collection.copy()
def find(self, key, fields):
return self.collection.get(key, fields)
'''
if key is None:
return {}
if key in self.collection.keys():
if fields is None:
return self.collection[key]
subDic = self.collection[key]
isMatch = True
for subKey, subValue in fields:
if subKey not in subDic.keys() or subValue != subDic[subKey]:
isMatch = False
return {}
if isMatch is True:
return subDic
return {}
'''
def insert(self, key, fields):
self.collection[key] = fields
if self.isSave is True:
self.save()
def update(self, key, fields):
self.collection.update({key:fields})
if self.isSave is True:
self.save()
def remove(self, key):
self.collection.pop(key, "Key not Found!")
if self.isSave is True:
self.save()
def batch_insert(self, items):
print("items={}".format(items))
for key, value in items.items():
if isinstance(value,dict):
self.insert(key, value)
else:
print("batch_insert: invalid data format.")
if self.isSave is True:
self.save()
def save(self):
if self.dbpath is None:
return
with open(self.dbpath, 'w') as fJson:
json.dump(self.collection, fJson)
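# Illustrative sketch only of how MyDB is used as a small JSON-backed key/value
# store (the path below is made up; the real callers pass AUTOGROUP_UNKNOWNFACES_DB
# or AUTOGROUP_DATASET_DB):
#
#   example_db = MyDB('/tmp/example_db.json', isSave=True)
#   example_db.insert('face_1', {'url': 'http://example.com/1.png', 'style': 'front'})
#   example_db.update('face_1', {'url': 'http://example.com/1.png', 'style': 'left_side'})
#   print(example_db.find('face_1', None))
#   example_db.remove('face_1')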
def AutoGroupSetInsert(obj):
print("test")
def AutoGroupSetUpdate(obj):
print("test")
def AutoGroupSetRemove(obj):
print("test")
def disposeAutoGroupFunc(type, json=None):
global AutogroupDB
global AutogroupDatasetDB
global isSyncAutogroupDataset
global isStartAutogroup
print("disposeAutoGroupFunc: type={}, json={}".format(type, json))
if AutogroupDB is None:
AutogroupDB = MyDB(AUTOGROUP_UNKNOWNFACES_DB)
if AutogroupDatasetDB is None:
AutogroupDatasetDB = MyDB(AUTOGROUP_DATASET_DB)
if type == "dataset":
AutogroupDatasetDB.batch_insert(json)
print("Download autogroup dataset...")
elif type == "syncdataset":
isSyncAutogroupDataset = True
print("Set isSyncAutogroupDataset to True")
elif type == "autogroup":
if json is not None:
AutogroupDB.batch_insert(json)
isStartAutogroup = True
print("Autogroup...")
#Path format: GroupID_FaceId/url_filename
def getFacialImagePath(img_path):
part1 = os.path.basename(os.path.dirname(img_path))
part2 = os.path.basename(img_path)
return part1+"/"+part2
def downloadAutogroupDataset(result, group_id):
failedDownloadedItems = []
for person in result:
faceId = person.get("faceId")
urls = person.get("urls")
print('--> {}'.format(faceId))
for url in urls:
#print(' {}'.format(url))
            # url and faceid come from the group album in the client app
            # TODO: the JSON returned for the group album could be parsed in a for loop
img_url = url['url']
faceid = faceId
style = url['style']
if style != 'front':
#print("style=%s"%style);
continue
#status, embedding = down_img_embedding(img_url, group_id, faceid, style=style)
img_path = save_embedding.get_image_path_dst(img_url, group_id, faceId, style, "autogroup")
#print("img_path = {}".format(img_path))
embedding_path = save_embedding.get_embedding_path(img_path)
embedding = None
if not os.path.exists(img_path):
img_path = save_embedding.download_img_for_svm_dst(img_url, group_id, faceId, style, "autogroup")
if img_path:
if not os.path.exists(embedding_path):
                    img = misc.imread(os.path.expanduser(img_path)) # manually cropped images need to be resized again
aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
misc.imsave(img_path, aligned)
embedding = featureCalculation(img_path)
embedding_path = save_embedding.get_embedding_path(img_path)
save_embedding.create_embedding_string(embedding, embedding_path)
#print("1, type(embedding)={}".format(type(embedding)))
old_autogroup_set = AutoGroupSet.query.filter_by(url=img_url, group_id=group_id, is_or_isnot=True, style=style).first()
if not old_autogroup_set:
if embedding is None:
embedding_path = save_embedding.get_embedding_path(img_path)
embedding = save_embedding.read_embedding_string(embedding_path)
embedding = np.asarray(embedding)
print("read_embedding_string...........")
print("2, type(embedding)={}".format(type(embedding)))
unique_face_id = ''
                    if 'unique_face_id' in url:
unique_face_id = url['unique_face_id']
#unique_face_id = url['unique_face_id'] if unique_face_id in url else ''
autoGroupSet = AutoGroupSet(url=img_url, group_id=group_id, is_or_isnot=True,
device_id='', face_id=faceId, unique_face_id=unique_face_id, style=style, filepath=img_path, embed=embedding)
db.session.add(autoGroupSet)
db.session.commit()
print('-> syncAutogroupDataset downloaded url {} to {}'.format(url['url'], img_path))
else:
failedDownloadedItems.append(person)
return failedDownloadedItems
def syncAutogroupDatasetFunc():
group_id = get_current_groupid()
#host="http://localhost:3000/restapi/datasync/token/" + str(group_id)
API_SERVER_ADDRESS = os.getenv('API_SERVER_ADDRESS','workaihost.tiegushi.com')
API_SERVER_PORT = os.getenv('API_SERVER_PORT','80')
host = 'http://'+API_SERVER_ADDRESS+':'+API_SERVER_PORT+'/restapi/datasync/token/' + str(group_id)
result = None
try:
response = urlopen(host, timeout=10)
except HTTPError as e:
print('HTTPError: ', e.code)
return False
except URLError as e:
print('URLError: ', e.reason)
return False
except Exception as e:
print('Error: ', e)
return False
else:
# everything is fine
if 200 == response.getcode():
result = response.readline()
#print(result)
result = json.loads(result)
failedDownloadedItems = downloadAutogroupDataset(result, group_id)
try_count = 0
while len(failedDownloadedItems) > 0:
try_count = try_count+1
print("len(failedDownloadedItems) = {}, try_count={}".format(len(failedDownloadedItems), try_count))
if try_count > 3:
print("We have tried 3 times to download the autogroup dataset.")
break
failedDownloadedItems = downloadAutogroupDataset(failedDownloadedItems, group_id)
#Remove invalid data from local DB
urlsInLocalDB = AutoGroupSet.query.filter_by(group_id=group_id, style="front").all()
urlsOnServer = dict()
for person in result:
faceId = person.get("faceId")
urls = person.get("urls")
for url in urls:
img_url = url['url']
faceid = faceId
style = url['style']
urlsOnServer[img_url] = group_id, faceId, style
print("len(urlsInLocalDB) = {}".format(len(urlsInLocalDB)))
print("len(urlsOnServer) = {}".format(len(urlsOnServer)))
#print("urlsOnServer = {}".format(urlsOnServer))
if urlsInLocalDB:
for item in urlsInLocalDB:
image_path = None
#print("item = {}, item.url={}".format(item, item.url))
if item.url not in urlsOnServer.keys():
print("{}, {}, {}, {} is not on server, delete it from local DB.".format(item.url, item.group_id, item.face_id, item.style))
if item.filepath:
image_path = item.filepath
db.session.delete(item)
db.session.commit()
if image_path and os.path.isfile(image_path):
print('Remove image from local {}'.format(image_path))
os.remove(image_path)
embedding_path = save_embedding.get_embedding_path(image_path)
if embedding_path and os.path.isfile(embedding_path):
print('Remove embedding from local {}:'.format(embedding_path))
os.remove(embedding_path)
#Remove invalid photos from local
'''
dataset = []
for path in paths.split(':'):
path_exp = os.path.expanduser(path)
classes = [path for path in os.listdir(path_exp) \
if os.path.isdir(os.path.join(path_exp, path))]
classes.sort()
nrof_classes = len(classes)
for i in range(nrof_classes):
class_name = classes[i]
facedir = os.path.join(path_exp, class_name)
image_paths = []
if os.path.isdir(facedir):
images = os.listdir(facedir)
for img in images:
dataset.append(os.path.join(facedir,img))
if len(dataset) > 0:
for image_path in dataset:
l5 = (item for item in urlsInLocalDB if item.filepath == image_path)
if not l5:
print("image_path({}) only in local.".format(image_path))
if image_path and os.path.exists(image_path):
os.remove(filepath)
embedding_path = save_embedding.get_embedding_path(image_path)
if embedding_path and os.path.isfile(embedding_path):
os.remove(embedding_path)
'''
return True
else:
print('response code != 200')
return False
#Sync train data sets
def recover_db(img_url, group_id, faceid, filepath, embedding, style='front'):
    # Restore the embedding into the DB
uuid = get_deviceid()
p = People.query.filter_by(aliyun_url=img_url, group_id=group_id).first()
if not p:
people = People(embed=embedding, uuid=uuid, group_id=group_id,
objId=faceid, aliyun_url=img_url, classId=faceid, style=style)
db.session.add(people)
db.session.commit()
print("Add people")
#return True
#else:
#print("No need add people")
#return False
    old_train_set = TrainSet.query.filter_by(url=img_url, group_id=group_id).first() # each image maps to exactly one person
if not old_train_set:
new_train_set = TrainSet(url=img_url, group_id=group_id, is_or_isnot=True,
device_id='', face_id=faceid, filepath=filepath, drop=False, style=style)
db.session.add(new_train_set)
db.session.commit()
else:
if old_train_set.filepath != filepath:
print("Update filepath in local DB")
TrainSet.query.filter_by(url=img_url, group_id=group_id).update(dict(filepath=filepath))
db.session.commit()
def check_image_valid(filepath):
if filepath is None:
return False
if not os.path.exists(filepath):
print("not found {}".format(filepath))
return False
if os.path.getsize(filepath) < 1:
print("invalid file size {}".format(filepath))
return False
return True
def downloadTrainDatasets(result, group_id):
failedDownloadedItems = []
img_path = None
embedding_path = None
try:
for person in result:
faceId = person.get("faceId")
urls = person.get("urls")
print('--> {}'.format(faceId))
for url in urls:
#print(' {}'.format(url))
                # url and faceid come from the group album in the client app
                # TODO: the JSON returned for the group album could be parsed in a for loop
img_url = url['url']
faceid = faceId
style = url['style']
if SVM_TRAIN_WITHOUT_CATEGORY is True:
style = 'front'
else:
if style == 'left_side' or style == 'right_side' or style == 'lower_head' or style == 'blury':
continue
else:
style = 'front'
#status, embedding = down_img_embedding(img_url, group_id, faceid, style=style)
print('img_url: ', img_url)
img_path = save_embedding.get_image_path(img_url, group_id, faceId, style)
print("img_path = {}".format(img_path))
embedding_path = save_embedding.get_embedding_path(img_path)
print("embedding_path = {}".format(embedding_path))
denoise_path = save_embedding.get_image_denoise_path(img_path)
recreate_embedding = False
embedding = None
if not os.path.exists(img_path):
print('img-path not exists ----- ')
img_path = save_embedding.download_img_for_svm(img_url, group_id, faceId, style)
if img_path and check_image_valid(img_path):
if not os.path.exists(denoise_path):
img = misc.imread(os.path.expanduser(img_path))
save_embedding.save_image_denoise(img, denoise_path)
recreate_embedding = True
if os.path.exists(denoise_path) is True and check_image_valid(denoise_path) is False:
os.remove(embedding_path)
os.remove(denoise_path)
recreate_embedding = False
continue
if not os.path.exists(embedding_path) or recreate_embedding == True:
                        img = misc.imread(os.path.expanduser(denoise_path)) # manually cropped images need to be resized again
aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
misc.imsave(img_path, aligned)
print('......')
print('img_path: ',img_path)
embedding = featureCalculation2(img_path)
print('----------')
#embedding = featureCalculation(img_path)
embedding_path = save_embedding.get_embedding_path(img_path)
save_embedding.create_embedding_string(embedding, embedding_path)
#print("1, type(embedding)={}".format(type(embedding)))
else:
embedding_path = save_embedding.get_embedding_path(img_path)
embedding = save_embedding.read_embedding_string(embedding_path)
embedding = np.asarray(embedding)
recover_db(img_url, group_id, faceid, img_path, embedding, style=style)
#print('-> downloadTrainDatasets downloaded url {} to {}'.format(url['url'], img_path))
else:
if img_path is not None and os.path.exists(img_path):
os.remove(img_path)
failedDownloadedItems.append(person)
except Exception as ex:
print('downloadTrainDatasets: except:', ex)
if img_path and os.path.isfile(img_path):
print('downloadTrainDatasets: Remove image from local {}'.format(img_path))
os.remove(img_path)
if embedding_path and os.path.isfile(embedding_path):
print('downloadTrainDatasets: Remove embedding from local {}'.format(embedding_path))
os.remove(embedding_path)
return failedDownloadedItems
def disposeFinalSyncDatasetsThreadFunc(device_id, toid):
invalid_images_onserver = 0
try:
group_id = get_current_groupid()
#host="http://localhost:3000/restapi/datasync/token/" + str(group_id)
API_SERVER_ADDRESS = os.getenv('API_SERVER_ADDRESS','workaihost.tiegushi.com')
API_SERVER_PORT = os.getenv('API_SERVER_PORT','80')
host = 'http://'+API_SERVER_ADDRESS+':'+API_SERVER_PORT+'/restapi/datasync/token/' + str(group_id)
result = None
try:
response = urlopen(host, timeout=10)
except HTTPError as e:
print('HTTPError: ', e.code)
return False
except URLError as e:
print('URLError: ', e.reason)
return False
except Exception as e:
print('Error: ', e)
return False
else:
# everything is fine
if 200 == response.getcode():
result = response.readline()
#print(result)
result = json.loads(result)
failedDownloadedItems = downloadTrainDatasets(result, group_id)
try_count = 0
while len(failedDownloadedItems) > 0:
try_count = try_count+1
print("len(failedDownloadedItems) = {}, try_count={}".format(len(failedDownloadedItems), try_count))
if try_count > 3:
print("We have tried 3 times to download the training dataset.")
break
failedDownloadedItems = downloadTrainDatasets(failedDownloadedItems, group_id)
#Remove invalid data from local DB
urlsInLocalDB = TrainSet.query.filter_by(group_id=group_id).all()
urlsOnServer = dict()
for person in result:
faceId = person.get("faceId")
urls = person.get("urls")
for url in urls:
img_url = url['url']
faceid = faceId
style = url['style']
if style == 'left_side' or style == 'right_side' or style == 'lower_head' or style == 'blury':
invalid_images_onserver += 1
continue
urlsOnServer[img_url] = group_id, faceId, style
print("Trainsets: len(urlsInLocalDB) = {}".format(len(urlsInLocalDB)))
print("Trainsets: len(urlsOnServer) = {}".format(len(urlsOnServer)))
urlsTemp = {}
deleteUrlsInLocalDB = []
if urlsInLocalDB:
for item in urlsInLocalDB:
image_path = None
#print("item = {}, item.url={}".format(item, item.url))
if (item.url in urlsTemp and urlsTemp[item.url] == 1) or item.url not in urlsOnServer.keys():
print("{}, {}, {}, {} is not on server, delete it from local DB.".format(item.url, item.group_id, item.face_id, item.style))
deleteUrlsInLocalDB.append(item)
if item.filepath:
image_path = item.filepath
db.session.delete(item)
db.session.commit()
if image_path and os.path.isfile(image_path):
print('Remove image from local {}'.format(image_path))
os.remove(image_path)
embedding_path = save_embedding.get_embedding_path(image_path)
if embedding_path and os.path.isfile(embedding_path):
print('Remove embedding from local {}:'.format(embedding_path))
os.remove(embedding_path)
urlsTemp[item.url] = 1
if len(deleteUrlsInLocalDB) > 0:
for item in deleteUrlsInLocalDB:
urlsInLocalDB.remove(item)
urlsTemp = None
print("Trainsets: 2, len(urlsInLocalDB) = {}".format(len(urlsInLocalDB)))
print("Trainsets: 2, len(urlsOnServer) = {}".format(len(urlsOnServer)))
#Remove invalid photos from local
dataset = []
style = ''
# if SVM_TRAIN_WITHOUT_CATEGORY is True:
# style = 'front'
style = 'front'
path = os.path.dirname(os.path.dirname(save_embedding.get_image_path('http://test/noname', group_id, faceId, style)))
# style = ''
# if SVM_TRAIN_WITHOUT_CATEGORY is True:
# style = 'front'
print("path={}".format(path)) #Frank
path_exp = os.path.expanduser(path)
classes = [path for path in os.listdir(path_exp) \
if os.path.isdir(os.path.join(path_exp, path))]
classes.sort()
nrof_classes = len(classes)
#print("classes={}".format(classes)) #Frank
for i in range(nrof_classes):
class_name = classes[i]
if USE_DEFAULT_DATA is True:
if class_name == "groupid_defaultfaceid":
continue;
facedir = os.path.join(path_exp, class_name)
image_paths = []
print("facedir={}".format(facedir))
if os.path.isdir(facedir):
images = os.listdir(facedir)
for img in images:
dataset.append(os.path.join(facedir,img))
willRemoveCount = 0
print("len(dataset)={}".format(len(dataset))) #Frank
#print("dataset={}".format(dataset))
#print("urlsInLocalDB={}".format(urlsInLocalDB))
if len(dataset) > 0:
for image_path in dataset:
l5 = (item for item in urlsInLocalDB if item.filepath.replace('front/','') == image_path.replace('front/',''))
count = sum(1 for x in l5)
if count == 0:
print("sum={}".format(count))
willRemoveCount = willRemoveCount+1
print("image_path({}) only in local, remove it.".format(image_path))
if image_path and os.path.exists(image_path):
os.remove(image_path)
print("Remove image_path={}".format(image_path))
embedding_path = save_embedding.get_embedding_path(image_path)
if embedding_path and os.path.isfile(embedding_path):
os.remove(embedding_path)
if len(device_id) > 1 and len(toid) > 1:
message = 'image_path({}) only in local, remove it.'.format(image_path)
print(message)
sendMessage2Group(device_id, toid, message)
if len(device_id) > 1 and len(toid) > 1:
message = 'Stat: localDB={}, server={}/{}, localfiles={}'.format(len(urlsInLocalDB), len(urlsOnServer), invalid_images_onserver, len(dataset)-willRemoveCount)
print(message)
sendMessage2Group(device_id, toid, message)
return True
else:
print('response code != 200')
return False
except Exception as ex:
print('disposeFinalSyncDatasetsThreadFunc: except:', ex)
def disposeSyncStatusInfoThreadFunc(device_id, toid):
invalid_images_onserver = 0
try:
group_id = get_current_groupid()
#host="http://localhost:3000/restapi/datasync/token/" + str(group_id)
API_SERVER_ADDRESS = os.getenv('API_SERVER_ADDRESS','workaihost.tiegushi.com')
API_SERVER_PORT = os.getenv('API_SERVER_PORT','80')
host = 'http://'+API_SERVER_ADDRESS+':'+API_SERVER_PORT+'/restapi/datasync/token/' + str(group_id)
result = None
try:
response = urlopen(host, timeout=10)
except HTTPError as e:
print('HTTPError: ', e.code)
return False
except URLError as e:
print('URLError: ', e.reason)
return False
except Exception as e:
print('Error: ', e)
return False
else:
# everything is fine
if 200 == response.getcode():
result = response.readline()
#print(result)
result = json.loads(result)
#Remove invalid data from local DB
urlsInLocalDB = TrainSet.query.filter_by(group_id=group_id).all()
urlsOnServer = dict()
for person in result:
faceId = person.get("faceId")
urls = person.get("urls")
for url in urls:
img_url = url['url']
faceid = faceId
style = url['style']
if style == 'left_side' or style == 'right_side' or style == 'lower_head' or style == 'blury':
invalid_images_onserver += 1
continue
urlsOnServer[img_url] = group_id, faceId, style
print("Trainsets: len(urlsInLocalDB) = {}".format(len(urlsInLocalDB)))
print("Trainsets: len(urlsOnServer) = {}".format(len(urlsOnServer)))
#Remove invalid photos from local
dataset = []
# style = ''
# if SVM_TRAIN_WITHOUT_CATEGORY is True:
style = 'front'
path = os.path.dirname(os.path.dirname(save_embedding.get_image_path('http://test/noname', group_id, faceId, style)))
style = ''
if SVM_TRAIN_WITHOUT_CATEGORY is True:
style = 'front'
print("path={}".format(path)) #Frank
path_exp = os.path.expanduser(path)
classes = [path for path in os.listdir(path_exp) \
if os.path.isdir(os.path.join(path_exp, path))]
classes.sort()
nrof_classes = len(classes)
#print("classes={}".format(classes)) #Frank
for i in range(nrof_classes):
class_name = classes[i]
facedir = os.path.join(path_exp, class_name)
image_paths = []
print("facedir={}".format(facedir))
if os.path.isdir(facedir):
images = os.listdir(facedir)
for img in images:
dataset.append(os.path.join(facedir,img))
if len(device_id) > 1 and len(toid) > 1:
message = 'StatInfo: localDB={}, server={}/{}, localfiles={}'.format(len(urlsInLocalDB), len(urlsOnServer), invalid_images_onserver, len(dataset))
print(message)
sendMessage2Group(device_id, toid, message)
return True
else:
print('response code != 200')
return False
except Exception as ex:
print('disposeSyncStatusInfoThreadFunc: except:', ex)
# @app.before_first_request
def migration():
if os.path.exists('migrate_db.exe'):
out_put = subprocess.check_output(['./migrate_db.exe', 'db', 'upgrade'])
else:
out_put = subprocess.check_output(['python', 'migrate_db.py', 'db', 'upgrade'])
print(out_put)
print('> finish migrate upgrade')
@app.route('/api/status', methods=['GET'])
def get_status():
global isUpdatingDataSet
if isUpdatingDataSet is False:
resp = Response(json.dumps({"status":"alive"}), status=200, mimetype='application/json')
else:
resp = Response(json.dumps({"status":"busy"}), status=401, mimetype='application/json')
return resp
@app.route('/api/images/<filename>', methods=['GET'])
def img(filename):
# p = People.query.filter_by(filename=filename).first()
# if p and p.aliyun_url:
# return redirect(p.aliyun_url)
if os.path.isfile(os.path.join(app.config['UPLOAD_FOLDER'], filename)):
        # Return the image file
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename)
    # Return JSON instead
# data = {'img_name': filename, 'img_url': request.url}
# js = json.dumps(data)
# resp = Response(js, status=200, mimetype='application/json')
# return resp
else:
return abort(404)
def format_img_filename(old_filename):
"""
    Build a new filename from uuid, gFlask_port and timestamps to avoid name collisions.
    :param old_filename: original filename
:return: new_filename, uuid, ts
"""
ext = old_filename.rsplit('.', 1)[-1]
unix_time = time.time()
uuid = request.args.get('uuid', '')
ts = request.args.get('ts', str(unix_time * 1000))
new_filename = uuid + '_' + str(gFlask_port) + '_' + str(unix_time).replace('.', '') + '_' + str(ts) + '.' + ext
return new_filename, uuid, ts
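# For illustration only (made-up values): with uuid='cam01', gFlask_port=5000,
# unix_time=1516175100.25 and a client-supplied ts of '1516175100250', the result
# is 'cam01_5000_151617510025_1516175100250.jpg'.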
@app.route('/api/upload_video/', methods=['POST'])
def upload_video():
video_local_path = request.form.get('videopath')
thumbnail_local_path = request.form.get('thumbnail', '')
    ts = int(time.time()*1000) # timestamp in milliseconds
offset = time.timezone if (time.localtime().tm_isdst == 0) else time.altzone
    ts_offset = offset/60/60 * -1 # timezone offset in hours, e.g. 8
uuid = request.args.get('uuid', '')
key = uuid + str(ts)
    video_src = qiniu_upload_video(key+'video', video_local_path) # upload the local video and get its playback URL
    video_post = qiniu_upload_img(key+'thumbnail', thumbnail_local_path) # URL of the video thumbnail image
person_id = request.args.get('objid', '')
if len(video_post) < 1:
video_post = 'http://data.tiegushi.com/fTnmgpdDN4hF9re8F_1493176458747.jpg';
payload = {'uuid': uuid,
'person_id': person_id,
'video_post': video_post,
'video_src': video_src,
'ts': ts,
'ts_offset': ts_offset,
}
post2gst_video(payload)
print('upload_video'.center(50,'-'))
print(payload)
return Response(json.dumps({"result": "ok"}), status=200, mimetype='application/json')
def sendDebugLogToGroup(uuid, current_groupid, message):
if ENABLE_DEBUG_LOG_TO_GROUP is True:
sendMessage2Group(uuid, current_groupid, message)
def showRecognizedImage(image_path, queue_index):
if os.path.exists(image_path):
recognized_img_path = os.path.join(os.path.dirname(image_path), 'face{}.png'.format(queue_index))
shutil.copy(image_path, recognized_img_path)
FACE_COUNT = defaultdict(int)
OBJ_COUNT = 0
def updateDataSet(url, objId, group_id, device_id, drop, img_type, sqlId, style, img_ts, rm_reason):
    global isUpdatingDataSet
    isUpdatingDataSet = True
try:
_updateDataSet(url, objId, group_id, device_id, drop, img_type, sqlId, style, img_ts, rm_reason)
except Exception as ex:
print("updateDataSet error:", ex)
isUpdatingDataSet = False
#raise
isUpdatingDataSet = False
FAILEDDOWNLOADINFOFILE = os.path.join(BASEDIR, 'failed_download_info.json')
FAILEDDOWNLOADINFOFILE2 = os.path.join(BASEDIR, 'failed_download_info2.json')
fileMuxlock = threading.Lock()
def loadFailedDownloadInfo():
failedDownloadInfo = {}
failedDownloadInfo['dInfo'] = []
if (os.path.isfile(FAILEDDOWNLOADINFOFILE)):
with open(FAILEDDOWNLOADINFOFILE) as fJson:
failedDownloadInfo = json.load(fJson)
return failedDownloadInfo
def recordFailedDownload(url, group_id, face_id, style, device_id):
failedDownloadInfo = loadFailedDownloadInfo()
failedDownloadInfo['dInfo'].append({
'url': url,
'group_id': group_id,
'face_id': face_id,
'style': style,
'device_id': device_id
})
with open(FAILEDDOWNLOADINFOFILE, 'w') as fJson:
json.dump(failedDownloadInfo, fJson)
def loadFailedDownloadList(filepath):
failedDownloadInfo = {}
failedDownloadInfo['dInfo'] = []
if (os.path.isfile(filepath)):
with open(filepath) as fJson:
failedDownloadInfo = json.load(fJson)
return failedDownloadInfo
def addFailedDownloadInfo(url, group_id, face_id, style, device_id):
fileMuxlock.acquire()
failedDownloadInfo = loadFailedDownloadList(FAILEDDOWNLOADINFOFILE2)
failedDownloadInfo['dInfo'].append({
'url': url,
'group_id': group_id,
'face_id': face_id,
'style': style,
'device_id': device_id
})
print('addFailedDownloadInfo: url='+url)
with open(FAILEDDOWNLOADINFOFILE2, 'w') as fJson:
json.dump(failedDownloadInfo, fJson)
fileMuxlock.release()
def mergeTwoJsonFiles():
fileMuxlock.acquire()
failedDownloadInfo1 = loadFailedDownloadList(FAILEDDOWNLOADINFOFILE)
failedDownloadInfo2 = loadFailedDownloadList(FAILEDDOWNLOADINFOFILE2)
mergedJson = {key: value for (key, value) in (failedDownloadInfo1.items() + failedDownloadInfo2.items())}
if (len(mergedJson['dInfo']) > 0):
print('mergeTwoJsonFiles: mergedJson=')
for key, value in mergedJson.items():
print(key, ':', value)
with open(FAILEDDOWNLOADINFOFILE, 'w') as fJson:
json.dump(mergedJson, fJson)
if (os.path.isfile(FAILEDDOWNLOADINFOFILE2)):
os.remove(FAILEDDOWNLOADINFOFILE2)
fileMuxlock.release()
def mergeFailedDownloadInfo(json1):
fileMuxlock.acquire()
failedDownloadInfo = loadFailedDownloadList(FAILEDDOWNLOADINFOFILE2)
mergedJson = {key: value for (key, value) in (json1.items() + failedDownloadInfo.items())}
if (len(mergedJson['dInfo']) > 0):
print('mergeFailedDownloadInfo: mergedJson=')
for key, value in mergedJson.items():
print(key, ':', value)
with open(FAILEDDOWNLOADINFOFILE, 'w') as fJson:
json.dump(mergedJson, fJson)
if (os.path.isfile(FAILEDDOWNLOADINFOFILE2)):
os.remove(FAILEDDOWNLOADINFOFILE2)
fileMuxlock.release()
def downloadFunc():
global FACE_COUNT
global OBJ_COUNT
while True:
try:
tmpFailedDownloadInfo = {}
tmpFailedDownloadInfo['dInfo'] = []
mergeTwoJsonFiles()
failedDownloadInfo = loadFailedDownloadList(FAILEDDOWNLOADINFOFILE)
for info in failedDownloadInfo['dInfo']:
if SVM_TRAIN_WITHOUT_CATEGORY is True:
info['style'] = 'front'
img_path = save_embedding.get_image_path(info['url'], info['group_id'], info['face_id'], info['style'])
embedding_path = save_embedding.get_embedding_path(img_path)
denoise_path = save_embedding.get_image_denoise_path(img_path)
recreate_embedding = False
if not os.path.exists(img_path):
img_path = save_embedding.download_img_for_svm(info['url'], info['group_id'], info['face_id'], style=info['style'])
if img_path:
if not os.path.exists(denoise_path):
img = misc.imread(os.path.expanduser(img_path))
save_embedding.save_image_denoise(img, denoise_path)
recreate_embedding = True
if not os.path.exists(embedding_path) or recreate_embedding == True:
                        img = misc.imread(os.path.expanduser(denoise_path)) # manually cropped images need to be resized again
aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
misc.imsave(img_path, aligned)
embedding = featureCalculation(img_path)
embedding_path = save_embedding.get_embedding_path(img_path)
save_embedding.create_embedding_string(embedding, embedding_path)
old_train_set = TrainSet.query.filter_by(url=info['url'], group_id=info['group_id'], is_or_isnot=True, style=info['style']).first()
if not old_train_set:
train = TrainSet(url=info['url'], group_id=info['group_id'], is_or_isnot=True,
device_id=info['device_id'], face_id=info['face_id'], filepath=img_path, drop=False, style=info['style'])
db.session.add(train)
db.session.commit()
FACE_COUNT[info['style']] += 1
print('-> SVM {} style face count'.format((FACE_COUNT[info['style']])))
else:
tmpFailedDownloadInfo['dInfo'].append({info})
if (len(tmpFailedDownloadInfo['dInfo']) > 0):
mergeFailedDownloadInfo(tmpFailedDownloadInfo)
#with open(FAILEDDOWNLOADINFOFILE, 'w') as fJson:
# json.dump(failedDownloadInfo, fJson)
elif (os.path.isfile(FAILEDDOWNLOADINFOFILE)):
os.remove(FAILEDDOWNLOADINFOFILE)
except Exception as ex:
print('except:', ex)
time.sleep(5)
tDownload = threading.Thread(target=downloadFunc)
tDownload.daemon = True
tDownload.start()
def dropPersonFunc(group_id, face_id, drop_person):
print('dropPersonFunc, group_id:', group_id, 'face_id:', face_id, 'drop_person:', drop_person)
try:
if drop_person == 'true' or drop_person == 'True' or drop_person == True:
with app.app_context():
train_set = TrainSet.query.filter_by(group_id=group_id, face_id=face_id).all()
dirname = None
for t in train_set:
print('delete db, group_id:', group_id, 'face_id:', face_id, 'url:', t.url)
if t.filepath:
dirname = t.filepath
db.session.delete(t)
db.session.commit()
if dirname:
dirname = dirname.rsplit('/', 1)[0]
print('dropPerson, remove dir:', dirname)
shutil.rmtree(dirname, ignore_errors=True)
except Exception as ex:
print('dropPersonFunc ex:', ex)
def generate_embedding_ifmissing(data_dir):
if not os.path.exists(data_dir):
print("generate_embedding_ifmissing: data_dir is not exists! Please check it.")
dataset = facenet.get_dataset(data_dir)
paths, labels = facenet.get_image_paths_and_labels(dataset)
nrof_images = len(paths)
for i in range(nrof_images):
img_path = paths[i]
embedding_path = save_embedding.get_embedding_path(img_path)
denoise_path = save_embedding.get_image_denoise_path(img_path)
print("denoise_path={}".format(denoise_path))
recreate_embedding = False
if not os.path.exists(denoise_path):
img = misc.imread(os.path.expanduser(img_path))
save_embedding.save_image_denoise(img, denoise_path)
recreate_embedding = True
if not os.path.exists(embedding_path) or recreate_embedding == True:
embedding = featureCalculation2(denoise_path)
save_embedding.create_embedding_string(embedding, embedding_path)
print("Create missing embedding file: {}".format(embedding_path))
def check_default_data(group_id, style):
"""
default_data is face data for SVM training. SVM training need at least two classes.
Check if there is default data. If not, add default data.
:param group_id:
:param style:
:return:
"""
group_path = os.path.join(save_embedding.BASEPATH, group_id, style, save_embedding.img_dir)
'''
class_list = os.listdir(group_path)
for one_class in class_list:
class_id = one_class.split('_')[-1]
# FIXME : Probably need to check all the files for default. Not just existence of image directory
if class_id == 'default':
return
'''
# Copy default face data
default_dir_path = os.path.join(group_path, 'groupid_defaultfaceid')
if not os.path.exists(default_dir_path):
os.mkdir(default_dir_path)
img_path = os.path.join(default_dir_path, 'default_face.png')
if not os.path.isfile(img_path):
default_data_path = os.path.join(BASEDIR, 'faces', 'default_data', 'default_face.png')
shutil.copy(default_data_path, default_dir_path)
# Generate denoise and embedding for default data
img = misc.imread(os.path.expanduser(img_path))
aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
misc.imsave(img_path, aligned)
'''
denoise_path = save_embedding.get_image_denoise_path(img_path)
save_embedding.save_image_denoise(aligned, denoise_path)
'''
embedding_path = save_embedding.get_embedding_path(img_path)
if not os.path.isfile(embedding_path):
embedding = featureCalculation2(img_path)
save_embedding.create_embedding_string(embedding, embedding_path)
#updateDataSet(url=url, objId=face_id, group_id=group_id,drop=drop)
def _updateDataSet(url, objId, group_id, device_id, drop, img_type, sqlId, style, img_ts, rm_reason):
print("> MQTT url:{}, objId:{}, drop:{}, gid:{}, sqlId:{}, style:{}, rm_reason:{}".format(url, objId, drop,
group_id, sqlId, style, rm_reason))
face_id = str(objId)
if style is None:
print('Need to update client app !')
return
    styles = style.split('|') # e.g. ['left', 'right']
global FACE_COUNT
global OBJ_COUNT
print("> MQTT2 url:{}, objId:{}, drop:{}, gid:{}, sqlId:{}, style:{}, rm_reason:{}, group_id:{}, drop:{}, img_type:{}".format(url, objId, drop,
group_id, sqlId, style, rm_reason, group_id, drop, img_type))
if (url is None) or (objId is None) or (group_id is None) or (drop is None) or (img_type is None):
return
if (len(url) < 1) or (len(objId) < 1) or (len(group_id) < 1) or (len(img_type) < 1):
return
if EN_OBJECT_DETECTION is False and img_type == 'object':
return
with app.app_context():
        # Faces: tapping "delete" on an unrecognized image, or "wrong" and then "delete" on a merged image, is handled here
if img_type == 'face' and sqlId is not None and (drop == 'true' or drop == 'True' or drop == True):
current_dirty_in_db = People.query.filter_by(aliyun_url=url, group_id=group_id).all()
old_dirty_in_db = People.query.filter_by(id=sqlId, uuid=device_id).all()
for d in old_dirty_in_db:
                #old_dirty_in_db holds the reference data stored when the person was first created
print("remove origin dirty embedding url={}".format(d.aliyun_url))
db.session.delete(d)
db.session.commit()
for t in current_dirty_in_db:
if rm_reason is not None and rm_reason == "notface":
t.classId = "notface"
db.session.add(t)
db.session.commit()
print("update None-face image 1")
continue
                #Delete the current image
print("remove current dirty embedding sqlId={}".format(sqlId))
db.session.delete(t)
db.session.commit()
#if SVM_CLASSIFIER_ENABLED is False:
for style in styles:
if style == 'dirty' or style == 'low_pixel' or style == 'blury':
continue
train_set = TrainSet.query.filter_by(url=url, group_id=group_id, style=style).all()
people_in_db = People.query.filter_by(group_id=group_id, aliyun_url=url).all()
if drop == 'true' or drop == 'True' or drop is True:
print(rm_reason)
if len(people_in_db) == 0 and rm_reason is not None and rm_reason == "notface":
print("insert not face image into people db")
url_tmp=url.split('/')
if len(url_tmp) > 0:
imgfilepath = save_embedding.download_img_only(url, 'tempdir')
insertOneImageIntoPeopleDB(imgfilepath, device_id, group_id, objId, url, notFace=True, style=style)
for t in train_set:
t.drop = True
db.session.delete(t)
db.session.commit()
#db.session.delete(t)
#delete the train image
filepath = t.filepath
print('drop train_set db:', filepath)
if filepath and os.path.exists(filepath):
os.remove(filepath)
for t in people_in_db:
if rm_reason is not None and rm_reason == "notface":
t.classId = "notface"
db.session.add(t)
db.session.commit()
print("update None-face image 2")
continue
print('drop people_in_db db & filepath:')
db.session.delete(t)
db.session.commit()
# labeled_img[person_id].remove(url)
else:
embedding = None
if len(people_in_db) == 0:
print("insert into people db")
url_tmp=url.split('/')
if len(url_tmp) > 0:
imgfilepath = save_embedding.download_img_only(url, 'tempdir')
embedding = insertOneImageIntoPeopleDB(imgfilepath, device_id, group_id, objId, url, notFace=False, style=style)
else:
for t in people_in_db:
print('update people_in_db classId %s as %s' %(t.classId, objId))
t.classId = objId
db.session.add(t)
db.session.commit()
old_train_set = TrainSet.query.filter_by(url=url, group_id=group_id, is_or_isnot=True, style=style).first()
print("old_train_set: {}, {}".format(old_train_set, url))
if not old_train_set:
print("insert one in db")
if SVM_TRAIN_WITHOUT_CATEGORY is True:
style = 'front'
train = TrainSet(url=url, group_id=group_id, is_or_isnot=True,
device_id=device_id, face_id=face_id, filepath='', drop=False, style=style)
db.session.add(train)
db.session.commit()
if img_type == 'object' and EN_OBJECT_DETECTION is True:
infile = gbottlenecks.downloadImg(url, group_id, face_id, train.id)
                        print(infile) # path of the original image
                        resize(infile)
                        os.remove(infile) # keep the resized image and delete the original
gbottlenecks.createAndCacheBottlenecks()
OBJ_COUNT += 1
train.filepath = infile
elif SVM_CLASSIFIER_ENABLED is True:
img_path = save_embedding.download_img_for_svm(url, group_id, face_id, style=style)
if img_path:
img = misc.imread(os.path.expanduser(img_path)) # manually cropped images need to be resized again
aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
misc.imsave(img_path, aligned)
denoise_path = save_embedding.get_image_denoise_path(img_path)
save_embedding.save_image_denoise(aligned, denoise_path)
embedding = featureCalculation2(denoise_path)
embedding_path = save_embedding.get_embedding_path(img_path)
save_embedding.create_embedding_string(embedding, embedding_path)
FACE_COUNT[style] += 1
train.filepath = img_path
print('-> insert: SVM {} style face count, url={}'.format((FACE_COUNT[style]), url))
else:
print('download failed, save to json file for future download: url={}'.format(url))
#recordFailedDownload(url, group_id, face_id, style, device_id)
addFailedDownloadInfo(url, group_id, face_id, style, device_id)
else:
print('face')
# Face training flow: label the face > download the image from the face URL > save and convert the corresponding embedding > train
img_path = save_embedding.download_img(url, group_id, face_id, img_id=train.id, style=style)
img = misc.imread(os.path.expanduser(img_path)) # manually cropped images need to be resized again
aligned = misc.imresize(img, (image_size, image_size), interp='bilinear')
misc.imsave(img_path, aligned)
embedding = featureCalculation2(img_path)
embedding_path = save_embedding.get_embedding_path(img_path)
save_embedding.create_embedding_string(embedding, embedding_path)
FACE_COUNT[style] += 1
train.filepath = img_path
print('{} style face count'.format((FACE_COUNT[style])))
db.session.add(train)
db.session.commit()
elif old_train_set and old_train_set.face_id != face_id:
print("update one in db, url={}".format(url))
if old_train_set.drop == True:
print("this url is droped")
return
# the face in this url is not xxx
if SVM_TRAIN_WITHOUT_CATEGORY is True:
style = 'front'
old_train_set.is_or_isnot = False
db.session.add(old_train_set)
db.session.commit()
# the face in this url is xxx
new_train_set = TrainSet(url=url, group_id=group_id, is_or_isnot=True, device_id=device_id,
face_id=face_id, style=style)
db.session.add(new_train_set)
db.session.commit()
if img_type == 'object' and EN_OBJECT_DETECTION is True:
infile = gbottlenecks.downloadImg(url, group_id, face_id, new_train_set.id)
resize(infile)
os.remove(infile) # 保存resized的图片,删除原图
gbottlenecks.createAndCacheBottlenecks()
OBJ_COUNT += 1
# the old image needs to be removed from the local directory here
old_img_path = infile.replace(str(new_train_set.id)+'.jpg', str(old_train_set.id)+'.jpg')
os.remove(old_img_path)
elif SVM_CLASSIFIER_ENABLED is True:
img_path = save_embedding.download_img_for_svm(url, group_id, face_id, style=style)
if img_path:
denoise_path = save_embedding.get_image_denoise_path(img_path)
recreate_embedding = False
if not os.path.exists(denoise_path):
img = misc.imread(os.path.expanduser(img_path))
save_embedding.save_image_denoise(img, denoise_path)
recreate_embedding = True
embedding_path = save_embedding.get_embedding_path(img_path)
if os.path.isfile(embedding_path) is False:
#img = misc.imread(os.path.expanduser(img_path)) # manually cropped images need to be resized again
#aligned = misc.imresize(img, (image_size, image_size))
#misc.imsave(img_path, aligned)
if embedding is None:
embedding = featureCalculation(denoise_path)
save_embedding.create_embedding_string(embedding, embedding_path)
FACE_COUNT[style] += 1
print('update: {} style face count, url={}'.format(FACE_COUNT[style], url))
# the old image needs to be removed from the local directory here
old_img_path = img_path.replace(str(new_train_set.id) + '.jpg', str(old_train_set.id) + '.jpg')
os.remove(old_img_path)
else:
print('face')
img_path = save_embedding.download_img(url, group_id, face_id, img_id=new_train_set.id, style=style)
#img = misc.imread(os.path.expanduser(img_path)) # manually cropped images need to be resized again
#aligned = misc.imresize(img, (image_size, image_size))
#misc.imsave(img_path, aligned)
embedding = featureCalculation(img_path)
embedding_path = save_embedding.get_embedding_path(img_path)
save_embedding.create_embedding_string(embedding, embedding_path)
FACE_COUNT[style] += 1
print('{} style face count'.format((FACE_COUNT[style])))
# the old image needs to be removed from the local directory here
old_img_path = img_path.replace(str(new_train_set.id) + '.jpg', str(old_train_set.id) + '.jpg')
os.remove(old_img_path)
else:
print("already in dataset")
if USE_DEFAULT_DATA is True:
check_default_data(group_id, style)
if img_type == 'object':
# all_dataset = TrainSet.query.filter_by(group_id=group_id, face_id=face_id, is_or_isnot=True).all()
# cnt = TrainSet.query.filter_by(group_id=group_id, face_id=face_id, is_or_isnot=True).count()
if OBJ_COUNT > 0 and OBJ_COUNT % 20 == 0:
#sendMessage2Group(device_id, group_id, "Training now ...")
clean_droped_embedding(group_id)
print("training now ...")
if os.path.exists('objects/train_obj.exe'):
os.system("./objects/train_obj.exe {} {}".format(deviceId, group_id))
elif os.path.exists('objects/train_obj.pyc'):
os.system("python objects/train_obj.pyc {} {}".format(deviceId, group_id))
else:
os.system("python objects/train_obj.py {} {}".format(deviceId, group_id))
else:
current_groupid = get_current_groupid()
if SVM_CLASSIFIER_ENABLED is True and FACE_COUNT[style] > 0 and FACE_COUNT[style] % 10 == 0:
# #http://sharats.me/the-ever-useful-and-neat-subprocess-module.html
# #https://stackoverflow.com/questions/2837214/python-popen-command-wait-until-the-command-is-finished
if mqttc is not None:
mqttc.train_svm(device_id, current_groupid, "Auto training triggered ...")
'''
clean_droped_embedding(current_groupid)
svm_current_groupid_basepath = os.path.join('data', 'faces', current_groupid)
if len(device_id) > 1 and len(current_groupid) > 1:
sendMessage2Group(device_id, current_groupid, "Auto training triggered ...")
stime = time.time()
# for style in ['left_side', 'right_side', 'front']:
for style in ['front']:
#style = 'front'
svm_train_dataset = os.path.join(svm_current_groupid_basepath, style, 'face_embedding')
if not os.path.exists(svm_train_dataset):
continue
svn_train_pkl = os.path.join(svm_current_groupid_basepath, style, 'classifier_182.pkl')
args_list = ['TRAIN', svm_train_dataset, 'facenet_models/20170512-110547/20170512-110547.pb',
svn_train_pkl, '--batch_size', '1000']
generate_embedding_ifmissing(svm_train_dataset)
ret_val = classifer.train_svm_with_embedding(args_list)
message = "Failed"
if ret_val is None:
message = "Failed"
else:
if ret_val is "OK":
train_cost = round(time.time() - stime,2)
message = '-> Train cost {}s'.format(train_cost)
else:
message = ret_val
print('-> Train {} SVM cost {}s'.format(style, time.time() - stime))
if len(device_id) > 1 and len(current_groupid) > 1:
sendMessage2Group(device_id, current_groupid, message)
'''
elif EN_SOFTMAX is True and FACE_COUNT[style] > 0 and FACE_COUNT[style] % 20 == 0:
clean_droped_embedding(group_id)
print("training on embedding now ...")
if os.path.exists('faces/train_faces.exe'):
output = subprocess.check_output(['./faces/train_faces.exe', current_groupid, style])
# s = subprocess.Popen('python ./faces/train_faces.exe {} {}'.format(current_groupid, style), shell=True)
elif os.path.exists('faces/train_faces.pyc'):
output = subprocess.check_output(['python', 'faces/train_faces.pyc', current_groupid, style])
# s = subprocess.Popen('python ./faces/train_faces.pyc {} {}'.format(current_groupid, style), shell=True)
else:
output = subprocess.check_output(['python', 'faces/train_faces.py', current_groupid, style])
# s = subprocess.Popen('python ./faces/train_faces.py {} {}'.format(current_groupid, style), shell=True)
print(output)
# os.system("python faces/train_faces.py") # 两种外挂训练方式
## 用户手动label时,更新自动标注训练集
# labeled_img = {}
def updata_trainset(json):
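# Legacy trainset handler: it logs and returns immediately, so the body below is effectively dead code kept for reference.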
print("legacy trainset ignored")
return
# Incoming data is in JSON format
data = json
url = data.get('url')
person_id = data.get('person_id')
device_id = data.get('device_id')
face_id = data.get('face_id')
drop = data.get('drop')
if (url is None) or (person_id is None) or (device_id is None) or (face_id is None) or (drop is None):
return
with app.app_context():
if drop == 'true' or drop == 'True' or drop == True:
train_set = TrainSet.query.filter_by(url=url, device_id=device_id).all()
for t in train_set:
db.session.delete(t)
db.session.commit()
# labeled_img[person_id].remove(url)
else:
old_train_set = TrainSet.query.filter_by(url=url, device_id=device_id, is_or_isnot=True).first() # each image maps to exactly one person
if old_train_set and old_train_set.face_id != int(face_id):
# the face in this url is not xxx
old_train_set.is_or_isnot = False
db.session.add(old_train_set)
db.session.commit()
# the face in this url is xxx
new_train_set = TrainSet(url=url,
embed=old_train_set.embed,
is_or_isnot=True,
person_id=person_id,
device_id=device_id,
face_id=face_id,
)
db.session.add(new_train_set)
db.session.commit()
print(old_train_set)
print(new_train_set)
# Keep a separate dictionary file that stores manually labeled urls
# if not labeled_img.has_key(person_id):
# labeled_img[person_id] = set([])
# labeled_img[person_id].add(url)
@app.route('/api/tablet/', methods=['POST'])
def sync_config():
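# Tablet sync endpoint (POST /api/tablet/): for a 'group' command it clears the cached group_id and re-subscribes MQTT so the new group is fetched from the server.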
cmd_type = request.args.get('type', '')
print(cmd_type)
if cmd_type is not None and len(cmd_type) > 1:
if cmd_type == 'group':
uuid = request.args.get('uuid', '')
group_id = request.args.get('group_id', '')
print(uuid)
print(group_id)
if uuid is not None and len(uuid) > 1:
print("uuid=%s got group event, going to reconnect mqtt" %(uuid))
#Clear group_id, otherwise it will not be fetched from the server again
save_groupid_to_file('')
mqttc.reSubscribeGroup(uuid)
time.sleep(2)
return Response(json.dumps({"result":"ok"}), status=200, mimetype='application/json')
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found ' + request.url,
}
return make_response(json.dumps(message), 404)
# Test upload endpoint
@app.route('/test/upload')
def upload_test():
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form method=post action=/api/images enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--report', dest='report', action='store_true')
parser.add_argument('--no-report', dest='report', action='store_false')
parser.set_defaults(report=True)
parser.add_argument('--port', type=int,
help='The port the server listens on', default=5000)
parser.add_argument('--host', type=str,
help='The IP address the server listens on', default='0.0.0.0')
return parser.parse_args(argv)
def mqttDebugOnOff(MQTTDebugFlag):
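# Toggle forwarding of debug logs to the MQTT group; only proper boolean values are accepted.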
global ENABLE_DEBUG_LOG_TO_GROUP
if MQTTDebugFlag is False or MQTTDebugFlag is True:
ENABLE_DEBUG_LOG_TO_GROUP = MQTTDebugFlag
def crons_start():
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
if not os.path.exists(os.path.join(BASEDIR, 'data', 'data.sqlite')):
db.create_all()
svm_face_dataset=None
svm_face_embedding=None
svm_tmp_dir=None
svm_face_testdataset=None
svm_stranger_testdataset=None
def init_fs():
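# Prepare the on-disk layout: upload folder, seeded sqlite database, tmp dir and, when the SVM classifier is enabled, the face dataset/embedding/test directories.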
global svm_face_dataset
global svm_face_embedding
global svm_tmp_dir
global svm_face_testdataset
global svm_stranger_testdataset
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
# if not os.path.exists(os.path.join(BASEDIR, 'data.sqlite')):
# db.create_all()
if not os.path.exists(os.path.join(BASEDIR, 'data', 'data.sqlite')):
if os.path.exists(os.path.join(BASEDIR, 'data_init')):
shutil.copyfile(os.path.join(BASEDIR, 'data_init'), os.path.join(BASEDIR, 'data', 'data.sqlite'))
if not os.path.exists(TMP_DIR_PATH):
os.makedirs(TMP_DIR_PATH)
if SVM_CLASSIFIER_ENABLED:
svm_face_dataset = os.path.join(BASEDIR, 'data', 'face_dataset')
svm_face_embedding = os.path.join(BASEDIR, 'data', 'face_embedding')
svm_tmp_dir = os.path.join(BASEDIR, 'data', 'faces', 'noname', 'person')
svm_face_testdataset = os.path.join(BASEDIR, 'data', 'face_testdataset')
svm_stranger_testdataset = os.path.join(BASEDIR, 'data', 'stranger_testdataset')
if not os.path.exists(svm_face_dataset):
os.mkdir(svm_face_dataset)
if not os.path.exists(svm_face_embedding):
os.mkdir(svm_face_embedding)
if not os.path.exists(svm_tmp_dir):
os.makedirs(svm_tmp_dir)
if not os.path.exists(svm_face_testdataset):
os.mkdir(svm_face_testdataset)
if not os.path.exists(svm_stranger_testdataset):
os.mkdir(svm_stranger_testdataset)
def init_mqtt_client():
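# Build the per-device MQTT client, register the dataset/debug/sync/drop handlers and start the client loop.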
#TODO: UUID when no eth0/wlan0
device_id = get_deviceid()
mqttc = MyMQTTClass(device_id + str(5000))
mqttc.initialize(updata_trainset, disposeAutoGroupFunc)
mqttc.registerUpateTrainsetHandle(updateDataSet)
mqttc.registerMQTTDebugOnOffHandle(mqttDebugOnOff)
mqttc.registerDropPersonHandle(dropPersonFunc)
mqttc.registerMQTTFinalSyncDatasetsHandle(disposeFinalSyncDatasetsThreadFunc)
mqttc.registerMQTTSyncStatusInfoHandle(disposeSyncStatusInfoThreadFunc)
mqttc.registerMQTTGenerateEmbeddingIfMissingHandle(generate_embedding_ifmissing)
mqttc.start()
def update_frame_db(camera_id=None, device_id=None, group_id=None, blury=None, img_path=None, img_style=None, accuracy=None, url=None, num_face=None, tracking_id=None, time_stamp=None, tracking_flag=None):
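# Insert a Frame row keyed by (group_id, img_path), or update only the fields passed as non-None on an existing row.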
#uuid = db.Column(db.String(64))
#group_id = db.Column(db.String(64))
#blury = db.Column(db.Integer)
#img_path = db.Column(db.String(128))
#img_style = db.Column(db.String(64))
#accuracy = db.Column(db.Float)
#url = db.Column(db.String(128))
#num_face = db.Column(db.Integer)
#tracking_id = db.Column(db.String(64))
#device_id = db.Column(db.String(64))
#time_stamp = db.Column(db.Integer)
#tracking_flag = db.Column(db.String(64))
if img_path is None or group_id is None:
return
with app.app_context():
frame = Frame.query.filter_by(group_id=group_id, img_path=img_path).first()
if frame is None:
new_frame = Frame(camera_id=camera_id, group_id=group_id, blury=blury, img_path=img_path,
img_style=img_style, accuracy=accuracy, url=url, num_face=num_face,
tracking_id=tracking_id, device_id=device_id, time_stamp=time_stamp, tracking_flag=tracking_flag)
db.session.add(new_frame)
print("insert in db: {}".format(new_frame))
else:
if blury is not None:
frame.blury = blury
if img_style is not None:
frame.img_style = img_style
if accuracy is not None:
frame.accuracy = accuracy
if url is not None:
frame.url = url
if num_face is not None:
frame.num_face = num_face
if tracking_id is not None:
frame.tracking_id = tracking_id
if time_stamp is not None:
frame.time_stamp = time_stamp
if tracking_flag is not None:
frame.tracking_flag = tracking_flag
db.session.add(frame)
print("update db: {}".format(frame))
db.session.commit()
def getQueueName():
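# Worker queue name, taken from the WORKER_TYPE environment variable (empty string when unset).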
if os.environ is not None and 'WORKER_TYPE' in os.environ.keys():
return os.environ['WORKER_TYPE']
return ""
def featureCalculation2(imgpath):
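# Compute a face embedding for an image file: delegate to the remote embedding service when HAS_OPENCL == 'false', otherwise run FaceProcessing locally.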
embedding=None
if HAS_OPENCL == 'false':
with open(imgpath, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
embedding = get_remote_embedding(encoded_string)
else:
embedding = FaceProcessing.FaceProcessingImageData2(imgpath)
return embedding
@worker_process_init.connect()
def setup(sender=None, **kwargs):
global mqttc
# setup
print('done initializing <<< ==== be called Per Fork/Process')
_type=getQueueName()
if _type == "embedding":
check_groupid_changed()
init_fs()
if HAS_OPENCL == 'true':
mod = FaceProcessing.init_embedding_processor()
print("start to warm up")
embedding = featureCalculation2(os.path.join(BASEDIR,"image","Mike_Alden_0001_tmp.png"))
print("warmed up")
#if embedding is not None:
# print("worker embedding ready")
init_mqtt_client()
return "detect"
class FaceDetectorTask(Task):
def __init__(self):
self._model = 'testing'
self._type = getQueueName()
print(">>> {}".format(self._type))
@deepeye.task
def extract_v2(image):
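# Embedding task: decode the base64 image, compute its embedding (remote or local depending on HAS_OPENCL) and return it as a JSON payload, or an error when no group is joined or the embedding fails.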
# print(">>> extract() {} ".format(image))
imgstring=image["base64data"]
imgpath=image["path"]
style=image["style"]
blury=image["blury"]
ts=image["ts"]
trackerid=image["trackerid"]
totalPeople=image["totalPeople"]
uuid = get_deviceid()
current_groupid = get_current_groupid()
if current_groupid is None:
return json.dumps({"embedding_path":"","error":"please join group"})
if HAS_OPENCL == 'false':
embedding = get_remote_embedding(imgstring)
else:
embedding = FaceProcessing.FaceProcessingBase64ImageData2(imgstring)
embedding_path=''
embedding_str=''
if embedding is not None:
if type(trackerid) is not str:
trackerid = str(trackerid)
embedding_str = save_embedding.convert_embedding_to_string(embedding)
return json.dumps({"embedding_str":embedding_str})
else:
return json.dumps({"error":"please check your configuration"})
deepeye.conf.task_routes = {
'upload_api-v2.extract_v2': {'queue': 'embedding'}
}
if __name__ == '__main__':
deepeye.start()
| []
| []
| [
"CLUSTER_REDIS_ADDRESS",
"CLUSTER_WORKERONLY",
"RUNTIME_BASEDIR",
"API_SERVER_ADDRESS",
"HAS_OPENCL",
"WORKER_TYPE",
"CLUSTER_REDIS_PORT",
"API_SERVER_PORT"
]
| [] | ["CLUSTER_REDIS_ADDRESS", "CLUSTER_WORKERONLY", "RUNTIME_BASEDIR", "API_SERVER_ADDRESS", "HAS_OPENCL", "WORKER_TYPE", "CLUSTER_REDIS_PORT", "API_SERVER_PORT"] | python | 8 | 0 | |
cmd/prometheus/main.go | // Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The main package for the Prometheus server executable.
package main
import (
"context"
"fmt"
"io"
"math"
"math/bits"
"net"
"net/http"
_ "net/http/pprof" // Comment this line to disable pprof endpoint.
"net/url"
"os"
"os/signal"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/alecthomas/units"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
conntrack "github.com/mwitkow/go-conntrack"
"github.com/oklog/run"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
promlogflag "github.com/prometheus/common/promlog/flag"
"github.com/prometheus/common/version"
toolkit_web "github.com/prometheus/exporter-toolkit/web"
toolkit_webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
jcfg "github.com/uber/jaeger-client-go/config"
jprom "github.com/uber/jaeger-lib/metrics/prometheus"
"go.uber.org/atomic"
kingpin "gopkg.in/alecthomas/kingpin.v2"
klog "k8s.io/klog"
klogv2 "k8s.io/klog/v2"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery"
_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/logging"
"github.com/prometheus/prometheus/pkg/relabel"
prom_runtime "github.com/prometheus/prometheus/pkg/runtime"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/util/strutil"
"github.com/prometheus/prometheus/web"
)
var (
configSuccess = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_config_last_reload_successful",
Help: "Whether the last configuration reload attempt was successful.",
})
configSuccessTime = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "prometheus_config_last_reload_success_timestamp_seconds",
Help: "Timestamp of the last successful configuration reload.",
})
defaultRetentionString = "15d"
defaultRetentionDuration model.Duration
)
func init() {
prometheus.MustRegister(version.NewCollector("prometheus"))
var err error
defaultRetentionDuration, err = model.ParseDuration(defaultRetentionString)
if err != nil {
panic(err)
}
}
type flagConfig struct {
configFile string
localStoragePath string
notifier notifier.Options
forGracePeriod model.Duration
outageTolerance model.Duration
resendDelay model.Duration
web web.Options
tsdb tsdbOptions
lookbackDelta model.Duration
webTimeout model.Duration
queryTimeout model.Duration
queryConcurrency int
queryMaxSamples int
RemoteFlushDeadline model.Duration
featureList []string
// These options are extracted from featureList
// for ease of use.
enablePromQLAtModifier bool
enablePromQLNegativeOffset bool
prometheusURL string
corsRegexString string
promlogConfig promlog.Config
}
// setFeatureListOptions sets the corresponding options from the featureList.
func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
maxExemplars := c.tsdb.MaxExemplars
// Disabled at first. Value from the flag is used if exemplar-storage is set.
c.tsdb.MaxExemplars = 0
for _, f := range c.featureList {
opts := strings.Split(f, ",")
for _, o := range opts {
switch o {
case "promql-at-modifier":
c.enablePromQLAtModifier = true
level.Info(logger).Log("msg", "Experimental promql-at-modifier enabled")
case "promql-negative-offset":
c.enablePromQLNegativeOffset = true
level.Info(logger).Log("msg", "Experimental promql-negative-offset enabled")
case "remote-write-receiver":
c.web.RemoteWriteReceiver = true
level.Info(logger).Log("msg", "Experimental remote-write-receiver enabled")
case "exemplar-storage":
c.tsdb.MaxExemplars = maxExemplars
level.Info(logger).Log("msg", "Experimental in-memory exemplar storage enabled")
case "":
continue
default:
level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
}
}
}
return nil
}
func main() {
if os.Getenv("DEBUG") != "" {
runtime.SetBlockProfileRate(20)
runtime.SetMutexProfileFraction(20)
}
var (
oldFlagRetentionDuration model.Duration
newFlagRetentionDuration model.Duration
)
cfg := flagConfig{
notifier: notifier.Options{
Registerer: prometheus.DefaultRegisterer,
},
web: web.Options{
Registerer: prometheus.DefaultRegisterer,
Gatherer: prometheus.DefaultGatherer,
},
promlogConfig: promlog.Config{},
}
a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout)
a.Version(version.Print("prometheus"))
a.HelpFlag.Short('h')
a.Flag("config.file", "Prometheus configuration file path.").
Default("prometheus.yml").StringVar(&cfg.configFile)
a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)
webConfig := toolkit_webflag.AddFlags(a)
a.Flag("web.read-timeout",
"Maximum duration before timing out read of the request, and closing idle connections.").
Default("5m").SetValue(&cfg.webTimeout)
a.Flag("web.max-connections", "Maximum number of simultaneous connections.").
Default("512").IntVar(&cfg.web.MaxConnections)
a.Flag("web.external-url",
"The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically.").
PlaceHolder("<URL>").StringVar(&cfg.prometheusURL)
a.Flag("web.route-prefix",
"Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url.").
PlaceHolder("<path>").StringVar(&cfg.web.RoutePrefix)
a.Flag("web.user-assets", "Path to static asset directory, available at /user.").
PlaceHolder("<path>").StringVar(&cfg.web.UserAssetsPath)
a.Flag("web.enable-lifecycle", "Enable shutdown and reload via HTTP request.").
Default("false").BoolVar(&cfg.web.EnableLifecycle)
a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").
Default("false").BoolVar(&cfg.web.EnableAdminAPI)
a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)
a.Flag("web.console.libraries", "Path to the console library directory.").
Default("console_libraries").StringVar(&cfg.web.ConsoleLibrariesPath)
a.Flag("web.page-title", "Document title of Prometheus instance.").
Default("Prometheus Time Series Collection and Processing Server").StringVar(&cfg.web.PageTitle)
a.Flag("web.cors.origin", `Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com'`).
Default(".*").StringVar(&cfg.corsRegexString)
a.Flag("storage.tsdb.path", "Base path for metrics storage.").
Default("data/").StringVar(&cfg.localStoragePath)
a.Flag("storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing.").
Hidden().Default("2h").SetValue(&cfg.tsdb.MinBlockDuration)
a.Flag("storage.tsdb.max-block-duration",
"Maximum duration compacted blocks may span. For use in testing. (Defaults to 10% of the retention period.)").
Hidden().PlaceHolder("<duration>").SetValue(&cfg.tsdb.MaxBlockDuration)
a.Flag("storage.tsdb.wal-segment-size",
"Size at which to split the tsdb WAL segment files. Example: 100MB").
Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.tsdb.WALSegmentSize)
a.Flag("storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead.").
SetValue(&oldFlagRetentionDuration)
a.Flag("storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
SetValue(&newFlagRetentionDuration)
a.Flag("storage.tsdb.retention.size", "[EXPERIMENTAL] Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\". This flag is experimental and can be changed in future releases.").
BytesVar(&cfg.tsdb.MaxBytes)
a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
Default("false").BoolVar(&cfg.tsdb.NoLockfile)
a.Flag("storage.tsdb.allow-overlapping-blocks", "[EXPERIMENTAL] Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge.").
Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks)
a.Flag("storage.tsdb.wal-compression", "Compress the tsdb WAL.").
Default("true").BoolVar(&cfg.tsdb.WALCompression)
a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload.").
Default("1m").PlaceHolder("<duration>").SetValue(&cfg.RemoteFlushDeadline)
a.Flag("storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types.").
Default("5e7").IntVar(&cfg.web.RemoteReadSampleLimit)
a.Flag("storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 0 means no limit.").
Default("10").IntVar(&cfg.web.RemoteReadConcurrencyLimit)
a.Flag("storage.remote.read-max-bytes-in-frame", "Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default.").
Default("1048576").IntVar(&cfg.web.RemoteReadBytesInFrame)
a.Flag("storage.exemplars.exemplars-limit", "[EXPERIMENTAL] Maximum number of exemplars to store in in-memory exemplar storage total. 0 disables the exemplar storage. This flag is effective only with --enable-feature=exemplar-storage.").
Default("100000").IntVar(&cfg.tsdb.MaxExemplars)
a.Flag("rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert.").
Default("1h").SetValue(&cfg.outageTolerance)
a.Flag("rules.alert.for-grace-period", "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period.").
Default("10m").SetValue(&cfg.forGracePeriod)
a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
Default("1m").SetValue(&cfg.resendDelay)
a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to 2ms to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
Hidden().Default("true").BoolVar(&scrape.AlignScrapeTimestamps)
a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
Default("10000").IntVar(&cfg.notifier.QueueCapacity)
// TODO: Remove in Prometheus 3.0.
alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()
a.Flag("query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation.").
Default("5m").SetValue(&cfg.lookbackDelta)
a.Flag("query.timeout", "Maximum time a query may take before being aborted.").
Default("2m").SetValue(&cfg.queryTimeout)
a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently.").
Default("20").IntVar(&cfg.queryConcurrency)
a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return.").
Default("50000000").IntVar(&cfg.queryMaxSamples)
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: 'promql-at-modifier' to enable the @ modifier, 'remote-write-receiver' to enable remote write receiver, 'exemplar-storage' to enable the in-memory exemplar storage. See https://prometheus.io/docs/prometheus/latest/disabled_features/ for more details.").
Default("").StringsVar(&cfg.featureList)
promlogflag.AddFlags(a, &cfg.promlogConfig)
_, err := a.Parse(os.Args[1:])
if err != nil {
fmt.Fprintln(os.Stderr, errors.Wrapf(err, "Error parsing commandline arguments"))
a.Usage(os.Args[1:])
os.Exit(2)
}
logger := promlog.New(&cfg.promlogConfig)
if err := cfg.setFeatureListOptions(logger); err != nil {
fmt.Fprintln(os.Stderr, errors.Wrapf(err, "Error parsing feature list"))
os.Exit(1)
}
cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress)
if err != nil {
fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", cfg.prometheusURL))
os.Exit(2)
}
cfg.web.CORSOrigin, err = compileCORSRegexString(cfg.corsRegexString)
if err != nil {
fmt.Fprintln(os.Stderr, errors.Wrapf(err, "could not compile CORS regex string %q", cfg.corsRegexString))
os.Exit(2)
}
if *alertmanagerTimeout != "" {
level.Warn(logger).Log("msg", "The flag --alertmanager.timeout has no effect and will be removed in the future.")
}
// Throw error for invalid config before starting other components.
if _, err := config.LoadFile(cfg.configFile); err != nil {
level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "err", err)
os.Exit(2)
}
// Now that the validity of the config is established, set the config
// success metrics accordingly, although the config isn't really loaded
// yet. This will happen later (including setting these metrics again),
// but if we don't do it now, the metrics will stay at zero until the
// startup procedure is complete, which might take long enough to
// trigger alerts about an invalid config.
configSuccess.Set(1)
configSuccessTime.SetToCurrentTime()
cfg.web.ReadTimeout = time.Duration(cfg.webTimeout)
// Default -web.route-prefix to path of -web.external-url.
if cfg.web.RoutePrefix == "" {
cfg.web.RoutePrefix = cfg.web.ExternalURL.Path
}
// RoutePrefix must always be at least '/'.
cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/")
{ // Time retention settings.
if oldFlagRetentionDuration != 0 {
level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.")
cfg.tsdb.RetentionDuration = oldFlagRetentionDuration
}
// When the new flag is set it takes precedence.
if newFlagRetentionDuration != 0 {
cfg.tsdb.RetentionDuration = newFlagRetentionDuration
}
if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 {
cfg.tsdb.RetentionDuration = defaultRetentionDuration
level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
}
// Check for overflows. This limits our max retention to 100y.
if cfg.tsdb.RetentionDuration < 0 {
y, err := model.ParseDuration("100y")
if err != nil {
panic(err)
}
cfg.tsdb.RetentionDuration = y
level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String())
}
}
{ // Max block size settings.
if cfg.tsdb.MaxBlockDuration == 0 {
maxBlockDuration, err := model.ParseDuration("31d")
if err != nil {
panic(err)
}
// When the time retention is set and not too big use to define the max block duration.
if cfg.tsdb.RetentionDuration != 0 && cfg.tsdb.RetentionDuration/10 < maxBlockDuration {
maxBlockDuration = cfg.tsdb.RetentionDuration / 10
}
cfg.tsdb.MaxBlockDuration = maxBlockDuration
}
}
noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{}
noStepSubqueryInterval.Set(config.DefaultGlobalConfig.EvaluationInterval)
// Above level 6, the k8s client would log bearer tokens in clear-text.
klog.ClampLevel(6)
klog.SetLogger(log.With(logger, "component", "k8s_client_runtime"))
klogv2.ClampLevel(6)
klogv2.SetLogger(log.With(logger, "component", "k8s_client_runtime"))
level.Info(logger).Log("msg", "Starting Prometheus", "version", version.Info())
if bits.UintSize < 64 {
level.Warn(logger).Log("msg", "This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH)
}
level.Info(logger).Log("build_context", version.BuildContext())
level.Info(logger).Log("host_details", prom_runtime.Uname())
level.Info(logger).Log("fd_limits", prom_runtime.FdLimits())
level.Info(logger).Log("vm_limits", prom_runtime.VMLimits())
var (
localStorage = &readyStorage{}
scraper = &readyScrapeManager{}
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, cfg.localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper)
fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
)
var (
ctxWeb, cancelWeb = context.WithCancel(context.Background())
ctxRule = context.Background()
notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier"))
ctxScrape, cancelScrape = context.WithCancel(context.Background())
discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape"))
ctxNotify, cancelNotify = context.WithCancel(context.Background())
discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), discovery.Name("notify"))
scrapeManager = scrape.NewManager(log.With(logger, "component", "scrape manager"), fanoutStorage)
opts = promql.EngineOpts{
Logger: log.With(logger, "component", "query engine"),
Reg: prometheus.DefaultRegisterer,
MaxSamples: cfg.queryMaxSamples,
Timeout: time.Duration(cfg.queryTimeout),
ActiveQueryTracker: promql.NewActiveQueryTracker(cfg.localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")),
LookbackDelta: time.Duration(cfg.lookbackDelta),
NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get,
EnableAtModifier: cfg.enablePromQLAtModifier,
EnableNegativeOffset: cfg.enablePromQLNegativeOffset,
}
queryEngine = promql.NewEngine(opts)
ruleManager = rules.NewManager(&rules.ManagerOptions{
Appendable: fanoutStorage,
Queryable: localStorage,
QueryFunc: rules.EngineQueryFunc(queryEngine, fanoutStorage),
NotifyFunc: sendAlerts(notifierManager, cfg.web.ExternalURL.String()),
Context: ctxRule,
ExternalURL: cfg.web.ExternalURL,
Registerer: prometheus.DefaultRegisterer,
Logger: log.With(logger, "component", "rule manager"),
OutageTolerance: time.Duration(cfg.outageTolerance),
ForGracePeriod: time.Duration(cfg.forGracePeriod),
ResendDelay: time.Duration(cfg.resendDelay),
})
)
scraper.Set(scrapeManager)
cfg.web.Context = ctxWeb
cfg.web.TSDBRetentionDuration = cfg.tsdb.RetentionDuration
cfg.web.TSDBMaxBytes = cfg.tsdb.MaxBytes
cfg.web.TSDBDir = cfg.localStoragePath
cfg.web.LocalStorage = localStorage
cfg.web.Storage = fanoutStorage
cfg.web.ExemplarStorage = localStorage
cfg.web.QueryEngine = queryEngine
cfg.web.ScrapeManager = scrapeManager
cfg.web.RuleManager = ruleManager
cfg.web.Notifier = notifierManager
cfg.web.LookbackDelta = time.Duration(cfg.lookbackDelta)
cfg.web.Version = &web.PrometheusVersion{
Version: version.Version,
Revision: version.Revision,
Branch: version.Branch,
BuildUser: version.BuildUser,
BuildDate: version.BuildDate,
GoVersion: version.GoVersion,
}
cfg.web.Flags = map[string]string{}
// Exclude kingpin default flags to expose only Prometheus ones.
boilerplateFlags := kingpin.New("", "").Version("")
for _, f := range a.Model().Flags {
if boilerplateFlags.GetFlag(f.Name) != nil {
continue
}
cfg.web.Flags[f.Name] = f.Value.String()
}
// Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager.
webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)
// Monitor outgoing connections on default transport with conntrack.
http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc(
conntrack.DialWithTracing(),
)
reloaders := []reloader{
{
name: "remote_storage",
reloader: remoteStorage.ApplyConfig,
}, {
name: "web_handler",
reloader: webHandler.ApplyConfig,
}, {
name: "query_engine",
reloader: func(cfg *config.Config) error {
if cfg.GlobalConfig.QueryLogFile == "" {
queryEngine.SetQueryLogger(nil)
return nil
}
l, err := logging.NewJSONFileLogger(cfg.GlobalConfig.QueryLogFile)
if err != nil {
return err
}
queryEngine.SetQueryLogger(l)
return nil
},
}, {
// The Scrape and notifier managers need to reload before the Discovery manager as
// they need to read the most updated config when receiving the new targets list.
name: "scrape",
reloader: scrapeManager.ApplyConfig,
}, {
name: "scrape_sd",
reloader: func(cfg *config.Config) error {
c := make(map[string]discovery.Configs)
for _, v := range cfg.ScrapeConfigs {
c[v.JobName] = v.ServiceDiscoveryConfigs
}
return discoveryManagerScrape.ApplyConfig(c)
},
}, {
name: "notify",
reloader: notifierManager.ApplyConfig,
}, {
name: "notify_sd",
reloader: func(cfg *config.Config) error {
c := make(map[string]discovery.Configs)
for k, v := range cfg.AlertingConfig.AlertmanagerConfigs.ToMap() {
c[k] = v.ServiceDiscoveryConfigs
}
return discoveryManagerNotify.ApplyConfig(c)
},
}, {
name: "rules",
reloader: func(cfg *config.Config) error {
// Get all rule files matching the configuration paths.
var files []string
for _, pat := range cfg.RuleFiles {
fs, err := filepath.Glob(pat)
if err != nil {
// The only error can be a bad pattern.
return errors.Wrapf(err, "error retrieving rule files for %s", pat)
}
files = append(files, fs...)
}
return ruleManager.Update(
time.Duration(cfg.GlobalConfig.EvaluationInterval),
files,
cfg.GlobalConfig.ExternalLabels,
)
},
},
}
prometheus.MustRegister(configSuccess)
prometheus.MustRegister(configSuccessTime)
// Start all components while we wait for TSDB to open but only load
// initial config and mark ourselves as ready after it completed.
dbOpen := make(chan struct{})
// sync.Once is used to make sure we can close the channel at different execution stages(SIGTERM or when the config is loaded).
type closeOnce struct {
C chan struct{}
once sync.Once
Close func()
}
// Wait until the server is ready to handle reloading.
reloadReady := &closeOnce{
C: make(chan struct{}),
}
reloadReady.Close = func() {
reloadReady.once.Do(func() {
close(reloadReady.C)
})
}
closer, err := initTracing(logger)
if err != nil {
level.Error(logger).Log("msg", "Unable to init tracing", "err", err)
os.Exit(2)
}
defer closer.Close()
listener, err := webHandler.Listener()
if err != nil {
level.Error(logger).Log("msg", "Unable to start web listener", "err", err)
os.Exit(1)
}
err = toolkit_web.Validate(*webConfig)
if err != nil {
level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err)
os.Exit(1)
}
var g run.Group
{
// Termination handler.
term := make(chan os.Signal, 1)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
cancel := make(chan struct{})
g.Add(
func() error {
// Don't forget to release the reloadReady channel so that waiting blocks can exit normally.
select {
case <-term:
level.Warn(logger).Log("msg", "Received SIGTERM, exiting gracefully...")
reloadReady.Close()
case <-webHandler.Quit():
level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...")
case <-cancel:
reloadReady.Close()
}
return nil
},
func(err error) {
close(cancel)
},
)
}
{
// Scrape discovery manager.
g.Add(
func() error {
err := discoveryManagerScrape.Run()
level.Info(logger).Log("msg", "Scrape discovery manager stopped")
return err
},
func(err error) {
level.Info(logger).Log("msg", "Stopping scrape discovery manager...")
cancelScrape()
},
)
}
{
// Notify discovery manager.
g.Add(
func() error {
err := discoveryManagerNotify.Run()
level.Info(logger).Log("msg", "Notify discovery manager stopped")
return err
},
func(err error) {
level.Info(logger).Log("msg", "Stopping notify discovery manager...")
cancelNotify()
},
)
}
{
// Scrape manager.
g.Add(
func() error {
// When the scrape manager receives a new targets list
// it needs to read a valid config for each job.
// It depends on the config being in sync with the discovery manager so
// we wait until the config is fully loaded.
<-reloadReady.C
err := scrapeManager.Run(discoveryManagerScrape.SyncCh())
level.Info(logger).Log("msg", "Scrape manager stopped")
return err
},
func(err error) {
// Scrape manager needs to be stopped before closing the local TSDB
// so that it doesn't try to write samples to a closed storage.
level.Info(logger).Log("msg", "Stopping scrape manager...")
scrapeManager.Stop()
},
)
}
{
// Reload handler.
// Make sure that sighup handler is registered with a redirect to the channel before the potentially
// long and synchronous tsdb init.
hup := make(chan os.Signal, 1)
signal.Notify(hup, syscall.SIGHUP)
cancel := make(chan struct{})
g.Add(
func() error {
<-reloadReady.C
for {
select {
case <-hup:
if err := reloadConfig(cfg.configFile, logger, noStepSubqueryInterval, reloaders...); err != nil {
level.Error(logger).Log("msg", "Error reloading config", "err", err)
}
case rc := <-webHandler.Reload():
if err := reloadConfig(cfg.configFile, logger, noStepSubqueryInterval, reloaders...); err != nil {
level.Error(logger).Log("msg", "Error reloading config", "err", err)
rc <- err
} else {
rc <- nil
}
case <-cancel:
return nil
}
}
},
func(err error) {
// Wait for any in-progress reloads to complete to avoid
// reloading things after they have been shutdown.
cancel <- struct{}{}
},
)
}
{
// Initial configuration loading.
cancel := make(chan struct{})
g.Add(
func() error {
select {
case <-dbOpen:
// In case a shutdown is initiated before the dbOpen is released
case <-cancel:
reloadReady.Close()
return nil
}
if err := reloadConfig(cfg.configFile, logger, noStepSubqueryInterval, reloaders...); err != nil {
return errors.Wrapf(err, "error loading config from %q", cfg.configFile)
}
reloadReady.Close()
webHandler.Ready()
level.Info(logger).Log("msg", "Server is ready to receive web requests.")
<-cancel
return nil
},
func(err error) {
close(cancel)
},
)
}
{
// Rule manager.
g.Add(
func() error {
<-reloadReady.C
ruleManager.Run()
return nil
},
func(err error) {
ruleManager.Stop()
},
)
}
{
// TSDB.
opts := cfg.tsdb.ToTSDBOptions()
cancel := make(chan struct{})
g.Add(
func() error {
level.Info(logger).Log("msg", "Starting TSDB ...")
if cfg.tsdb.WALSegmentSize != 0 {
if cfg.tsdb.WALSegmentSize < 10*1024*1024 || cfg.tsdb.WALSegmentSize > 256*1024*1024 {
return errors.New("flag 'storage.tsdb.wal-segment-size' must be set between 10MB and 256MB")
}
}
db, err := openDBWithMetrics(
cfg.localStoragePath,
logger,
prometheus.DefaultRegisterer,
&opts,
)
if err != nil {
return errors.Wrapf(err, "opening storage failed")
}
switch fsType := prom_runtime.Statfs(cfg.localStoragePath); fsType {
case "NFS_SUPER_MAGIC":
level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
default:
level.Info(logger).Log("fs_type", fsType)
}
level.Info(logger).Log("msg", "TSDB started")
level.Debug(logger).Log("msg", "TSDB options",
"MinBlockDuration", cfg.tsdb.MinBlockDuration,
"MaxBlockDuration", cfg.tsdb.MaxBlockDuration,
"MaxBytes", cfg.tsdb.MaxBytes,
"NoLockfile", cfg.tsdb.NoLockfile,
"RetentionDuration", cfg.tsdb.RetentionDuration,
"WALSegmentSize", cfg.tsdb.WALSegmentSize,
"AllowOverlappingBlocks", cfg.tsdb.AllowOverlappingBlocks,
"WALCompression", cfg.tsdb.WALCompression,
)
startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
localStorage.Set(db, startTimeMargin)
close(dbOpen)
<-cancel
return nil
},
func(err error) {
if err := fanoutStorage.Close(); err != nil {
level.Error(logger).Log("msg", "Error stopping storage", "err", err)
}
close(cancel)
},
)
}
{
// Web handler.
g.Add(
func() error {
if err := webHandler.Run(ctxWeb, listener, *webConfig); err != nil {
return errors.Wrapf(err, "error starting web server")
}
return nil
},
func(err error) {
cancelWeb()
},
)
}
{
// Notifier.
// Calling notifier.Stop() before ruleManager.Stop() will cause a panic if the ruleManager isn't running,
// so keep this interrupt after the ruleManager.Stop().
g.Add(
func() error {
// When the notifier manager receives a new targets list
// it needs to read a valid config for each job.
// It depends on the config being in sync with the discovery manager
// so we wait until the config is fully loaded.
<-reloadReady.C
notifierManager.Run(discoveryManagerNotify.SyncCh())
level.Info(logger).Log("msg", "Notifier manager stopped")
return nil
},
func(err error) {
notifierManager.Stop()
},
)
}
if err := g.Run(); err != nil {
level.Error(logger).Log("err", err)
os.Exit(1)
}
level.Info(logger).Log("msg", "See you next time!")
}
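// openDBWithMetrics opens the local TSDB at dir and registers gauges for the lowest stored timestamp and the head block's min/max time.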
func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer, opts *tsdb.Options) (*tsdb.DB, error) {
db, err := tsdb.Open(
dir,
log.With(logger, "component", "tsdb"),
reg,
opts,
)
if err != nil {
return nil, err
}
reg.MustRegister(
prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_lowest_timestamp_seconds",
Help: "Lowest timestamp value stored in the database.",
}, func() float64 {
bb := db.Blocks()
if len(bb) == 0 {
return float64(db.Head().MinTime() / 1000)
}
return float64(db.Blocks()[0].Meta().MinTime / 1000)
}), prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_head_min_time_seconds",
Help: "Minimum time bound of the head block.",
}, func() float64 { return float64(db.Head().MinTime() / 1000) }),
prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "prometheus_tsdb_head_max_time_seconds",
Help: "Maximum timestamp of the head block.",
}, func() float64 { return float64(db.Head().MaxTime() / 1000) }),
)
return db, nil
}
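// safePromQLNoStepSubqueryInterval holds the global no-step subquery interval in an atomic value so config reloads can update it concurrently with queries.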
type safePromQLNoStepSubqueryInterval struct {
value atomic.Int64
}
func durationToInt64Millis(d time.Duration) int64 {
return int64(d / time.Millisecond)
}
func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) {
i.value.Store(durationToInt64Millis(time.Duration(ev)))
}
func (i *safePromQLNoStepSubqueryInterval) Get(int64) int64 {
return i.value.Load()
}
type reloader struct {
name string
reloader func(*config.Config) error
}
func reloadConfig(filename string, logger log.Logger, noStepSubqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
start := time.Now()
timings := []interface{}{}
level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)
defer func() {
if err == nil {
configSuccess.Set(1)
configSuccessTime.SetToCurrentTime()
} else {
configSuccess.Set(0)
}
}()
conf, err := config.LoadFile(filename)
if err != nil {
return errors.Wrapf(err, "couldn't load configuration (--config.file=%q)", filename)
}
failed := false
for _, rl := range rls {
rstart := time.Now()
if err := rl.reloader(conf); err != nil {
level.Error(logger).Log("msg", "Failed to apply configuration", "err", err)
failed = true
}
timings = append(timings, rl.name, time.Since(rstart))
}
if failed {
return errors.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
}
noStepSubqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)}
level.Info(logger).Log(append(l, timings...)...)
return nil
}
func startsOrEndsWithQuote(s string) bool {
return strings.HasPrefix(s, "\"") || strings.HasPrefix(s, "'") ||
strings.HasSuffix(s, "\"") || strings.HasSuffix(s, "'")
}
// compileCORSRegexString compiles the given string into a fully anchored regexp.
func compileCORSRegexString(s string) (*regexp.Regexp, error) {
r, err := relabel.NewRegexp(s)
if err != nil {
return nil, err
}
return r.Regexp, nil
}
// computeExternalURL computes a sanitized external URL from a raw input. It infers unset
// URL parts from the OS and the given listen address.
func computeExternalURL(u, listenAddr string) (*url.URL, error) {
if u == "" {
hostname, err := os.Hostname()
if err != nil {
return nil, err
}
_, port, err := net.SplitHostPort(listenAddr)
if err != nil {
return nil, err
}
u = fmt.Sprintf("http://%s:%s/", hostname, port)
}
if startsOrEndsWithQuote(u) {
return nil, errors.New("URL must not begin or end with quotes")
}
eu, err := url.Parse(u)
if err != nil {
return nil, err
}
ppref := strings.TrimRight(eu.Path, "/")
if ppref != "" && !strings.HasPrefix(ppref, "/") {
ppref = "/" + ppref
}
eu.Path = ppref
return eu, nil
}
type sender interface {
Send(alerts ...*notifier.Alert)
}
// sendAlerts implements the rules.NotifyFunc for a Notifier.
func sendAlerts(s sender, externalURL string) rules.NotifyFunc {
return func(ctx context.Context, expr string, alerts ...*rules.Alert) {
var res []*notifier.Alert
for _, alert := range alerts {
a := &notifier.Alert{
StartsAt: alert.FiredAt,
Labels: alert.Labels,
Annotations: alert.Annotations,
GeneratorURL: externalURL + strutil.TableLinkForExpression(expr),
}
if !alert.ResolvedAt.IsZero() {
a.EndsAt = alert.ResolvedAt
} else {
a.EndsAt = alert.ValidUntil
}
res = append(res, a)
}
if len(alerts) > 0 {
s.Send(res...)
}
}
}
// readyStorage implements the Storage interface while allowing to set the actual
// storage at a later point in time.
type readyStorage struct {
mtx sync.RWMutex
db *tsdb.DB
startTimeMargin int64
}
// Set the storage.
func (s *readyStorage) Set(db *tsdb.DB, startTimeMargin int64) {
s.mtx.Lock()
defer s.mtx.Unlock()
s.db = db
s.startTimeMargin = startTimeMargin
}
// get is internal, you should use readyStorage as the front implementation layer.
func (s *readyStorage) get() *tsdb.DB {
s.mtx.RLock()
x := s.db
s.mtx.RUnlock()
return x
}
// StartTime implements the Storage interface.
func (s *readyStorage) StartTime() (int64, error) {
if x := s.get(); x != nil {
var startTime int64
if len(x.Blocks()) > 0 {
startTime = x.Blocks()[0].Meta().MinTime
} else {
startTime = time.Now().Unix() * 1000
}
// Add a safety margin as it may take a few minutes for everything to spin up.
return startTime + s.startTimeMargin, nil
}
return math.MaxInt64, tsdb.ErrNotReady
}
// Querier implements the Storage interface.
func (s *readyStorage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
if x := s.get(); x != nil {
return x.Querier(ctx, mint, maxt)
}
return nil, tsdb.ErrNotReady
}
// ChunkQuerier implements the Storage interface.
func (s *readyStorage) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
if x := s.get(); x != nil {
return x.ChunkQuerier(ctx, mint, maxt)
}
return nil, tsdb.ErrNotReady
}
func (s *readyStorage) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
if x := s.get(); x != nil {
return x.ExemplarQuerier(ctx)
}
return nil, tsdb.ErrNotReady
}
// Appender implements the Storage interface.
func (s *readyStorage) Appender(ctx context.Context) storage.Appender {
if x := s.get(); x != nil {
return x.Appender(ctx)
}
return notReadyAppender{}
}
type notReadyAppender struct{}
func (n notReadyAppender) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) {
return 0, tsdb.ErrNotReady
}
func (n notReadyAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
return 0, tsdb.ErrNotReady
}
func (n notReadyAppender) Commit() error { return tsdb.ErrNotReady }
func (n notReadyAppender) Rollback() error { return tsdb.ErrNotReady }
// Close implements the Storage interface.
func (s *readyStorage) Close() error {
if x := s.get(); x != nil {
return x.Close()
}
return nil
}
// CleanTombstones implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
func (s *readyStorage) CleanTombstones() error {
if x := s.get(); x != nil {
return x.CleanTombstones()
}
return tsdb.ErrNotReady
}
// Delete implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
func (s *readyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
if x := s.get(); x != nil {
return x.Delete(mint, maxt, ms...)
}
return tsdb.ErrNotReady
}
// Snapshot implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
func (s *readyStorage) Snapshot(dir string, withHead bool) error {
if x := s.get(); x != nil {
return x.Snapshot(dir, withHead)
}
return tsdb.ErrNotReady
}
// Stats implements the api_v1.TSDBAdminStats interface.
func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) {
if x := s.get(); x != nil {
return x.Head().Stats(statsByLabelName), nil
}
return nil, tsdb.ErrNotReady
}
// ErrNotReady is returned if the underlying scrape manager is not ready yet.
var ErrNotReady = errors.New("Scrape manager not ready")
// ReadyScrapeManager allows a scrape manager to be retrieved, even if it's set at a later point in time.
type readyScrapeManager struct {
mtx sync.RWMutex
m *scrape.Manager
}
// Set the scrape manager.
func (rm *readyScrapeManager) Set(m *scrape.Manager) {
rm.mtx.Lock()
defer rm.mtx.Unlock()
rm.m = m
}
// Get the scrape manager. If it is not ready, return an error.
func (rm *readyScrapeManager) Get() (*scrape.Manager, error) {
rm.mtx.RLock()
defer rm.mtx.RUnlock()
if rm.m != nil {
return rm.m, nil
}
return nil, ErrNotReady
}
// tsdbOptions is tsdb.Option version with defined units.
// This is required as tsdb.Option fields are unit agnostic (time).
type tsdbOptions struct {
WALSegmentSize units.Base2Bytes
RetentionDuration model.Duration
MaxBytes units.Base2Bytes
NoLockfile bool
AllowOverlappingBlocks bool
WALCompression bool
StripeSize int
MinBlockDuration model.Duration
MaxBlockDuration model.Duration
MaxExemplars int
}
func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
return tsdb.Options{
WALSegmentSize: int(opts.WALSegmentSize),
RetentionDuration: int64(time.Duration(opts.RetentionDuration) / time.Millisecond),
MaxBytes: int64(opts.MaxBytes),
NoLockfile: opts.NoLockfile,
AllowOverlappingBlocks: opts.AllowOverlappingBlocks,
WALCompression: opts.WALCompression,
StripeSize: opts.StripeSize,
MinBlockDuration: int64(time.Duration(opts.MinBlockDuration) / time.Millisecond),
MaxBlockDuration: int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond),
MaxExemplars: opts.MaxExemplars,
}
}
func initTracing(logger log.Logger) (io.Closer, error) {
// Set tracing configuration defaults.
cfg := &jcfg.Configuration{
ServiceName: "prometheus",
Disabled: true,
}
// Available options can be seen here:
// https://github.com/jaegertracing/jaeger-client-go#environment-variables
cfg, err := cfg.FromEnv()
if err != nil {
return nil, errors.Wrap(err, "unable to get tracing config from environment")
}
jLogger := jaegerLogger{logger: log.With(logger, "component", "tracing")}
tracer, closer, err := cfg.NewTracer(
jcfg.Logger(jLogger),
jcfg.Metrics(jprom.New()),
)
if err != nil {
return nil, errors.Wrap(err, "unable to init tracing")
}
opentracing.SetGlobalTracer(tracer)
return closer, nil
}
type jaegerLogger struct {
logger log.Logger
}
func (l jaegerLogger) Error(msg string) {
level.Error(l.logger).Log("msg", msg)
}
func (l jaegerLogger) Infof(msg string, args ...interface{}) {
keyvals := []interface{}{"msg", fmt.Sprintf(msg, args...)}
level.Info(l.logger).Log(keyvals...)
}
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
trxdb/logging.go | // Copyright 2019 dfuse Platform Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trxdb
import (
"os"
"github.com/dfuse-io/logging"
"go.uber.org/zap"
)
var traceEnabled bool
var zlog *zap.Logger
func init() {
logging.Register("github.com/zhongshuwen/historyexp/trxdb", &zlog)
if os.Getenv("TRACE") == "true" {
traceEnabled = true
}
}
| [
"\"TRACE\""
]
| []
| [
"TRACE"
]
| [] | ["TRACE"] | go | 1 | 0 | |
main.go | package main
import (
"embed"
"log"
"net/http"
"os"
"time"
"github.com/adhocore/urlsh/controller"
"github.com/adhocore/urlsh/router"
)
//go:embed assets
var embedAssetsFS embed.FS
func init() {
controller.EmbedAssetHandler = http.FileServer(http.FS(embedAssetsFS))
}
func getPort() string {
if port := os.Getenv("PORT"); port != "" {
return port
}
return "2000"
}
func main() {
port := getPort()
server := &http.Server{
Addr: ":" + port,
Handler: router.RegisterHandlers(),
ReadTimeout: 1 * time.Second,
WriteTimeout: 1 * time.Second,
}
log.Printf("Server running on port %v", port)
log.Fatal(server.ListenAndServe())
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
nf_core/launch.py | #!/usr/bin/env python
""" Launch a pipeline, interactively collecting params """
from __future__ import print_function
from collections import OrderedDict
import click
import errno
import jsonschema
import logging
import os
import re
import subprocess
import nf_core.utils, nf_core.list
import nf_core.workflow.parameters, nf_core.workflow.validation, nf_core.workflow.workflow
def launch_pipeline(workflow, params_local_uri, direct):
# Create a pipeline launch object
launcher = Launch(workflow)
# Get nextflow to fetch the workflow if we don't already have it
if not launcher.wf_ispath:
launcher.get_local_wf()
# Get the pipeline default parameters
launcher.parse_parameter_settings(params_local_uri)
# Find extra params from `nextflow config` command and main.nf
launcher.collect_pipeline_param_defaults()
# Group the parameters
launcher.group_parameters()
# Kick off the interactive wizard to collect user inputs
launcher.prompt_core_nxf_flags()
if not direct:
launcher.prompt_param_flags()
# Build and launch the `nextflow run` command
launcher.build_command()
launcher.launch_workflow()
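# Illustrative usage (assumption, not part of the original module):
#   launch_pipeline("nf-core/rnaseq", params_local_uri=None, direct=False)
# fetches the pipeline if needed, collects its parameters, prompts for the core
# nextflow flags and per-group parameters, then builds and launches the
# `nextflow run` command.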
class Launch(object):
""" Class to hold config option to launch a pipeline """
def __init__(self, workflow):
""" Initialise the class with empty placeholder vars """
# Check if the workflow name is actually a path
self.wf_ispath = os.path.exists(workflow)
# Prepend nf-core/ if it seems sensible
if 'nf-core' not in workflow and workflow.count('/') == 0 and not self.wf_ispath:
workflow = "nf-core/{}".format(workflow)
logging.debug("Prepending nf-core/ to workflow")
logging.info("Launching {}".format(workflow))
# Get list of local workflows to see if we have a cached version
self.local_wf = None
if not self.wf_ispath:
wfs = nf_core.list.Workflows()
wfs.get_local_nf_workflows()
for wf in wfs.local_workflows:
if workflow == wf.full_name:
self.local_wf = wf
self.workflow = workflow
self.nxf_flag_defaults = {
'-name': None,
'-r': None,
'-profile': 'standard',
'-w': os.getenv('NXF_WORK') if os.getenv('NXF_WORK') else './work',
'-resume': False
}
self.nxf_flag_help = {
'-name': 'Unique name for this nextflow run',
'-r': 'Release / revision to use',
'-profile': 'Config profile to use',
'-w': 'Work directory for intermediate files',
'-resume': 'Resume a previous workflow run'
}
self.nxf_flags = {}
self.parameters = []
self.parameter_keys = []
self.grouped_parameters = OrderedDict()
self.params_user = {}
self.nextflow_cmd = "nextflow run {}".format(self.workflow)
self.use_params_file = True
def get_local_wf(self):
"""
Check if this workflow has a local copy and use nextflow to pull it if not
"""
if not self.local_wf:
logging.info("Downloading workflow: {}".format(self.workflow))
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_output(['nextflow', 'pull', self.workflow], stderr=devnull)
except OSError as e:
if e.errno == errno.ENOENT:
raise AssertionError("It looks like Nextflow is not installed. It is required for most nf-core functions.")
except subprocess.CalledProcessError as e:
raise AssertionError("`nextflow pull` returned non-zero error code: %s,\n %s", e.returncode, e.output)
else:
self.local_wf = nf_core.list.LocalWorkflow(self.workflow)
self.local_wf.get_local_nf_workflow_details()
def parse_parameter_settings(self, params_local_uri = None):
"""
Load full parameter info from the pipeline parameters.settings.json file
"""
try:
params_json_str = None
# Params file supplied to launch command
if params_local_uri:
with open(params_local_uri, 'r') as fp:
params_json_str = fp.read()
# Get workflow file from local cached copy
else:
if self.wf_ispath:
local_params_path = os.path.join(self.workflow, 'parameters.settings.json')
else:
local_params_path = os.path.join(self.local_wf.local_path, 'parameters.settings.json')
if os.path.exists(local_params_path):
with open(local_params_path, 'r') as fp:
params_json_str = fp.read()
if not params_json_str:
raise LookupError('parameters.settings.json file not found')
try:
self.parameters = nf_core.workflow.parameters.Parameters.create_from_json(params_json_str)
for p in self.parameters:
self.parameter_keys.append(p.name)
logging.debug("Found param from parameters.settings.json: param.{}".format(p.name))
except ValueError as e:
logging.error("Could not parse pipeline parameters.settings.json JSON:\n {}\n".format(e))
except jsonschema.exceptions.ValidationError as e:
logging.error("Validation error with pipeline parameters.settings.json:\n Message: {}\n Instance: {}\n".format(e.message, e.instance))
except LookupError as e:
print("WARNING: Could not parse parameter settings file for `{pipeline}`:\n {exception}".format(
pipeline=self.workflow, exception=e))
def collect_pipeline_param_defaults(self):
""" Collect the default params and values from the workflow """
logging.debug("Collecting pipeline parameter defaults\n")
config = nf_core.utils.fetch_wf_config(self.workflow, self.local_wf)
for key, value in config.items():
keys = key.split('.')
if keys[0] == 'params' and len(keys) == 2 and keys[1] not in self.parameter_keys:
# Try to guess the variable type from the default value
p_type = 'string'
p_default = str(value)
# All digits - int
if value.isdigit():
p_type = 'integer'
p_default = int(value)
else:
                    # Not just digits - try converting to a float
try:
p_default = float(value)
p_type = 'decimal'
except ValueError:
pass
# Strings 'true' and 'false' - booleans
if value == 'true' or value == 'false':
p_type = 'boolean'
p_default = True if value == 'true' else False
# Build the Parameter object
parameter = (nf_core.workflow.parameters.Parameter.builder()
.name(keys[1])
.label(keys[1])
.usage(None)
.param_type(p_type)
.choices(None)
.default(p_default)
.pattern(".*")
.render("textfield")
.arity(None)
.group("Other pipeline parameters")
.build())
self.parameters.append(parameter)
self.parameter_keys.append(keys[1])
logging.debug("Discovered param from `nextflow config`: param.{}".format(keys[1]))
# Not all parameters can be found with `nextflow config` - try searching main.nf and config files
searchfiles = []
pattern = re.compile(r'params\.([\w\d]+)')
wf_base = self.workflow if self.wf_ispath else self.local_wf.local_path
if os.path.exists(os.path.join(wf_base, 'main.nf')):
searchfiles.append(os.path.join(wf_base, 'main.nf'))
if os.path.exists(os.path.join(wf_base, 'nextflow.config')):
searchfiles.append(os.path.join(wf_base, 'nextflow.config'))
if os.path.isdir(os.path.join(wf_base, 'conf')):
for fn in os.listdir(os.path.join(wf_base, 'conf')):
searchfiles.append(os.path.join(wf_base, 'conf', fn))
for sf in searchfiles:
with open(sf, 'r') as fh:
for l in fh:
match = re.search(pattern, l)
if match:
param = match.group(1)
if param not in self.parameter_keys:
# Build the Parameter object
parameter = (nf_core.workflow.parameters.Parameter.builder()
.name(param)
.label(param)
.usage(None)
.param_type("string")
.choices(None)
.default("")
.pattern(".*")
.render("textfield")
.arity(None)
.group("Other pipeline parameters")
.build())
self.parameters.append(parameter)
self.parameter_keys.append(param)
logging.debug("Discovered param from {}: param.{}".format(os.path.relpath(sf, wf_base), param))
def prompt_core_nxf_flags(self):
""" Ask the user if they want to override any default values """
# Main nextflow flags
click.secho("Main nextflow options", bold=True, underline=True)
for flag, f_default in self.nxf_flag_defaults.items():
# Click prompts don't like None, so we have to use an empty string instead
f_default_print = f_default
if f_default is None:
f_default = ''
f_default_print = 'None'
# Overwrite the default prompt for boolean
if isinstance(f_default, bool):
f_default_print = 'Y/n' if f_default else 'y/N'
# Prompt for a response
f_user = click.prompt(
"\n{}\n {} {}".format(
self.nxf_flag_help[flag],
click.style(flag, fg='blue'),
click.style('[{}]'.format(str(f_default_print)), fg='green')
),
default = f_default,
show_default = False
)
# Only save if we've changed the default
if f_user != f_default:
# Convert string bools to real bools
try:
f_user = f_user.strip('"').strip("'")
if f_user.lower() == 'true': f_user = True
if f_user.lower() == 'false': f_user = False
except AttributeError:
pass
self.nxf_flags[flag] = f_user
def group_parameters(self):
"""Groups parameters by their 'group' property.
Args:
parameters (list): Collection of parameter objects.
Returns:
dict: Parameter objects grouped by the `group` property.
"""
for param in self.parameters:
if param.group not in self.grouped_parameters.keys():
self.grouped_parameters[param.group] = []
self.grouped_parameters[param.group].append(param)
def prompt_param_flags(self):
""" Prompts the user for parameter input values and validates them. """
for group_label, params in self.grouped_parameters.items():
click.echo("\n\n{}{}".format(
click.style('Parameter group: ', bold=True, underline=True),
click.style(group_label, bold=True, underline=True, fg='red')
))
use_defaults = click.confirm(
"Do you want to change the group's defaults? "+click.style('[y/N]', fg='green'),
default=False, show_default=False)
if not use_defaults:
continue
for parameter in params:
# Skip this option if the render mode is none
value_is_valid = parameter.render == 'none'
first_attempt = True
while not value_is_valid:
# Start building the string to show to the user - label and usage
plines = ['']
if parameter.label:
plines.append(click.style(parameter.label, bold=True))
if parameter.usage:
plines.append(click.style(parameter.usage))
# Add the choices / range if applicable
if parameter.choices:
rc = 'Choices' if parameter.type == 'string' else 'Range'
choices_string = ", ".join([click.style(x, fg='yellow') for x in parameter.choices if x != ''])
plines.append('{}: {}'.format(rc, choices_string))
# Reset the choice display if boolean
if parameter.type == "boolean":
pdef_val = 'Y/n' if parameter.default_value else 'y/N'
else:
pdef_val = parameter.default_value
# Final line to print - command and default
if pdef_val == '':
flag_default = ''
else:
flag_default = click.style(' [{}]'.format(pdef_val), fg='green')
flag_prompt = click.style(' --{}'.format(parameter.name), fg='blue') + flag_default
# Only show this final prompt if we're trying again
if first_attempt:
plines.append(flag_prompt)
else:
plines = [flag_prompt]
first_attempt = False
# Use click.confirm if a boolean for default input handling
if parameter.type == "boolean":
parameter.value = click.confirm("\n".join(plines),
default=parameter.default_value, show_default=False)
# Use click.prompt if anything else
else:
parameter.value = click.prompt("\n".join(plines),
default=parameter.default_value, show_default=False)
# Set input parameter types
try:
if parameter.type == "integer":
parameter.value = int(parameter.value)
elif parameter.type == "decimal":
parameter.value = float(parameter.value)
elif parameter.type == "string":
parameter.value = str(parameter.value)
except ValueError as e:
logging.error("Could not set variable type: {}".format(e))
# Validate the input
try:
parameter.validate()
except Exception as e:
click.secho("\nERROR: {}".format(e), fg='red')
click.secho("Please try again:")
continue
else:
value_is_valid = True
def build_command(self):
""" Build the nextflow run command based on what we know """
for flag, val in self.nxf_flags.items():
# Boolean flags like -resume
if isinstance(val, bool):
if val:
self.nextflow_cmd = "{} {}".format(self.nextflow_cmd, flag)
else:
logging.warning("TODO: Can't set false boolean flags currently.")
# String values
else:
self.nextflow_cmd = '{} {} "{}"'.format(self.nextflow_cmd, flag, val.replace('"', '\\"'))
# Write the user selection to a file and run nextflow with that
if self.use_params_file:
path = self.create_nfx_params_file()
if path is not None:
self.nextflow_cmd = '{} {} "{}"'.format(self.nextflow_cmd, "-params-file", path)
self.write_params_as_full_json()
# Call nextflow with a list of command line flags
else:
for param, val in self.params_user.items():
# Boolean flags like --saveTrimmed
if isinstance(val, bool):
if val:
self.nextflow_cmd = "{} --{}".format(self.nextflow_cmd, param)
else:
logging.error("Can't set false boolean flags.")
# everything else
else:
self.nextflow_cmd = '{} --{} "{}"'.format(self.nextflow_cmd, param, val.replace('"', '\\"'))
def create_nfx_params_file(self):
working_dir = os.getcwd()
output_file = os.path.join(working_dir, "nfx-params.json")
json_string = nf_core.workflow.parameters.Parameters.in_nextflow_json(self.parameters, indent=4)
if json_string == '{}':
return None
with open(output_file, "w") as fp:
fp.write(json_string)
return output_file
def write_params_as_full_json(self, outdir = os.getcwd()):
output_file = os.path.join(outdir, "full-params.json")
json_string = nf_core.workflow.parameters.Parameters.in_full_json(self.parameters, indent=4)
with open(output_file, "w") as fp:
fp.write(json_string)
return output_file
def launch_workflow(self):
""" Launch nextflow if required """
click.secho("\n\nNextflow command:", bold=True, underline=True)
click.secho(" {}\n\n".format(self.nextflow_cmd), fg='magenta')
if click.confirm(
'Do you want to run this command now? '+click.style('[y/N]', fg='green'),
default=False,
show_default=False
):
logging.info("Launching workflow!")
subprocess.call(self.nextflow_cmd, shell=True)
| []
| []
| [
"NXF_WORK"
]
| [] | ["NXF_WORK"] | python | 1 | 0 | |
Entry-2020-12-16.py | from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
import time
import os, fnmatch, shutil
import threading
import traceback
import re
import platform
def random_view(driver):
SCROLL_PAUSE_TIME = 0.5
# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
def init(chrome_path):
result = find(r'chromedriver*.exe', os.path.dirname(os.path.realpath(__file__)))
shutil.copyfile(result[0], os.path.join(chrome_path, r'chromedriver.exe'))
def entry(chrome_path, dest_page, stay_in_min):
if not re.match(r'^https?:/{2}\w.+$', dest_page):
raise RuntimeError('invalid URL: ' + dest_page)
try:
chromeOptions = webdriver.ChromeOptions()
chromeOptions.add_argument("--incognito")
# chromeOptions.add_argument('--headless')
chromeOptions.add_argument('--no-sandbox')
chrome_drive_name = r'chromedriver.exe'
if platform.system() == 'Linux':
chrome_drive_name = r'chromedriver'
        driver = webdriver.Chrome(os.path.join(chrome_path, chrome_drive_name), options=chromeOptions) # browser driver
        driver.delete_all_cookies() # delete cookies
driver.get(dest_page)
driver.refresh()
time.sleep(15)
source = driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[2]/div/div[2]/div/div[2]/div[2]/div/div/div[1]/a')
# action chain object creation
action = ActionChains(driver)
# move to the element and click then perform the operation
action.move_to_element(source).click().perform()
random_view(driver)
        # stay on the page for the requested time, then close the browser
time.sleep(60 * stay_in_min)
driver.quit()
except:
traceback.print_exc()
try:
driver.quit()
except:
traceback.print_exc()
if __name__ == "__main__":
chrome_path = r'C:\Program Files\Google\Chrome\Application'
NB_THREAD = 20
if platform.system() == 'Linux':
chrome_path = r'/usr/bin'
NB_THREAD = 50
else:
init(chrome_path)
while True:
if (threading.active_count() < NB_THREAD):
thread = threading.Thread(target=entry,args=(chrome_path, os.getenv('DEST_PAGE'), 5,))
thread.start()
time.sleep(3)
time.sleep(99999) # sleep forever
| []
| []
| [
"DEST_PAGE"
]
| [] | ["DEST_PAGE"] | python | 1 | 0 | |
server.py | #!/usr/bin/env python3
import os
from flask import Flask, jsonify
from fundamentus import get_data
from datetime import datetime
from flask_cors import CORS, cross_origin
app = Flask(__name__)
CORS(app)
# First update
lista, dia = dict(get_data()), datetime.strftime(datetime.today(), '%d')
lista = {outer_k: {inner_k: float(inner_v) for inner_k, inner_v in outer_v.items()} for outer_k, outer_v in lista.items()}
@app.route("/")
def json_api():
global lista, dia
# Then only update once a day
if dia == datetime.strftime(datetime.today(), '%d'):
return jsonify(lista)
else:
lista, dia = dict(get_data()), datetime.strftime(datetime.today(), '%d')
lista = {outer_k: {inner_k: float(inner_v) for inner_k, inner_v in outer_v.items()} for outer_k, outer_v in lista.items()}
return jsonify(lista)
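# Illustrative request (assumption, not part of the original file): with the server
# running locally, `curl http://localhost:5000/` returns the cached fundamentus data
# as JSON; the cache is refreshed at most once per day.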
port = int(os.environ.get('PORT', 5000))
app.run(debug=True, host='0.0.0.0', port=port)
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
scripts/training/train.py | """
######################################################################################################
# Class of scripts to train the Neural Networks #
######################################################################################################
"""
import argparse
import os
import sys
from six.moves import shlex_quote
parser = argparse.ArgumentParser(description="Run commands")
parser.add_argument('-w', '--num-workers', default=1, type=int,
help="Number of workers")
parser.add_argument('-r', '--remotes', default=None,
help='The address of pre-existing VNC servers and '
'rewarders to use (e.g. -r vnc://localhost:5900+15900,vnc://localhost:5901+15901).')
parser.add_argument('-e', '--env-id', type=str, default="PongDeterministic-v0",
help="Environment id")
parser.add_argument('-l', '--log-dir', type=str, default="/tmp/pong",
help="Log directory path")
parser.add_argument('-n', '--dry-run', action='store_true',
help="Print out commands rather than executing them")
parser.add_argument('-m', '--mode', type=str, default='tmux',
help="tmux: run workers in a tmux session. nohup: run workers with nohup. child: run workers as child processes")
parser.add_argument('-p', '--policy', type=str, default='feudal',
help="lstm or feudal policy")
# Add visualise tag
parser.add_argument('--visualise', action='store_true',
help="Visualise the gym environment by running env.render() between each timestep")
def new_cmd(session, name, cmd, mode, logdir, shell):
"""
Function for creating a new command environment to be run during training
Parameters
----------
    session : str
        Name of the tmux session (also used to label log files)
name : str
Client ID
    cmd : str or list
        Command to run, either as a single string or as a list of command arguments
mode : str
Mode for training to be run in
logdir : object
Log directory
    shell : str
        Shell executable used to run the command (e.g. 'bash')
"""
if isinstance(cmd, (list, tuple)):
cmd = " ".join(shlex_quote(str(v)) for v in cmd)
if mode == 'tmux':
return name, "tmux send-keys -t {}:{} {} Enter".format(session, name, shlex_quote(cmd))
elif mode == 'child':
return name, "{} >{}/{}.{}.out 2>&1 & echo kill $! >>{}/kill.sh".format(cmd, logdir, session, name, logdir)
elif mode == 'nohup':
return name, "nohup {} -c {} >{}/{}.{}.out 2>&1 & echo kill $! >>{}/kill.sh".format(shell, shlex_quote(cmd), logdir, session, name, logdir)
def create_commands(session, num_workers, remotes, env_id, logdir, shell='bash',
policy='lstm', mode='tmux', visualise=False):
"""
Function for creating new commands
Parameters
----------
    session : str
        Name of the tmux session to create (also used to label log files)
num_workers : int
Number of Workers in Feudal Network
    remotes : str or None
        Comma-separated addresses of pre-existing VNC servers/rewarders, or None to run locally
env_id : object
Environment ID
logdir : object
Log directory
    shell : str
        Shell executable used to launch each worker (default 'bash')
    policy : str
        Policy to be trained ('lstm' or 'feudal')
    mode : str
        How to run the workers: 'tmux', 'nohup' or 'child'
visualise : bool
Enable/Disable visualization
"""
# for launching the TF workers and for launching tensorboard
base_cmd = [
'CUDA_VISIBLE_DEVICES=',
sys.executable, 'worker.py',
'--log-dir', logdir,
'--env-id', env_id,
'--num-workers', str(num_workers)]
if visualise:
base_cmd += ['--visualise']
if remotes is None:
remotes = ["1"] * num_workers
else:
remotes = remotes.split(',')
assert len(remotes) == num_workers
cmds_map = [new_cmd(session, "ps", base_cmd + ["--job-name", "ps"], mode, logdir, shell)]
for i in range(num_workers):
cmds_map += [new_cmd(session,
"w-%d" % i, base_cmd + ["--job-name", "worker", "--task", str(i), "--remotes", remotes[i], "--policy", policy], mode, logdir, shell)]
cmds_map += [new_cmd(session, "tb", ["tensorboard", "--logdir", logdir, "--port", "12345"], mode, logdir, shell)]
if mode == 'tmux':
cmds_map += [new_cmd(session, "htop", ["htop"], mode, logdir, shell)]
windows = [v[0] for v in cmds_map]
notes = []
cmds = [
"mkdir -p {}".format(logdir),
"echo {} {} > {}/cmd.sh".format(sys.executable, ' '.join([shlex_quote(arg) for arg in sys.argv if arg != '-n']), logdir),
]
if mode == 'nohup' or mode == 'child':
cmds += ["echo '#!/bin/sh' >{}/kill.sh".format(logdir)]
notes += ["Run `source {}/kill.sh` to kill the job".format(logdir)]
if mode == 'tmux':
notes += ["Use `tmux attach -t {}` to watch process output".format(session)]
notes += ["Use `tmux kill-session -t {}` to kill the job".format(session)]
else:
notes += ["Use `tail -f {}/*.out` to watch process output".format(logdir)]
notes += ["Point your browser to http://localhost:12345 to see Tensorboard"]
if mode == 'tmux':
cmds += [
"kill $( lsof -i:12345 -t ) > /dev/null 2>&1", # kill any process using tensorboard's port
"kill $( lsof -i:12222-{} -t ) > /dev/null 2>&1".format(num_workers+12222), # kill any processes using ps / worker ports
"tmux kill-session -t {}".format(session),
"tmux new-session -s {} -n {} -d {}".format(session, windows[0], shell)
]
for w in windows[1:]:
cmds += ["tmux new-window -t {} -n {} {}".format(session, w, shell)]
cmds += ["sleep 1"]
for window, cmd in cmds_map:
cmds += [cmd]
return cmds, notes
def run():
"""
Run function for this script file.
"""
args = parser.parse_args()
cmds, notes = create_commands("a3c", args.num_workers, args.remotes, args.env_id, args.log_dir, policy=args.policy, mode=args.mode, visualise=args.visualise)
if args.dry_run:
print("Dry-run mode due to -n flag, otherwise the following commands would be executed:")
else:
print("Executing the following commands:")
print("\n".join(cmds))
print("")
if not args.dry_run:
if args.mode == "tmux":
os.environ["TMUX"] = ""
os.system("\n".join(cmds))
print('\n'.join(notes))
if __name__ == "__main__":
run()
| []
| []
| [
"TMUX"
]
| [] | ["TMUX"] | python | 1 | 0 | |
config/url.go | package config
import (
"errors"
"strings"
format "github.com/xkfen/go-git/v5/plumbing/format/config"
)
var (
errURLEmptyInsteadOf = errors.New("url config: empty insteadOf")
)
// URL defines URL rewrite rules
type URL struct {
// Name new base url
Name string
// Any URL that starts with this value will be rewritten to start, instead, with <base>.
	// When more than one insteadOf string matches a given URL, the longest match is used.
InsteadOf string
// raw representation of the subsection, filled by marshal or unmarshal are
// called.
raw *format.Subsection
}
// Validate validates the fields of the URL
func (b *URL) Validate() error {
if b.InsteadOf == "" {
return errURLEmptyInsteadOf
}
return nil
}
const (
insteadOfKey = "insteadOf"
)
func (u *URL) unmarshal(s *format.Subsection) error {
u.raw = s
u.Name = s.Name
u.InsteadOf = u.raw.Option(insteadOfKey)
return nil
}
func (u *URL) marshal() *format.Subsection {
if u.raw == nil {
u.raw = &format.Subsection{}
}
u.raw.Name = u.Name
u.raw.SetOption(insteadOfKey, u.InsteadOf)
return u.raw
}
func findLongestInsteadOfMatch(remoteURL string, urls map[string]*URL) *URL {
var longestMatch *URL
for _, u := range urls {
if !strings.HasPrefix(remoteURL, u.InsteadOf) {
continue
}
		// According to the spec, if there is more than one match, take the longest.
if longestMatch == nil || len(longestMatch.InsteadOf) < len(u.InsteadOf) {
longestMatch = u
}
}
return longestMatch
}
func (u *URL) ApplyInsteadOf(url string) string {
if !strings.HasPrefix(url, u.InsteadOf) {
return url
}
return u.Name + url[len(u.InsteadOf):]
}
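// Illustrative example (not part of the original file): given a URL subsection whose
// Name is "ssh://git@github.com/" and whose InsteadOf is "https://github.com/",
// ApplyInsteadOf("https://github.com/org/repo.git") returns
// "ssh://git@github.com/org/repo.git"; findLongestInsteadOfMatch prefers this entry
// over any shorter matching InsteadOf prefix.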
| []
| []
| []
| [] | [] | go | null | null | null |
middlewares/auth.go | package middlewares
import (
jwt "github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
"github.com/twinj/uuid"
"net/http"
"os"
"poke/models"
"strconv"
"time"
)
type AccessDetails struct {
AccessUUID string
UserUUID uint32
Expires int64
jwt.StandardClaims
}
func GenerateToken(u uint32) (string, error) {
var err error
exp := time.Now().Add(time.Hour * 6)
session := uuid.NewV4().String()
os.Setenv("API_KEY", "ASDLKFAKSLDFKAJSHDFAJSHDLJFHASLKDFJHAL")
claims := AccessDetails{UserUUID: u, AccessUUID: session, Expires: exp.Unix()}
rt := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
token, err := rt.SignedString([]byte(os.Getenv("API_KEY")))
if err != nil {
return "", err
}
errorRedis := models.Db.R.Set(session, strconv.Itoa(int(u)), exp.Sub(time.Now())).Err()
if errorRedis != nil {
return "", errorRedis
}
return token, nil
}
func ValidateToken(c *gin.Context) {
tknStr := c.GetHeader("Authentication")
claims := &AccessDetails{}
tkn, err := jwt.ParseWithClaims(tknStr, claims, func(token *jwt.Token) (interface{}, error) {
return []byte(os.Getenv("API_KEY")), nil
})
if err != nil {
if err == jwt.ErrSignatureInvalid {
c.String(http.StatusUnauthorized, "invalid signature")
c.Abort()
}
c.String(http.StatusBadRequest, "invalid params")
c.Abort()
}
if !tkn.Valid {
c.String(http.StatusUnauthorized, "invalid token")
c.Abort()
}
userID, err := models.Db.R.Get(claims.AccessUUID).Result()
if err != nil {
c.String(http.StatusUnauthorized, "session not found")
c.Abort()
}
c.Set("userUUID", userID)
c.Next()
}
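// Illustrative wiring (assumption, not part of the original file):
//
//	r := gin.Default()
//	r.GET("/pokemon", middlewares.ValidateToken, listPokemonHandler) // listPokemonHandler is hypothetical
//
// Clients pass the token returned by GenerateToken in the "Authentication" header;
// downstream handlers can then read the authenticated user via c.GetString("userUUID").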
| [
"\"API_KEY\"",
"\"API_KEY\""
]
| []
| [
"API_KEY"
]
| [] | ["API_KEY"] | go | 1 | 0 | |
src/core/mcir/common.go | package mcir
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
//uuid "github.com/google/uuid"
"github.com/cloud-barista/cb-spider/interface/api"
"github.com/cloud-barista/cb-tumblebug/src/core/common"
"github.com/xwb1989/sqlparser"
// CB-Store
)
// CB-Store
//var cblog *logrus.Logger
//var store icbs.Store
//var SPIDER_REST_URL string
func init() {
//cblog = config.Cblogger
//store = cbstore.GetStore()
//SPIDER_REST_URL = os.Getenv("SPIDER_REST_URL")
}
func DelAllResources(nsId string, resourceType string, forceFlag string) error {
resourceIdList := ListResourceId(nsId, resourceType)
if len(resourceIdList) == 0 {
return nil
}
for _, v := range resourceIdList {
err := DelResource(nsId, resourceType, v, forceFlag)
if err != nil {
return err
}
}
return nil
}
//func DelResource(nsId string, resourceType string, resourceId string, forceFlag string) (int, []byte, error) {
func DelResource(nsId string, resourceType string, resourceId string, forceFlag string) error {
//fmt.Println("[Delete " + resourceType + "] " + resourceId)
fmt.Printf("DelResource() called; %s %s %s \n", nsId, resourceType, resourceId) // for debug
check, _ := CheckResource(nsId, resourceType, resourceId)
if !check {
errString := "The " + resourceType + " " + resourceId + " does not exist."
//mapA := map[string]string{"message": errString}
//mapB, _ := json.Marshal(mapA)
err := fmt.Errorf(errString)
//return http.StatusNotFound, mapB, err
return err
}
key := common.GenResourceKey(nsId, resourceType, resourceId)
fmt.Println("key: " + key)
keyValue, _ := common.CBStore.Get(key)
/*
if keyValue == nil {
mapA := map[string]string{"message": "Failed to find the resource with given ID."}
mapB, _ := json.Marshal(mapA)
err := fmt.Errorf("Failed to find the resource with given ID.")
return http.StatusNotFound, mapB, err
}
*/
//fmt.Println("keyValue: " + keyValue.Key + " / " + keyValue.Value)
//cspType := common.GetResourcesCspType(nsId, resourceType, resourceId)
if os.Getenv("SPIDER_CALL_METHOD") == "REST" {
var url string
// Create Req body
type JsonTemplate struct {
ConnectionName string
}
tempReq := JsonTemplate{}
switch resourceType {
case "image":
// delete image info
err := common.CBStore.Delete(key)
if err != nil {
common.CBLog.Error(err)
//return http.StatusInternalServerError, nil, err
return err
}
sql := "DELETE FROM `image` WHERE `id` = '" + resourceId + "';"
fmt.Println("sql: " + sql)
// https://stackoverflow.com/questions/42486032/golang-sql-query-syntax-validator
_, err = sqlparser.Parse(sql)
if err != nil {
//return
}
stmt, err := common.MYDB.Prepare(sql)
if err != nil {
fmt.Println(err.Error())
}
_, err = stmt.Exec()
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Data deleted successfully..")
}
//return http.StatusOK, nil, nil
return nil
case "spec":
// delete spec info
//get related recommend spec
//keyValue, err := common.CBStore.Get(key)
content := TbSpecInfo{}
json.Unmarshal([]byte(keyValue.Value), &content)
/*
if err != nil {
common.CBLog.Error(err)
return http.StatusInternalServerError, nil, err
}
*/
//
err := common.CBStore.Delete(key)
if err != nil {
common.CBLog.Error(err)
//return http.StatusInternalServerError, nil, err
return err
}
//delete related recommend spec
err = DelRecommendSpec(nsId, resourceId, content.Num_vCPU, content.Mem_GiB, content.Storage_GiB)
if err != nil {
common.CBLog.Error(err)
//return http.StatusInternalServerError, nil, err
return err
}
sql := "DELETE FROM `spec` WHERE `id` = '" + resourceId + "';"
fmt.Println("sql: " + sql)
// https://stackoverflow.com/questions/42486032/golang-sql-query-syntax-validator
_, err = sqlparser.Parse(sql)
if err != nil {
//return
}
stmt, err := common.MYDB.Prepare(sql)
if err != nil {
fmt.Println(err.Error())
}
_, err = stmt.Exec()
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Data deleted successfully..")
}
//return http.StatusOK, nil, nil
return nil
case "sshKey":
temp := TbSshKeyInfo{}
json.Unmarshal([]byte(keyValue.Value), &temp)
tempReq.ConnectionName = temp.ConnectionName
url = common.SPIDER_REST_URL + "/keypair/" + temp.Name //+ "?connection_name=" + temp.ConnectionName
case "vNet":
temp := TbVNetInfo{}
json.Unmarshal([]byte(keyValue.Value), &temp)
tempReq.ConnectionName = temp.ConnectionName
url = common.SPIDER_REST_URL + "/vpc/" + temp.Name //+ "?connection_name=" + temp.ConnectionName
case "securityGroup":
temp := TbSecurityGroupInfo{}
json.Unmarshal([]byte(keyValue.Value), &temp)
tempReq.ConnectionName = temp.ConnectionName
url = common.SPIDER_REST_URL + "/securitygroup/" + temp.Name //+ "?connection_name=" + temp.ConnectionName
/*
case "subnet":
temp := subnetInfo{}
json.Unmarshal([]byte(keyValue.Value), &content)
return content.CspSubnetId
case "publicIp":
temp := publicIpInfo{}
json.Unmarshal([]byte(keyValue.Value), &temp)
tempReq.ConnectionName = temp.ConnectionName
url = common.SPIDER_REST_URL + "/publicip/" + temp.CspPublicIpName //+ "?connection_name=" + temp.ConnectionName
case "vNic":
temp := vNicInfo{}
json.Unmarshal([]byte(keyValue.Value), &temp)
tempReq.ConnectionName = temp.ConnectionName
url = common.SPIDER_REST_URL + "/vnic/" + temp.CspVNicName //+ "?connection_name=" + temp.ConnectionName
*/
default:
err := fmt.Errorf("invalid resourceType")
//return http.StatusBadRequest, nil, err
return err
}
fmt.Println("url: " + url)
method := "DELETE"
client := &http.Client{
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
//req, err := http.NewRequest(method, url, nil)
payload, _ := json.MarshalIndent(tempReq, "", " ")
//fmt.Println("payload: " + string(payload))
req, err := http.NewRequest(method, url, strings.NewReader(string(payload)))
if err != nil {
common.CBLog.Error(err)
return err
}
req.Header.Add("Content-Type", "application/json")
res, err := client.Do(req)
if err != nil {
common.CBLog.Error(err)
return err
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
fmt.Println(string(body))
if err != nil {
common.CBLog.Error(err)
return err
}
/*
if res.StatusCode == 400 || res.StatusCode == 401 {
fmt.Println("HTTP Status code 400 Bad Request or 401 Unauthorized.")
err := fmt.Errorf("HTTP Status code 400 Bad Request or 401 Unauthorized")
common.CBLog.Error(err)
return res, err
}
// delete vNet info
err := common.CBStore.Delete(key)
if err != nil {
common.CBLog.Error(err)
return res, err
}
return res, nil
*/
fmt.Println("HTTP Status code " + strconv.Itoa(res.StatusCode))
switch {
case forceFlag == "true":
url += "?force=true"
fmt.Println("forceFlag == true; url: " + url)
req, err := http.NewRequest(method, url, strings.NewReader(string(payload)))
if err != nil {
common.CBLog.Error(err)
//return err
}
req.Header.Add("Content-Type", "application/json")
res, err := client.Do(req)
if err != nil {
common.CBLog.Error(err)
//return err
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
fmt.Println(string(body))
if err != nil {
common.CBLog.Error(err)
//return err
}
err = common.CBStore.Delete(key)
if err != nil {
common.CBLog.Error(err)
//return res.StatusCode, body, err
return err
}
//return res.StatusCode, body, nil
return nil
case res.StatusCode >= 400 || res.StatusCode < 200:
err := fmt.Errorf(string(body))
common.CBLog.Error(err)
//return res.StatusCode, body, err
return err
default:
err := common.CBStore.Delete(key)
if err != nil {
common.CBLog.Error(err)
//return res.StatusCode, body, err
return err
}
//return res.StatusCode, body, nil
return nil
}
} else {
		// Configure the CCM API client
ccm := api.NewCloudResourceHandler()
err := ccm.SetConfigPath(os.Getenv("CBTUMBLEBUG_ROOT") + "/conf/grpc_conf.yaml")
if err != nil {
common.CBLog.Error("ccm failed to set config : ", err)
return err
}
err = ccm.Open()
if err != nil {
common.CBLog.Error("ccm api open failed : ", err)
return err
}
defer ccm.Close()
switch resourceType {
case "image":
// delete image info
err := common.CBStore.Delete(key)
if err != nil {
common.CBLog.Error(err)
//return http.StatusInternalServerError, nil, err
return err
}
sql := "DELETE FROM `image` WHERE `id` = '" + resourceId + "';"
fmt.Println("sql: " + sql)
// https://stackoverflow.com/questions/42486032/golang-sql-query-syntax-validator
_, err = sqlparser.Parse(sql)
if err != nil {
//return
}
stmt, err := common.MYDB.Prepare(sql)
if err != nil {
fmt.Println(err.Error())
}
_, err = stmt.Exec()
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Data deleted successfully..")
}
//return http.StatusOK, nil, nil
return nil
case "spec":
// delete spec info
//get related recommend spec
content := TbSpecInfo{}
json.Unmarshal([]byte(keyValue.Value), &content)
err := common.CBStore.Delete(key)
if err != nil {
common.CBLog.Error(err)
return err
}
//delete related recommend spec
err = DelRecommendSpec(nsId, resourceId, content.Num_vCPU, content.Mem_GiB, content.Storage_GiB)
if err != nil {
common.CBLog.Error(err)
return err
}
sql := "DELETE FROM `spec` WHERE `id` = '" + resourceId + "';"
fmt.Println("sql: " + sql)
// https://stackoverflow.com/questions/42486032/golang-sql-query-syntax-validator
_, err = sqlparser.Parse(sql)
if err != nil {
//return
}
stmt, err := common.MYDB.Prepare(sql)
if err != nil {
fmt.Println(err.Error())
}
_, err = stmt.Exec()
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Data deleted successfully..")
}
return nil
case "sshKey":
temp := TbSshKeyInfo{}
json.Unmarshal([]byte(keyValue.Value), &temp)
_, err := ccm.DeleteKeyByParam(temp.ConnectionName, temp.Name, forceFlag)
if err != nil {
common.CBLog.Error(err)
return err
}
case "vNet":
temp := TbVNetInfo{}
json.Unmarshal([]byte(keyValue.Value), &temp)
_, err := ccm.DeleteVPCByParam(temp.ConnectionName, temp.Name, forceFlag)
if err != nil {
common.CBLog.Error(err)
return err
}
case "securityGroup":
temp := TbSecurityGroupInfo{}
json.Unmarshal([]byte(keyValue.Value), &temp)
_, err := ccm.DeleteSecurityByParam(temp.ConnectionName, temp.Name, forceFlag)
if err != nil {
common.CBLog.Error(err)
return err
}
default:
err := fmt.Errorf("invalid resourceType")
return err
}
err = common.CBStore.Delete(key)
if err != nil {
common.CBLog.Error(err)
return err
}
return nil
}
}
func ListResourceId(nsId string, resourceType string) []string {
if resourceType == "image" ||
resourceType == "sshKey" ||
resourceType == "spec" ||
resourceType == "vNet" ||
//resourceType == "subnet" ||
//resourceType == "publicIp" ||
//resourceType == "vNic" ||
resourceType == "securityGroup" {
// continue
} else {
return []string{"invalid resource type"}
}
fmt.Println("[Get " + resourceType + " list")
key := "/ns/" + nsId + "/resources/" + resourceType
fmt.Println(key)
keyValue, _ := common.CBStore.GetList(key, true)
var resourceList []string
for _, v := range keyValue {
//if !strings.Contains(v.Key, "vm") {
resourceList = append(resourceList, strings.TrimPrefix(v.Key, "/ns/"+nsId+"/resources/"+resourceType+"/"))
//}
}
for _, v := range resourceList {
fmt.Println("<" + v + "> \n")
}
fmt.Println("===============================================")
return resourceList
}
func ListResource(nsId string, resourceType string) (interface{}, error) {
if resourceType == "image" ||
resourceType == "sshKey" ||
resourceType == "spec" ||
resourceType == "vNet" ||
//resourceType == "subnet" ||
//resourceType == "publicIp" ||
//resourceType == "vNic" ||
resourceType == "securityGroup" {
// continue
} else {
errString := "Cannot list " + resourceType + "s."
err := fmt.Errorf(errString)
return nil, err
}
fmt.Println("[Get " + resourceType + " list")
key := "/ns/" + nsId + "/resources/" + resourceType
fmt.Println(key)
keyValue, err := common.CBStore.GetList(key, true)
if err != nil {
common.CBLog.Error(err)
/*
fmt.Println("func ListResource; common.CBStore.GetList gave error")
var resourceList []string
for _, v := range keyValue {
resourceList = append(resourceList, strings.TrimPrefix(v.Key, "/ns/"+nsId+"/resources/"+resourceType+"/"))
}
for _, v := range resourceList {
fmt.Println("<" + v + "> \n")
}
fmt.Println("===============================================")
*/
return nil, err
}
if keyValue != nil {
switch resourceType {
case "image":
res := []TbImageInfo{}
for _, v := range keyValue {
tempObj := TbImageInfo{}
json.Unmarshal([]byte(v.Value), &tempObj)
res = append(res, tempObj)
}
return res, nil
case "securityGroup":
res := []TbSecurityGroupInfo{}
for _, v := range keyValue {
tempObj := TbSecurityGroupInfo{}
json.Unmarshal([]byte(v.Value), &tempObj)
res = append(res, tempObj)
}
return res, nil
case "spec":
res := []TbSpecInfo{}
for _, v := range keyValue {
tempObj := TbSpecInfo{}
json.Unmarshal([]byte(v.Value), &tempObj)
res = append(res, tempObj)
}
return res, nil
case "sshKey":
res := []TbSshKeyInfo{}
for _, v := range keyValue {
tempObj := TbSshKeyInfo{}
json.Unmarshal([]byte(v.Value), &tempObj)
res = append(res, tempObj)
}
return res, nil
case "vNet":
res := []TbVNetInfo{}
for _, v := range keyValue {
tempObj := TbVNetInfo{}
json.Unmarshal([]byte(v.Value), &tempObj)
res = append(res, tempObj)
}
return res, nil
}
//return true, nil
}
return nil, nil // When err == nil && keyValue == nil
}
func GetResource(nsId string, resourceType string, resourceId string) (interface{}, error) {
check, _ := CheckResource(nsId, resourceType, resourceId)
if !check {
errString := "The " + resourceType + " " + resourceId + " does not exist."
//mapA := map[string]string{"message": errString}
//mapB, _ := json.Marshal(mapA)
err := fmt.Errorf(errString)
return nil, err
}
fmt.Println("[Get resource] " + resourceType + ", " + resourceId)
key := common.GenResourceKey(nsId, resourceType, resourceId)
//fmt.Println(key)
keyValue, err := common.CBStore.Get(key)
if err != nil {
common.CBLog.Error(err)
return nil, err
}
if keyValue != nil {
switch resourceType {
case "image":
res := TbImageInfo{}
json.Unmarshal([]byte(keyValue.Value), &res)
return res, nil
case "securityGroup":
res := TbSecurityGroupInfo{}
json.Unmarshal([]byte(keyValue.Value), &res)
return res, nil
case "spec":
res := TbSpecInfo{}
json.Unmarshal([]byte(keyValue.Value), &res)
return res, nil
case "sshKey":
res := TbSshKeyInfo{}
json.Unmarshal([]byte(keyValue.Value), &res)
return res, nil
case "vNet":
res := TbVNetInfo{}
json.Unmarshal([]byte(keyValue.Value), &res)
return res, nil
}
//return true, nil
}
errString := "Cannot get " + resourceType + " " + resourceId + "."
err = fmt.Errorf(errString)
return nil, err
}
func CheckResource(nsId string, resourceType string, resourceId string) (bool, error) {
// Check parameters' emptiness
if nsId == "" {
err := fmt.Errorf("CheckResource failed; nsId given is null.")
return false, err
} else if resourceType == "" {
err := fmt.Errorf("CheckResource failed; resourceType given is null.")
return false, err
} else if resourceId == "" {
err := fmt.Errorf("CheckResource failed; resourceId given is null.")
return false, err
}
// Check resourceType's validity
if resourceType == "image" ||
resourceType == "sshKey" ||
resourceType == "spec" ||
resourceType == "vNet" ||
resourceType == "securityGroup" {
//resourceType == "subnet" ||
//resourceType == "publicIp" ||
//resourceType == "vNic" {
// continue
} else {
err := fmt.Errorf("invalid resource type")
return false, err
}
fmt.Println("[Check resource] " + resourceType + ", " + resourceId)
key := common.GenResourceKey(nsId, resourceType, resourceId)
//fmt.Println(key)
keyValue, err := common.CBStore.Get(key)
if err != nil {
common.CBLog.Error(err)
return false, err
}
if keyValue != nil {
return true, nil
}
return false, nil
}
/*
func convertSpiderResourceToTumblebugResource(resourceType string, i interface{}) (interface{}, error) {
if resourceType == "" {
err := fmt.Errorf("CheckResource failed; resourceType given is null.")
return nil, err
}
// Check resourceType's validity
if resourceType == "image" ||
resourceType == "sshKey" ||
resourceType == "spec" ||
resourceType == "vNet" ||
resourceType == "securityGroup" {
//resourceType == "subnet" ||
//resourceType == "publicIp" ||
//resourceType == "vNic" {
// continue
} else {
err := fmt.Errorf("invalid resource type")
return nil, err
}
}
*/
/*
func RestDelResource(c echo.Context) error {
nsId := c.Param("nsId")
resourceType := c.Param("resourceType")
resourceId := c.Param("resourceId")
forceFlag := c.QueryParam("force")
fmt.Printf("RestDelResource() called; %s %s %s \n", nsId, resourceType, resourceId) // for debug
responseCode, _, err := DelResource(nsId, resourceType, resourceId, forceFlag)
if err != nil {
common.CBLog.Error(err)
mapA := map[string]string{"message": err.Error()}
return c.JSON(responseCode, &mapA)
}
mapA := map[string]string{"message": "The " + resourceType + " " + resourceId + " has been deleted"}
return c.JSON(http.StatusOK, &mapA)
}
func RestDelAllResources(c echo.Context) error {
nsId := c.Param("nsId")
resourceType := c.Param("resourceType")
forceFlag := c.QueryParam("force")
resourceList := ListResourceId(nsId, resourceType)
if len(resourceList) == 0 {
mapA := map[string]string{"message": "There is no " + resourceType + " element in this namespace."}
return c.JSON(http.StatusNotFound, &mapA)
} else {
for _, v := range resourceList {
responseCode, _, err := DelResource(nsId, resourceType, v, forceFlag)
if err != nil {
common.CBLog.Error(err)
mapA := map[string]string{"message": err.Error()}
return c.JSON(responseCode, &mapA)
}
}
mapA := map[string]string{"message": "All " + resourceType + "s has been deleted"}
return c.JSON(http.StatusOK, &mapA)
}
}
*/
// https://stackoverflow.com/questions/45139954/dynamic-struct-as-parameter-golang
type ReturnValue struct {
CustomStruct interface{}
}
type NameOnly struct {
Name string
}
func GetNameFromStruct(u interface{}) string {
var result = ReturnValue{CustomStruct: u}
//fmt.Println(result)
msg, ok := result.CustomStruct.(NameOnly)
if ok {
//fmt.Printf("Message1 is %s\n", msg.Name)
return msg.Name
} else {
return ""
}
}
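// Illustrative example (not part of the original file):
// GetNameFromStruct(NameOnly{Name: "vnet-01"}) returns "vnet-01", while passing a value
// of any other type makes the type assertion fail and yields the empty string.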
//func createResource(nsId string, resourceType string, u interface{}) (interface{}, int, []byte, error) {
| [
"\"SPIDER_REST_URL\"",
"\"SPIDER_CALL_METHOD\"",
"\"CBTUMBLEBUG_ROOT\""
]
| []
| [
"CBTUMBLEBUG_ROOT",
"SPIDER_CALL_METHOD",
"SPIDER_REST_URL"
]
| [] | ["CBTUMBLEBUG_ROOT", "SPIDER_CALL_METHOD", "SPIDER_REST_URL"] | go | 3 | 0 | |
kubeconfig/kubeconfig.go | /*
------------------------------------------------------------------------
Modified from the original example code to first connect with
incluster configuration then if unsuccessful external kubeconfig
format file
------------------------------------------------------------------------
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Note: the example only works with the code within the same
// release/branch.
package kubeconfig
import (
"os"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
// Uncomment the following line to load the gcp plugin (only
// required to authenticate against GKE clusters).
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/tools/clientcmd"
)
// InCluster is true when the cluster endpoints are accessible, i.e. when the
// service is running inside the cluster's network namespace.
var InCluster bool
// CheckInCluster reports whether the KUBERNETES_PORT environment variable is set, indicating the process is running inside a cluster.
func CheckInCluster() bool {
return len(os.Getenv("KUBERNETES_PORT")) > 0
}
func init() {
InCluster = CheckInCluster()
}
// NewClientset returns a new handle to a kubernetes client takes
// kubeconfig path arg
func NewClientset(kubeconfig string) *kubernetes.Clientset {
// kubeRestConfig kubernetes config object
var kubeRestConfig *restclient.Config
// clientset is a handle to execute kubernetes commands
var clientset *kubernetes.Clientset
var err error
// creates the in-cluster configuration
kubeRestConfig, err = restclient.InClusterConfig()
if err != nil {
// try with a kubeconfig file
kubeRestConfig, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
} else {
// InCluster = true
}
if err == nil {
// creates the clientset
clientset, err = kubernetes.NewForConfig(kubeRestConfig)
}
return clientset
}
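// Illustrative usage (assumption, not part of the original file):
//
//	clientset := kubeconfig.NewClientset(filepath.Join(os.Getenv("HOME"), ".kube", "config"))
//	if clientset == nil {
//		// neither the in-cluster config nor the kubeconfig file could be used
//	}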
| [
"\"KUBERNETES_PORT\""
]
| []
| [
"KUBERNETES_PORT"
]
| [] | ["KUBERNETES_PORT"] | go | 1 | 0 | |
src/main/java/hu/bme/mit/spaceship/TorpedoStore.java | package hu.bme.mit.spaceship;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.util.Random;
/**
* Class storing and managing the torpedoes of a ship
*
* (Deliberately contains bugs.)
*/
public class TorpedoStore {
// rate of failing to fire torpedos [0.0, 1.0]
private double FAILURE_RATE = 0.0; //NOSONAR
private int torpedoCount = 0;
public TorpedoStore(int numberOfTorpedos){
this.torpedoCount = numberOfTorpedos;
// update failure rate if it was specified in an environment variable
String failureEnv = System.getenv("IVT_RATE");
if (failureEnv != null){
try {
FAILURE_RATE = Double.parseDouble(failureEnv);
} catch (NumberFormatException nfe) {
FAILURE_RATE = 0.0;
}
}
}
public boolean fire(int numberOfTorpedos){
if(numberOfTorpedos < 1 || numberOfTorpedos > this.torpedoCount){
throw new IllegalArgumentException("numberOfTorpedos");
}
boolean success = false;
// simulate random overheating of the launcher bay which prevents firing
    Random generator;
    try {
      generator = SecureRandom.getInstanceStrong();
    } catch (NoSuchAlgorithmException e) {
      // fall back to the default SecureRandom so fire() does not need to declare the checked exception
      generator = new SecureRandom();
    }
double r = generator.nextDouble();
if (r >= FAILURE_RATE) {
// successful firing
this.torpedoCount -= numberOfTorpedos;
success = true;
} else {
// simulated failure
success = false;
}
return success;
}
public boolean isEmpty(){
return this.torpedoCount <= 0;
}
public int getTorpedoCount() {
return this.torpedoCount;
}
}
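// Illustrative usage (assumption, not part of the original class):
//   TorpedoStore primary = new TorpedoStore(10);
//   boolean success = primary.fire(1); // decrements the count unless the simulated failure triggers
// Setting the IVT_RATE environment variable to "1.0" before construction makes every
// fire() attempt fail, which is handy for exercising the failure path in tests.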
| [
"\"IVT_RATE\""
]
| []
| [
"IVT_RATE"
]
| [] | ["IVT_RATE"] | java | 1 | 0 | |
cmd/metawriter/main.go | package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/setlog/trivrost/pkg/launcher/config"
)
var (
launcherConfigPath string
versionInfoTemplatePath, versionInfoPath string
mainExeManifestTemplatePath, mainExeManifestPath string
infoPListTemplatePath, infoPListPath string
)
var (
launcherConfig *config.LauncherConfig
versionSemantic, versionFull string
)
func main() {
parseFlags()
determineVariables()
validateVariables()
writeMetaFiles()
}
func writeMetaFiles() {
mustWriteFile(versionInfoPath, []byte(replacePlaceholders(mustReadFile(versionInfoTemplatePath))))
mustWriteFile(mainExeManifestPath, []byte(replacePlaceholders(mustReadFile(mainExeManifestTemplatePath))))
mustWriteFile(infoPListPath, []byte(replacePlaceholders(mustReadFile(infoPListTemplatePath))))
}
func replacePlaceholders(text string) string {
text = strings.Replace(text, "${LAUNCHER_BINARY}", os.Getenv("LAUNCHER_BINARY"), -1)
text = strings.Replace(text, "${LAUNCHER_BINARY_EXT}", os.Getenv("LAUNCHER_BINARY_EXT"), -1)
text = strings.Replace(text, "${LAUNCHER_VENDOR_NAME}", launcherConfig.VendorName, -1)
text = strings.Replace(text, "${LAUNCHER_BRANDING_NAME}", launcherConfig.BrandingName, -1)
text = strings.Replace(text, "${LAUNCHER_BRANDING_NAME_SHORT}", launcherConfig.BrandingNameShort, -1)
text = strings.Replace(text, "${LAUNCHER_REVERSE_DNS_PRODUCT_ID}", launcherConfig.ReverseDnsProductId, -1)
text = strings.Replace(text, "${LAUNCHER_VERSION_MAJOR}", strconv.Itoa(launcherConfig.ProductVersion.Major), -1)
text = strings.Replace(text, "${LAUNCHER_VERSION_MINOR}", strconv.Itoa(launcherConfig.ProductVersion.Minor), -1)
text = strings.Replace(text, "${LAUNCHER_VERSION_PATCH}", strconv.Itoa(launcherConfig.ProductVersion.Patch), -1)
text = strings.Replace(text, "${LAUNCHER_VERSION_BUILD}", strconv.Itoa(launcherConfig.ProductVersion.Build), -1)
text = strings.Replace(text, "${LAUNCHER_VERSION_SEMANTIC}", versionSemantic, -1)
text = strings.Replace(text, "${LAUNCHER_VERSION_FULL}", versionFull, -1)
return text
}
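// Illustrative example (assumption, not part of the original file): a template line such as
//	<string>${LAUNCHER_BRANDING_NAME} ${LAUNCHER_VERSION_SEMANTIC}</string>
// would be written out by replacePlaceholders as
//	<string>Example Launcher 1.2.3</string>
// for a launcher config whose BrandingName is "Example Launcher" and whose
// ProductVersion is 1.2.3 (build 4).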
func mustReadFile(filePath string) string {
fmt.Printf("Metawriter: Reading \"%s\".\n", filePath)
data, err := ioutil.ReadFile(filePath)
if err != nil {
fatalf("Could not read \"%s\": %v", filePath, err)
}
return string(data)
}
func mustWriteFile(filePath string, data []byte) {
fmt.Printf("Metawriter: Writing \"%s\".\n", filePath)
err := ioutil.WriteFile(filePath, data, 0600)
if err != nil {
fatalf("Could not open file \"%s\" for writing: %v", filePath, err)
}
}
func determineVariables() {
launcherConfig = config.ReadLauncherConfigFromReader(mustReaderForFile(launcherConfigPath))
versionSemantic = fmt.Sprintf("%d.%d.%d", launcherConfig.ProductVersion.Major, launcherConfig.ProductVersion.Minor, launcherConfig.ProductVersion.Patch)
versionFull = fmt.Sprintf("%s.%d", versionSemantic, launcherConfig.ProductVersion.Build)
}
func validateVariables() {
if launcherConfig.DeploymentConfigURL == "" {
fatalf("'DeploymentConfigURL' is not set in the launcher config.")
}
if launcherConfig.VendorName == "" {
fatalf("'VendorName' is not set in the launcher config.")
}
if launcherConfig.ProductName == "" {
fatalf("'ProductName' is not set in the launcher config.")
}
if launcherConfig.BrandingName == "" {
fatalf("'BrandingName' is not set in the launcher config.")
}
if launcherConfig.BrandingNameShort == "" {
fatalf("'BrandingNameShort' is not set in the launcher config.")
}
if len(launcherConfig.BrandingNameShort) > 15 {
fatalf("'BrandingNameShort' in the launcher config is longer than 15 bytes.")
}
if launcherConfig.ReverseDnsProductId == "" {
fatalf("'ReverseDnsProductId' is not set in the launcher config.")
}
if launcherConfig.BinaryName == "" {
fatalf("'BinaryName' is not set in the launcher config.")
}
if versionFull == "0.0.0.0" {
fmt.Println("Warning: 'ProductVersion' is not set or is '0.0.0.0' in the launcher config. This is not fatal but users might think it looks strange.")
}
}
func mustReaderForFile(filePath string) io.Reader {
data, err := ioutil.ReadFile(filePath)
if err != nil {
fatalf("Could not read file \"%s\": %v", filePath, err)
}
return bytes.NewReader(data)
}
func parseFlags() {
flag.Parse()
if flag.NArg() != 7 {
fatalf("Need 7 args: launcherConfigPath versionInfoTemplatePath versionInfoPath mainExeManifestTemplatePath mainExeManifestPath infoPListTemplatePath infoPListPath")
}
launcherConfigPath = flag.Arg(0)
versionInfoTemplatePath, versionInfoPath = flag.Arg(1), flag.Arg(2)
mainExeManifestTemplatePath, mainExeManifestPath = flag.Arg(3), flag.Arg(4)
infoPListTemplatePath, infoPListPath = flag.Arg(5), flag.Arg(6)
if launcherConfigPath == "" {
fatalf("launcher config path (1st arg) empty")
}
if versionInfoTemplatePath == "" {
fatalf("version info template path (2nd arg) empty")
}
if versionInfoPath == "" {
fatalf("version info path (3rd arg) empty")
}
if mainExeManifestTemplatePath == "" {
fatalf("main exe manifest template path (4th arg) empty")
}
if mainExeManifestPath == "" {
fatalf("main exe manifest path (5th arg) empty")
}
if infoPListTemplatePath == "" {
fatalf("info plist template path (6th arg) empty")
}
if infoPListPath == "" {
fatalf("info plist path (7th arg) empty")
}
}
func fatalf(formatMessage string, args ...interface{}) {
fmt.Printf("Fatal: "+formatMessage+"\n", args...)
os.Exit(1)
}
| [
"\"LAUNCHER_BINARY\"",
"\"LAUNCHER_BINARY_EXT\""
]
| []
| [
"LAUNCHER_BINARY",
"LAUNCHER_BINARY_EXT"
]
| [] | ["LAUNCHER_BINARY", "LAUNCHER_BINARY_EXT"] | go | 2 | 0 | |
build_scripts/CompileOpenSSL-Linux.py | import os
from subprocess import call
import sys
import re
import multiprocessing as mp
import string
import shutil
configure_flags = "no-shared"
cflags = "-fPIC"
base_openssl_version = "1.1.1"
def is_python3_or_higher():
return sys.version_info.major >= 3
def get_openssl_filename(ver):
return "openssl-" + ver + ".tar.gz"
def get_openssl_link(ver):
link = "https://www.openssl.org/source/" + get_openssl_filename(ver)
# print(link)
return link
def download_file_python2(filelink, target):
import urllib
try:
testfile = urllib.URLopener()
try:
os.remove(target)
print("Found file " + target + ", which is now deleted.")
except:
pass
testfile.retrieve(filelink, target)
return True
except:
return False
def download_file_python3(filelink, target):
import urllib.request
try:
try:
os.remove(target)
print("Found file " + target + ", which is now deleted.")
except:
pass
with urllib.request.urlopen(filelink) as response, open(target, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
return True
except Exception as e:
return False
def download_file(filelink, target):
if is_python3_or_higher():
return download_file_python3(filelink, target)
else:
return download_file_python2(filelink, target)
def download_openssl():
openssl_version_found = False
filename_ = ""
for ver_suffix in list(reversed(string.ascii_lowercase))+[""]:
version_str = base_openssl_version + ver_suffix
if(download_file(get_openssl_link(version_str), get_openssl_filename(version_str))):
openssl_version_found = True
filename_ = get_openssl_filename(version_str)
print("Found latest OpenSSL version to be " + version_str)
break
if openssl_version_found == False:
print("Could not find the latest OpenSSL version. Probably you're not connected to the internet.")
print("If you have already downloaded OpenSSL, put the file name in the first argument of the script.")
return filename_
if len(sys.argv) < 2:
filename = download_openssl()
else:
filename = sys.argv[1]
dirname = filename.replace(".tar.gz","")
try:
shutil.rmtree(dirname)
except:
pass
working_dir = os.getcwd()
if not bool(re.match(r"(openssl-)(\d+)\.(\d+)\.(\d+)(\w*)(\.tar\.gz)", filename)):
print("The file given '" + filename + "' doesn't seem to be an openssl source file. It must be in the form: openssl-x.y.zw.tar.gz")
exit(1)
call("tar -xf " + filename, shell=True) #extract the .tar.gz file
dirname_bin = dirname + "_build"
final_dirname = "openssl_build"
try:
shutil.rmtree(dirname_bin)
except:
pass
try:
shutil.rmtree(final_dirname)
except:
pass
#Go back to base dir
os.chdir(working_dir)
################
os.chdir(dirname)
# prepend ccache to the path, necessary since prior steps prepend things to the path
os.environ['PATH'] = '/usr/lib/ccache:' + os.environ['PATH']
call("CFLAGS=" + cflags + " ./config " + configure_flags + " --prefix=" + os.path.join(working_dir,dirname_bin) + " " + configure_flags,shell=True)
call(r"make -j" + str(mp.cpu_count()), shell=True)
call(r"make install", shell=True)
print("Compilation complete.")
#Go back to base dir
os.chdir(working_dir)
################
call(r"ln -s " + dirname_bin + " " + final_dirname,shell=True)
print("")
print("OpenSSL compiled to \"" + os.path.join(working_dir,final_dirname) + "\" with a soft link to \"" + os.path.join(working_dir,dirname_bin) + "\"")
print("")
print("OpenSSL lib path: " + os.path.join(working_dir,final_dirname,"lib"))
print("OpenSSL include path: " + os.path.join(working_dir,final_dirname,"include"))
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
example/annictV2/main.go | package main
import (
"context"
"fmt"
"net/http"
"os"
"github.com/vanillaricewraps/gqlgenc/clientv2"
"github.com/vanillaricewraps/gqlgenc/example/annictV2/gen"
)
func main() {
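	// ANNICT_KEY is expected to hold an Annict API access token; it is sent as a Bearer token below.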
key := os.Getenv("ANNICT_KEY")
annictClient := NewAnnictClient(clientv2.NewClient(http.DefaultClient, "https://api.annict.com/graphql", func(ctx context.Context, req *http.Request, gqlInfo *clientv2.GQLRequestInfo, res interface{}, next clientv2.RequestInterceptorFunc) error {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", key))
return next(ctx, req, gqlInfo, res)
}))
ctx := context.Background()
getProfile, err := annictClient.GetProfile(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
fmt.Println(*getProfile.Viewer.AvatarURL, getProfile.Viewer.RecordsCount, getProfile.Viewer.WatchedCount)
list, err := annictClient.SearchWorks(ctx, []string{"2017-spring"})
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
for _, node := range list.SearchWorks.Nodes {
fmt.Println(node.ID, node.AnnictID, node.Title, *node.Work.Image.RecommendedImageURL)
}
getWork, err := annictClient.GetWork(ctx, []int64{list.SearchWorks.Nodes[0].AnnictID})
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
work := getWork.SearchWorks.Nodes[0]
_, err = annictClient.UpdateWorkStatus(ctx, work.ID)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
_, err = annictClient.CreateRecordMutation(ctx, work.Episodes.Nodes[0].ID)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
getProfile2, err := annictClient.GetProfile(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
fmt.Println(getProfile2.Viewer.RecordsCount, getProfile2.Viewer.WatchedCount)
res, err := annictClient.ListWorks(ctx, nil, nil, 5)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
fmt.Println(res.Viewer.Works.Edges[0].Node.Title, res.Viewer.Works.Edges[0].Cursor, len(res.Viewer.Works.Edges))
}
func NewAnnictClient(c *clientv2.Client) *gen.Client {
return &gen.Client{Client: c}
}
| [
"\"ANNICT_KEY\""
]
| []
| [
"ANNICT_KEY"
]
| [] | ["ANNICT_KEY"] | go | 1 | 0 | |
main.go | package main
import (
"fmt"
"log"
"net/http"
"os"
"runtime"
"github.com/gin-gonic/gin"
)
func main() {
for i := 0; i < 1000000; i++ {
fmt.Println("new deploy")
		resp, err := http.Get("http://mz-render-play:10000")
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Println(resp)
		resp.Body.Close()
}
//ConfigRuntime()
//StartWorkers()
//StartGin()
}
// ConfigRuntime sets the number of operating system threads.
func ConfigRuntime() {
nuCPU := runtime.NumCPU()
runtime.GOMAXPROCS(nuCPU)
fmt.Printf("Running with %d CPUs\n", nuCPU)
}
// StartWorkers start starsWorker by goroutine.
func StartWorkers() {
go statsWorker()
}
// StartGin starts gin web server with setting router.
func StartGin() {
gin.SetMode(gin.ReleaseMode)
router := gin.New()
router.Use(rateLimit, gin.Recovery())
router.LoadHTMLGlob("resources/*.templ.html")
router.Static("/static", "resources/static")
router.GET("/", index)
router.GET("/room/:roomid", roomGET)
router.POST("/room-post/:roomid", roomPOST)
router.GET("/stream/:roomid", streamRoom)
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
if err := router.Run(":" + port); err != nil {
log.Panicf("error: %s", err)
}
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
packer/gotool.go | package packer
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/gokrazy/tools/internal/measure"
"golang.org/x/sync/errgroup"
)
var env = goEnv()
func goEnv() []string {
goarch := "arm64" // Raspberry Pi 3
if e := os.Getenv("GOARCH"); e != "" {
goarch = e
}
goos := "linux" // Raspberry Pi 3
if e := os.Getenv("GOOS"); e != "" {
goos = e
}
env := os.Environ()
for idx, e := range env {
if strings.HasPrefix(e, "CGO_ENABLED=") {
env[idx] = "CGO_ENABLED=0"
}
if strings.HasPrefix(e, "GOBIN=") {
env[idx] = "GOBIN="
}
}
return append(env,
fmt.Sprintf("GOARCH=%s", goarch),
fmt.Sprintf("GOOS=%s", goos),
"CGO_ENABLED=0",
"GOBIN=")
}
func Env() []string { return env }
func InitDeps(initPkg string) []string {
if initPkg != "" {
return []string{initPkg}
}
// The default init template requires github.com/gokrazy/gokrazy:
return []string{"github.com/gokrazy/gokrazy"}
}
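// Build cross-compiles the given main packages for the configured GOOS/GOARCH into bindir,
// first running "go get" for any packages that "go list" reports as incomplete.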
func Build(bindir string, packages []string, packageBuildFlags map[string][]string, noBuildPackages []string) error {
done := measure.Interactively("building (go compiler)")
defer done("")
incompletePkgs := make([]string, 0, len(packages)+len(noBuildPackages))
incompletePkgs = append(incompletePkgs, packages...)
incompletePkgs = append(incompletePkgs, noBuildPackages...)
// run “go get” for incomplete packages (most likely just not present)
cmd := exec.Command("go",
append([]string{
"list",
"-mod=mod",
"-e",
"-f", "{{ .ImportPath }} {{ if .Incomplete }}error{{ else }}ok{{ end }}",
}, incompletePkgs...)...)
cmd.Env = env
cmd.Stderr = os.Stderr
output, err := cmd.Output()
if err != nil {
return fmt.Errorf("%v: %v", cmd.Args, err)
}
var incomplete []string
const errorSuffix = " error"
for _, line := range strings.Split(string(output), "\n") {
if !strings.HasSuffix(line, errorSuffix) {
continue
}
incomplete = append(incomplete, strings.TrimSuffix(line, errorSuffix))
}
if len(incomplete) > 0 {
log.Printf("getting incomplete packages %v", incomplete)
cmd = exec.Command("go",
append([]string{
"get",
}, incomplete...)...)
cmd.Env = env
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("%v: %v", cmd.Args, err)
}
}
mainPkgs, err := MainPackages(packages)
if err != nil {
return err
}
var eg errgroup.Group
for _, pkg := range mainPkgs {
pkg := pkg // copy
eg.Go(func() error {
args := []string{
"build",
"-mod=mod",
"-tags", "gokrazy",
"-o", filepath.Join(bindir, filepath.Base(pkg.Target)),
}
if buildFlags := packageBuildFlags[pkg.ImportPath]; len(buildFlags) > 0 {
args = append(args, buildFlags...)
}
args = append(args, pkg.ImportPath)
cmd := exec.Command("go", args...)
cmd.Env = env
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("%v: %v", cmd.Args, err)
}
return nil
})
}
return eg.Wait()
}
type Pkg struct {
Name string `json:"Name"`
ImportPath string `json:"ImportPath"`
Target string `json:"Target"`
}
func (p *Pkg) Basename() string {
return filepath.Base(p.Target)
}
func MainPackages(paths []string) ([]Pkg, error) {
// Shell out to the go tool for path matching (handling “...”)
var buf bytes.Buffer
cmd := exec.Command("go", append([]string{"list", "-tags", "gokrazy", "-json"}, paths...)...)
cmd.Env = env
cmd.Stdout = &buf
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("%v: %v", cmd.Args, err)
}
var result []Pkg
dec := json.NewDecoder(&buf)
for {
var p Pkg
if err := dec.Decode(&p); err == io.EOF {
break
} else if err != nil {
return nil, err
}
if p.Name != "main" {
continue
}
result = append(result, p)
}
return result, nil
}
func PackageDir(pkg string) (string, error) {
cmd := exec.Command("go", "list", "-tags", "gokrazy", "-f", "{{ .Dir }}", pkg)
cmd.Stderr = os.Stderr
b, err := cmd.Output()
if err != nil {
return "", fmt.Errorf("%v: %v", cmd.Args, err)
}
return strings.TrimSpace(string(b)), nil
}
| [
"\"GOARCH\"",
"\"GOOS\""
]
| []
| [
"GOARCH",
"GOOS"
]
| [] | ["GOARCH", "GOOS"] | go | 2 | 0 | |
tests/test_oauth2.py | # -*- coding: utf-8 -*-
import unittest
import os
from wecom_sdk.base.exception import WeComSDKException
from wecom_sdk.oauth2 import WeComAuthAPIClient
# ----- Read secrets from environment variables -----
from environs import Env
Env().read_env()
CORPID = os.environ.get('CORPID')
CONTACT_SECRET = os.environ.get('CONTACT_SECRET')
EXTERNAL_CONTACT_SECRET = os.environ.get('EXTERNAL_CONTACT_SECRET')
# ----- Unit test cases -----
class WeComAuthAPIClientTestCase(unittest.TestCase):
contact_client = WeComAuthAPIClient(CORPID, CONTACT_SECRET)
external_contact_client = WeComAuthAPIClient(CORPID, EXTERNAL_CONTACT_SECRET)
def test_user_auth_with_invalid_code(self):
invalid_code = 'gegwgessege'
with self.assertRaises(WeComSDKException) as e:
self.contact_client.get_userinfo_with_auth_code(invalid_code)
self.assertEqual(e.exception.errcode, '40029')
def test_external_user_auth_with_invalid_code(self):
invalid_code = 'gegwgessege'
with self.assertRaises(WeComSDKException) as e:
self.external_contact_client.get_userinfo_with_auth_code(invalid_code)
self.assertEqual(e.exception.errcode, '40029')
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"EXTERNAL_CONTACT_SECRET",
"CONTACT_SECRET",
"CORPID"
]
| [] | ["EXTERNAL_CONTACT_SECRET", "CONTACT_SECRET", "CORPID"] | python | 3 | 0 | |
ssh_tunnel.go | package main
import (
"fmt"
"net"
"os"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/crypto/ssh/knownhosts"
)
type SSHConnConfig struct {
Host string
Port string
User string
Password string
}
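// NewSSHClient dials host:port over TCP, preferring ssh-agent authentication and falling
// back to password authentication when one is configured. Host keys are verified against
// ~/.ssh/known_hosts when that file can be loaded.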
func NewSSHClient(config *SSHConnConfig) (*ssh.Client, error) {
sshConfig := &ssh.ClientConfig{
User: config.User,
Auth: []ssh.AuthMethod{SSHAgent()},
}
if config.Password != "" {
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(config.Password))
}
if homeDir, err := os.UserHomeDir(); err == nil {
if hostKeyCallback, err := knownhosts.New(fmt.Sprintf("%s/.ssh/known_hosts", homeDir)); err == nil {
sshConfig.HostKeyCallback = hostKeyCallback
}
}
return ssh.Dial("tcp", net.JoinHostPort(config.Host, config.Port), sshConfig)
}
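// SSHAgent returns an AuthMethod backed by the local ssh-agent socket (SSH_AUTH_SOCK),
// or nil when the agent cannot be reached.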
func SSHAgent() ssh.AuthMethod {
if sshAgent, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil {
return ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)
}
return nil
}
| [
"\"SSH_AUTH_SOCK\""
]
| []
| [
"SSH_AUTH_SOCK"
]
| [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 | |
pkg/gui/quitting.go | package gui
import (
"os"
"github.com/jesseduffield/gocui"
)
// when a user runs lazygit with the LAZYGIT_NEW_DIR_FILE env variable defined
// we will write the current directory to that file on exit so that their
// shell can then change to that directory. That means you don't get kicked
// back to the directory that you started with.
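// A minimal sketch of the shell side (not part of this repo; file name assumed):
//   LAZYGIT_NEW_DIR_FILE=/tmp/lazygit_newdir lazygit; cd "$(cat /tmp/lazygit_newdir)"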
func (gui *Gui) recordCurrentDirectory() error {
if os.Getenv("LAZYGIT_NEW_DIR_FILE") == "" {
return nil
}
// determine current directory, set it in LAZYGIT_NEW_DIR_FILE
dirName, err := os.Getwd()
if err != nil {
return err
}
return gui.OSCommand.CreateFileWithContent(os.Getenv("LAZYGIT_NEW_DIR_FILE"), dirName)
}
func (gui *Gui) handleQuitWithoutChangingDirectory(g *gocui.Gui, v *gocui.View) error {
gui.State.RetainOriginalDir = true
return gui.quit()
}
func (gui *Gui) handleQuit() error {
gui.State.RetainOriginalDir = false
return gui.quit()
}
func (gui *Gui) handleTopLevelReturn(g *gocui.Gui, v *gocui.View) error {
currentContext := gui.currentContext()
parentContext, hasParent := currentContext.GetParentContext()
if hasParent && currentContext != nil && parentContext != nil {
// TODO: think about whether this should be marked as a return rather than adding to the stack
return gui.switchContext(parentContext)
}
for _, mode := range gui.modeStatuses() {
if mode.isActive() {
return mode.reset()
}
}
if gui.Config.GetUserConfig().GetBool("quitOnTopLevelReturn") {
return gui.handleQuit()
}
return nil
}
func (gui *Gui) quit() error {
if gui.State.Updating {
return gui.createUpdateQuitConfirmation()
}
if gui.Config.GetUserConfig().GetBool("confirmOnQuit") {
return gui.ask(askOpts{
title: "",
prompt: gui.Tr.SLocalize("ConfirmQuit"),
handleConfirm: func() error {
return gocui.ErrQuit
},
})
}
return gocui.ErrQuit
}
| [
"\"LAZYGIT_NEW_DIR_FILE\"",
"\"LAZYGIT_NEW_DIR_FILE\""
]
| []
| [
"LAZYGIT_NEW_DIR_FILE"
]
| [] | ["LAZYGIT_NEW_DIR_FILE"] | go | 1 | 0 | |
geode-core/src/main/java/org/apache/geode/internal/net/SocketCreator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.internal.net;
import java.io.Console;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;
import java.security.GeneralSecurityException;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.Principal;
import java.security.PrivateKey;
import java.security.UnrecoverableKeyException;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import javax.net.ssl.KeyManager;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SNIHostName;
import javax.net.ssl.SNIServerName;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLException;
import javax.net.ssl.SSLHandshakeException;
import javax.net.ssl.SSLParameters;
import javax.net.ssl.SSLPeerUnverifiedException;
import javax.net.ssl.SSLProtocolException;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.StandardConstants;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509ExtendedKeyManager;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.validator.routines.InetAddressValidator;
import org.apache.logging.log4j.Logger;
import org.apache.geode.GemFireConfigException;
import org.apache.geode.SystemFailure;
import org.apache.geode.annotations.VisibleForTesting;
import org.apache.geode.annotations.internal.MakeNotStatic;
import org.apache.geode.cache.wan.GatewaySender;
import org.apache.geode.cache.wan.GatewayTransportFilter;
import org.apache.geode.distributed.ClientSocketFactory;
import org.apache.geode.distributed.internal.DistributionConfig;
import org.apache.geode.distributed.internal.DistributionConfigImpl;
import org.apache.geode.distributed.internal.tcpserver.AdvancedSocketCreatorImpl;
import org.apache.geode.distributed.internal.tcpserver.HostAndPort;
import org.apache.geode.distributed.internal.tcpserver.TcpSocketCreatorImpl;
import org.apache.geode.internal.ClassPathLoader;
import org.apache.geode.internal.admin.SSLConfig;
import org.apache.geode.internal.cache.wan.TransportFilterServerSocket;
import org.apache.geode.internal.cache.wan.TransportFilterSocketFactory;
import org.apache.geode.internal.inet.LocalHostUtil;
import org.apache.geode.internal.util.ArgumentRedactor;
import org.apache.geode.internal.util.PasswordUtil;
import org.apache.geode.logging.internal.log4j.api.LogService;
import org.apache.geode.net.SSLParameterExtension;
import org.apache.geode.util.internal.GeodeGlossary;
/**
* SocketCreators are built using a SocketCreatorFactory using Geode distributed-system properties.
* They know how to properly configure sockets for TLS (SSL) communications and perform
* handshakes. Connection-initiation uses a HostAndPort instance that is similar to an
* InetSocketAddress.
* <p>
* SocketCreator also supports a client-socket-factory that is designated with the property
* gemfire.clientSocketFactory for use in creating client->server connections.
*/
public class SocketCreator extends TcpSocketCreatorImpl {
private static final Logger logger = LogService.getLogger();
/**
* flag to force always using DNS (regardless of the fact that these lookups can hang)
*/
public static final boolean FORCE_DNS_USE =
Boolean.getBoolean(GeodeGlossary.GEMFIRE_PREFIX + "forceDnsUse");
/**
* set this to false to inhibit host name lookup
*/
@MakeNotStatic
public static volatile boolean resolve_dns = true;
/**
* set this to false to use an inet_addr in a client's ID
*/
@MakeNotStatic
public static volatile boolean use_client_host_name = true;
@MakeNotStatic
private static final ConcurrentHashMap<InetAddress, String> hostNames = new ConcurrentHashMap<>();
/**
* Only print this SocketCreator's config once
*/
private boolean configShown = false;
/**
* Only print hostname validation disabled log once
*/
private boolean hostnameValidationDisabledLogShown = false;
private SSLContext sslContext;
private final SSLConfig sslConfig;
private ClientSocketFactory clientSocketFactory;
/**
* Whether to enable TCP keep alive for sockets. This boolean is controlled by the
* gemfire.setTcpKeepAlive java system property. If not set then GemFire will enable keep-alive on
* server->client and p2p connections.
*/
public static final boolean ENABLE_TCP_KEEP_ALIVE =
AdvancedSocketCreatorImpl.ENABLE_TCP_KEEP_ALIVE;
// -------------------------------------------------------------------------
// Static instance accessors
// -------------------------------------------------------------------------
/**
* This method has migrated to LocalHostUtil but is kept in place here for
* backward-compatibility testing.
*
* @deprecated use LocalHostUtil.getLocalHost()
*/
public static InetAddress getLocalHost() throws UnknownHostException {
return LocalHostUtil.getLocalHost();
}
/**
* returns the host name for the given inet address, using a local cache of names to avoid dns
* hits and duplicate strings
*/
public static String getHostName(InetAddress addr) {
String result = hostNames.get(addr);
if (result == null) {
result = addr.getHostName();
hostNames.put(addr, result);
}
return result;
}
/**
* Reset the hostNames caches
*/
public static void resetHostNameCache() {
hostNames.clear();
}
// -------------------------------------------------------------------------
// Constructor
// -------------------------------------------------------------------------
/**
* Constructs new SocketCreator instance.
*/
public SocketCreator(final SSLConfig sslConfig) {
this.sslConfig = sslConfig;
initialize();
}
/** returns the hostname or address for this client */
public static String getClientHostName() throws UnknownHostException {
InetAddress hostAddr = LocalHostUtil.getLocalHost();
return SocketCreator.use_client_host_name ? hostAddr.getCanonicalHostName()
: hostAddr.getHostAddress();
}
// -------------------------------------------------------------------------
// Initializers (change SocketCreator state)
// -------------------------------------------------------------------------
protected void initializeCreators() {
clusterSocketCreator = new SCClusterSocketCreator(this);
clientSocketCreator = new SCClientSocketCreator(this);
advancedSocketCreator = new SCAdvancedSocketCreator(this);
}
/**
* Initialize this SocketCreator.
* <p>
* Caller must synchronize on the SocketCreator instance.
*/
private void initialize() {
try {
try {
if (this.sslConfig.isEnabled() && getSslContext() == null) {
sslContext = createAndConfigureSSLContext();
}
} catch (Exception e) {
throw new GemFireConfigException("Error configuring GemFire ssl ", e);
}
// make sure TCPConduit picks up p2p properties...
org.apache.geode.internal.tcp.TCPConduit.init();
initializeClientSocketFactory();
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned
// now, so don't let this thread continue.
throw err;
} catch (Error t) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
t.printStackTrace();
throw t;
} catch (RuntimeException re) {
re.printStackTrace();
throw re;
}
}
/**
* Creates & configures the SSLContext when SSL is enabled.
*
* @return new SSLContext configured using the given protocols & properties
*
* @throws GeneralSecurityException if security information can not be found
* @throws IOException if information can not be loaded
*/
private SSLContext createAndConfigureSSLContext() throws GeneralSecurityException, IOException {
if (sslConfig.useDefaultSSLContext()) {
return SSLContext.getDefault();
}
SSLContext newSSLContext = SSLUtil.getSSLContextInstance(sslConfig);
KeyManager[] keyManagers = getKeyManagers();
TrustManager[] trustManagers = getTrustManagers();
newSSLContext.init(keyManagers, trustManagers, null /* use the default secure random */);
return newSSLContext;
}
/**
* Used by SystemAdmin to read the properties from console
*
* @param env Map in which the properties are to be read from console.
*/
public static void readSSLProperties(Map<String, String> env) {
readSSLProperties(env, false);
}
/**
* Used to read the properties from console. AgentLauncher calls this method directly & ignores
* gemfire.properties. SystemAdmin calls this through {@link #readSSLProperties(Map)} and does
* NOT ignore gemfire.properties.
*
* @param env Map in which the properties are to be read from console.
* @param ignoreGemFirePropsFile if <code>false</code> existing gemfire.properties file is read,
* if <code>true</code>, properties from gemfire.properties file are ignored.
*/
public static void readSSLProperties(Map<String, String> env, boolean ignoreGemFirePropsFile) {
Properties props = new Properties();
DistributionConfigImpl.loadGemFireProperties(props, ignoreGemFirePropsFile);
for (Map.Entry<Object, Object> ent : props.entrySet()) {
String key = (String) ent.getKey();
// if the value of ssl props is empty, read them from console
if (key.startsWith(DistributionConfig.SSL_SYSTEM_PROPS_NAME)
|| key.startsWith(DistributionConfig.SYS_PROP_NAME)) {
if (key.startsWith(DistributionConfig.SYS_PROP_NAME)) {
key = key.substring(DistributionConfig.SYS_PROP_NAME.length());
}
final String value = (String) ent.getValue();
if (value == null || value.trim().equals("")) {
Console console = System.console();
if (console == null) {
throw new GemFireConfigException(
"SSL properties are empty, but a console is not available");
}
String val = console.readLine("Please enter " + key + ": ");
env.put(key, val);
}
}
}
}
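  /**
   * Builds the trust managers from the configured truststore, falling back to the JVM's default
   * keystore type when no truststore type is configured.
   */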
private TrustManager[] getTrustManagers()
throws KeyStoreException, NoSuchAlgorithmException, CertificateException, IOException {
TrustManager[] trustManagers;
String trustStoreType = sslConfig.getTruststoreType();
if (StringUtils.isEmpty(trustStoreType)) {
trustStoreType = KeyStore.getDefaultType();
}
KeyStore ts = KeyStore.getInstance(trustStoreType);
String trustStorePath = sslConfig.getTruststore();
FileInputStream fis = new FileInputStream(trustStorePath);
String passwordString = sslConfig.getTruststorePassword();
char[] password = null;
if (passwordString != null) {
if (passwordString.trim().equals("")) {
if (!StringUtils.isEmpty(passwordString)) {
String toDecrypt = "encrypted(" + passwordString + ")";
passwordString = PasswordUtil.decrypt(toDecrypt);
password = passwordString.toCharArray();
}
} else {
password = passwordString.toCharArray();
}
}
ts.load(fis, password);
// default algorithm can be changed by setting property "ssl.TrustManagerFactory.algorithm" in
// security properties
TrustManagerFactory tmf =
TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
tmf.init(ts);
trustManagers = tmf.getTrustManagers();
// follow the security tip in java doc
if (password != null) {
java.util.Arrays.fill(password, ' ');
}
return trustManagers;
}
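  /**
   * Builds the key managers from the configured keystore, wrapping each one in an
   * {@link ExtendedAliasKeyManager} so that a configured certificate alias is honored.
   */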
private KeyManager[] getKeyManagers() throws KeyStoreException, IOException,
NoSuchAlgorithmException, CertificateException, UnrecoverableKeyException {
if (sslConfig.getKeystore() == null) {
return null;
}
KeyManager[] keyManagers;
String keyStoreType = sslConfig.getKeystoreType();
if (StringUtils.isEmpty(keyStoreType)) {
keyStoreType = KeyStore.getDefaultType();
}
KeyStore keyStore = KeyStore.getInstance(keyStoreType);
String keyStoreFilePath = sslConfig.getKeystore();
if (StringUtils.isEmpty(keyStoreFilePath)) {
keyStoreFilePath =
System.getProperty("user.home") + System.getProperty("file.separator") + ".keystore";
}
FileInputStream fileInputStream = new FileInputStream(keyStoreFilePath);
String passwordString = sslConfig.getKeystorePassword();
char[] password = null;
if (passwordString != null) {
if (passwordString.trim().equals("")) {
String encryptedPass = System.getenv("javax.net.ssl.keyStorePassword");
if (!StringUtils.isEmpty(encryptedPass)) {
String toDecrypt = "encrypted(" + encryptedPass + ")";
passwordString = PasswordUtil.decrypt(toDecrypt);
password = passwordString.toCharArray();
}
} else {
password = passwordString.toCharArray();
}
}
keyStore.load(fileInputStream, password);
// default algorithm can be changed by setting property "ssl.KeyManagerFactory.algorithm" in
// security properties
KeyManagerFactory keyManagerFactory =
KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
keyManagerFactory.init(keyStore, password);
keyManagers = keyManagerFactory.getKeyManagers();
// follow the security tip in java doc
if (password != null) {
java.util.Arrays.fill(password, ' ');
}
KeyManager[] extendedKeyManagers = new KeyManager[keyManagers.length];
    for (int i = 0; i < keyManagers.length; i++) {
extendedKeyManagers[i] = new ExtendedAliasKeyManager(keyManagers[i], sslConfig.getAlias());
}
return extendedKeyManagers;
}
/**
* context for SSL socket factories
*/
@VisibleForTesting
public SSLContext getSslContext() {
return sslContext;
}
/**
* A factory used to create client <code>Sockets</code>.
*/
public ClientSocketFactory getClientSocketFactory() {
return clientSocketFactory;
}
public SSLConfig getSslConfig() {
return sslConfig;
}
/**
* ExtendedAliasKeyManager supports use of certificate aliases in distributed system
* properties.
*/
private static class ExtendedAliasKeyManager extends X509ExtendedKeyManager {
private final X509ExtendedKeyManager delegate;
private final String keyAlias;
/**
* Constructor.
*
* @param mgr The X509KeyManager used as a delegate
* @param keyAlias The alias name of the server's keypair and supporting certificate chain
*/
ExtendedAliasKeyManager(KeyManager mgr, String keyAlias) {
this.delegate = (X509ExtendedKeyManager) mgr;
this.keyAlias = keyAlias;
}
@Override
public String[] getClientAliases(final String s, final Principal[] principals) {
return delegate.getClientAliases(s, principals);
}
@Override
public String chooseClientAlias(final String[] strings, final Principal[] principals,
final Socket socket) {
if (!StringUtils.isEmpty(this.keyAlias)) {
return keyAlias;
}
return delegate.chooseClientAlias(strings, principals, socket);
}
@Override
public String[] getServerAliases(final String s, final Principal[] principals) {
return delegate.getServerAliases(s, principals);
}
@Override
public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) {
if (!StringUtils.isEmpty(this.keyAlias)) {
PrivateKey key = this.delegate.getPrivateKey(this.keyAlias);
return getKeyAlias(keyType, key);
}
return this.delegate.chooseServerAlias(keyType, issuers, socket);
}
@Override
public X509Certificate[] getCertificateChain(final String s) {
if (!StringUtils.isEmpty(this.keyAlias)) {
return delegate.getCertificateChain(keyAlias);
}
return delegate.getCertificateChain(s);
}
@Override
public PrivateKey getPrivateKey(final String alias) {
return delegate.getPrivateKey(alias);
}
@Override
public String chooseEngineClientAlias(String[] keyTypes, Principal[] principals,
SSLEngine sslEngine) {
return delegate.chooseEngineClientAlias(keyTypes, principals, sslEngine);
}
@Override
public String chooseEngineServerAlias(final String keyType, final Principal[] principals,
final SSLEngine sslEngine) {
if (!StringUtils.isEmpty(this.keyAlias)) {
PrivateKey key = this.delegate.getPrivateKey(this.keyAlias);
return getKeyAlias(keyType, key);
}
return this.delegate.chooseEngineServerAlias(keyType, principals, sslEngine);
}
private String getKeyAlias(final String keyType, final PrivateKey key) {
if (key != null) {
if (key.getAlgorithm().equals(keyType)) {
return this.keyAlias;
} else {
return null;
}
} else {
return null;
}
}
}
/**
* Returns true if this SocketCreator is configured to use SSL.
*/
@Override
protected boolean useSSL() {
return this.sslConfig.isEnabled();
}
// -------------------------------------------------------------------------
// Public methods
// -------------------------------------------------------------------------
/**
* Returns an SSLEngine that can be used to perform TLS handshakes and communication
*/
public SSLEngine createSSLEngine(String hostName, int port) {
SSLEngine engine = getSslContext().createSSLEngine(hostName, port);
if (sslConfig.doEndpointIdentification()) {
// set server-names so that endpoint identification algorithms can find what's expected
SSLParameters parameters = engine.getSSLParameters();
if (setServerNames(parameters, new HostAndPort(hostName, port))) {
engine.setSSLParameters(parameters);
}
}
return engine;
}
/**
   * @see <a
   *      href="https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#SSLENG">JSSE
   *      Reference Guide</a>
*
* @param socketChannel the socket's NIO channel
* @param engine the sslEngine (see createSSLEngine)
* @param timeout handshake timeout in milliseconds. No timeout if <= 0
* @param clientSocket set to true if you initiated the connect(), false if you accepted it
   * @param peerNetBuffer the buffer to use in reading data from socketChannel. This should also be
* used in subsequent I/O operations
* @return The SSLEngine to be used in processing data for sending/receiving from the channel
*/
public NioSslEngine handshakeSSLSocketChannel(SocketChannel socketChannel, SSLEngine engine,
int timeout,
boolean clientSocket,
ByteBuffer peerNetBuffer,
BufferPool bufferPool)
throws IOException {
engine.setUseClientMode(clientSocket);
if (!clientSocket) {
engine.setNeedClientAuth(sslConfig.isRequireAuth());
}
if (clientSocket) {
SSLParameters modifiedParams = checkAndEnableHostnameValidation(engine.getSSLParameters());
engine.setSSLParameters(modifiedParams);
}
while (!socketChannel.finishConnect()) {
try {
Thread.sleep(50);
} catch (InterruptedException e) {
if (!socketChannel.socket().isClosed()) {
socketChannel.close();
}
throw new IOException("Interrupted while performing handshake", e);
}
}
NioSslEngine nioSslEngine = new NioSslEngine(engine, bufferPool);
boolean blocking = socketChannel.isBlocking();
if (blocking) {
socketChannel.configureBlocking(false);
}
try {
nioSslEngine.handshake(socketChannel, timeout, peerNetBuffer);
} catch (SSLException e) {
if (!socketChannel.socket().isClosed()) {
socketChannel.close();
}
logger.warn("SSL handshake exception", e);
throw e;
} catch (InterruptedException e) {
if (!socketChannel.socket().isClosed()) {
socketChannel.close();
}
throw new IOException("SSL handshake interrupted");
} finally {
if (blocking) {
try {
socketChannel.configureBlocking(true);
} catch (IOException ignored) {
// problem setting the socket back to blocking mode but the socket's going to be closed
}
}
}
return nioSslEngine;
}
private SSLParameters checkAndEnableHostnameValidation(SSLParameters sslParameters) {
if (sslConfig.doEndpointIdentification()) {
sslParameters.setEndpointIdentificationAlgorithm("HTTPS");
} else {
if (!hostnameValidationDisabledLogShown) {
logger.info("Your SSL configuration disables hostname validation. "
+ "ssl-endpoint-identification-enabled should be set to true when SSL is enabled. "
+ "Please refer to the Apache GEODE SSL Documentation for SSL Property: ssl‑endpoint‑identification‑enabled");
hostnameValidationDisabledLogShown = true;
}
}
return sslParameters;
}
/**
* Use this method to perform the SSL handshake on a newly accepted socket. Non-SSL
* sockets are ignored by this method.
*
* @param timeout the number of milliseconds allowed for the handshake to complete
*/
void handshakeIfSocketIsSSL(Socket socket, int timeout) throws IOException {
if (!(socket instanceof SSLSocket)) {
return;
}
int oldTimeout = socket.getSoTimeout();
socket.setSoTimeout(timeout);
SSLSocket sslSocket = (SSLSocket) socket;
try {
sslSocket.startHandshake();
} catch (SSLPeerUnverifiedException ex) {
if (this.sslConfig.isRequireAuth()) {
logger.fatal(String.format("SSL Error in authenticating peer %s[%s].",
socket.getInetAddress(), socket.getPort()), ex);
throw ex;
}
}
    // Pre-JDK 11, startHandshake throws SocketTimeoutException.
    // In JDK 11 it throws SSLProtocolException with a cause of SocketTimeoutException.
    // This keeps the exception consistent across JDK versions.
catch (SSLProtocolException ex) {
if (ex.getCause() instanceof SocketTimeoutException) {
throw (SocketTimeoutException) ex.getCause();
} else {
throw ex;
}
} finally {
try {
socket.setSoTimeout(oldTimeout);
} catch (SocketException ignored) {
}
}
}
/**
* Create a server socket with the given transport filters.<br>
* Note: This method is outside of the
* client/server/advanced interfaces because it references WAN classes that aren't
* available to them.
*/
public ServerSocket createServerSocket(int nport, int backlog, InetAddress bindAddr,
List<GatewayTransportFilter> transportFilters, int socketBufferSize) throws IOException {
if (transportFilters.isEmpty()) {
return ((SCClusterSocketCreator) forCluster())
.createServerSocket(nport, backlog, bindAddr, socketBufferSize, useSSL());
} else {
printConfig();
ServerSocket result = new TransportFilterServerSocket(transportFilters);
result.setReuseAddress(true);
// Set the receive buffer size before binding the socket so
// that large buffers will be allocated on accepted sockets (see
// java.net.ServerSocket.setReceiverBufferSize javadocs)
result.setReceiveBufferSize(socketBufferSize);
try {
result.bind(new InetSocketAddress(bindAddr, nport), backlog);
} catch (BindException e) {
BindException throwMe = new BindException(
String.format("Failed to create server socket on %s[%s]", bindAddr, nport));
throwMe.initCause(e);
throw throwMe;
}
return result;
}
}
// -------------------------------------------------------------------------
// Private implementation methods
// -------------------------------------------------------------------------
/**
* When a socket is connected to a server socket, it should be passed to this method for SSL
* configuration.
*/
void configureClientSSLSocket(Socket socket, HostAndPort addr, int timeout) throws IOException {
if (socket instanceof SSLSocket) {
SSLSocket sslSocket = (SSLSocket) socket;
sslSocket.setUseClientMode(true);
sslSocket.setEnableSessionCreation(true);
SSLParameters modifiedParams =
checkAndEnableHostnameValidation(sslSocket.getSSLParameters());
setServerNames(modifiedParams, addr);
SSLParameterExtension sslParameterExtension = this.sslConfig.getSSLParameterExtension();
if (sslParameterExtension != null) {
modifiedParams =
sslParameterExtension.modifySSLClientSocketParameters(modifiedParams);
}
sslSocket.setSSLParameters(modifiedParams);
String[] protocols = this.sslConfig.getProtocolsAsStringArray();
// restrict cyphers
if (protocols != null && !"any".equalsIgnoreCase(protocols[0])) {
sslSocket.setEnabledProtocols(protocols);
}
String[] ciphers = this.sslConfig.getCiphersAsStringArray();
if (ciphers != null && !"any".equalsIgnoreCase(ciphers[0])) {
sslSocket.setEnabledCipherSuites(ciphers);
}
try {
if (timeout > 0) {
sslSocket.setSoTimeout(timeout);
}
sslSocket.startHandshake();
}
      // Pre-JDK 11, startHandshake throws SocketTimeoutException.
      // In JDK 11 it throws SSLProtocolException with a cause of SocketTimeoutException.
      // This keeps the exception consistent across JDK versions.
catch (SSLProtocolException ex) {
if (ex.getCause() instanceof SocketTimeoutException) {
throw (SocketTimeoutException) ex.getCause();
} else {
throw ex;
}
} catch (SSLHandshakeException ex) {
logger.fatal(String.format("Problem forming SSL connection to %s[%s].",
socket.getInetAddress(), socket.getPort()), ex);
throw ex;
} catch (SSLPeerUnverifiedException ex) {
if (this.sslConfig.isRequireAuth()) {
logger.fatal("SSL authentication exception.", ex);
throw ex;
}
}
}
}
/**
* returns true if the SSLParameters are altered, false if not
*/
private boolean setServerNames(SSLParameters modifiedParams, HostAndPort addr) {
List<SNIServerName> oldNames = modifiedParams.getServerNames();
oldNames = oldNames == null ? Collections.emptyList() : oldNames;
final List<SNIServerName> serverNames = new ArrayList<>(oldNames);
if (serverNames.stream()
.mapToInt(SNIServerName::getType)
.anyMatch(type -> type == StandardConstants.SNI_HOST_NAME)) {
// we already have a SNI hostname set. Do nothing.
return false;
}
String hostName = addr.getHostName();
if (this.sslConfig.doEndpointIdentification()
&& InetAddressValidator.getInstance().isValid(hostName)) {
// endpoint validation typically uses a hostname in the sniServer parameter that the handshake
// will compare against the subject alternative addresses in the server's certificate. Here
// we attempt to get a hostname instead of the proffered numeric address
try {
hostName = InetAddress.getByName(hostName).getCanonicalHostName();
} catch (UnknownHostException e) {
// ignore - we'll see what happens with endpoint validation using a numeric address...
}
}
serverNames.add(new SNIHostName(hostName));
modifiedParams.setServerNames(serverNames);
return true;
}
/**
* Print current configured state to log.
*/
void printConfig() {
if (!configShown && logger.isDebugEnabled()) {
configShown = true;
StringBuilder sb = new StringBuilder();
sb.append("SSL Configuration: \n");
sb.append(" ssl-enabled = ").append(this.sslConfig.isEnabled()).append("\n");
// add other options here....
for (String key : System.getProperties().stringPropertyNames()) { // fix for 46822
if (key.startsWith("javax.net.ssl")) {
String possiblyRedactedValue =
ArgumentRedactor.redactArgumentIfNecessary(key, System.getProperty(key));
sb.append(" ").append(key).append(" = ").append(possiblyRedactedValue).append("\n");
}
}
logger.debug(sb.toString());
}
}
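  /**
   * Instantiates the ClientSocketFactory named by the gemfire.clientSocketFactory system
   * property, if one is set.
   */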
protected void initializeClientSocketFactory() {
this.clientSocketFactory = null;
String className =
System.getProperty(GeodeGlossary.GEMFIRE_PREFIX + "clientSocketFactory");
if (className != null) {
Object o;
try {
Class c = ClassPathLoader.getLatest().forName(className);
o = c.newInstance();
} catch (Exception e) {
// No cache exists yet, so this can't be logged.
String s = "An unexpected exception occurred while instantiating a " + className + ": " + e;
throw new IllegalArgumentException(s);
}
if (o instanceof ClientSocketFactory) {
this.clientSocketFactory = (ClientSocketFactory) o;
} else {
String s = "Class \"" + className + "\" is not a ClientSocketFactory";
throw new IllegalArgumentException(s);
}
}
}
public void initializeTransportFilterClientSocketFactory(GatewaySender sender) {
this.clientSocketFactory = new TransportFilterSocketFactory()
.setGatewayTransportFilters(sender.getGatewayTransportFilters());
}
}
| [
"\"javax.net.ssl.keyStorePassword\""
]
| []
| [
"javax.net.ssl.keyStorePassword"
]
| [] | ["javax.net.ssl.keyStorePassword"] | java | 1 | 0 | |
tests/contact/forms_test.py | import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portfolio.settings')
django.setup()
from portfolio.contact.forms import MessageForm
from django.test import TestCase
class TestForms(TestCase):
def test_comment_form_valid_data(self):
form = MessageForm({
'name': 'Test',
'email': '[email protected]',
'subject': 'Test',
'body': 'Test',
})
self.assertTrue(form.is_valid())
def test_comment_form_has_no_data(self):
form = MessageForm({})
self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 4) | []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/kube_config_manager/kube_config_manager.go | package kube_config_manager
import (
"context"
"fmt"
"os"
"time"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
corev1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"github.com/flant/shell-operator/pkg/kube"
"github.com/flant/addon-operator/pkg/utils"
)
type KubeConfigManager interface {
WithContext(ctx context.Context)
WithKubeClient(client kube.KubernetesClient)
WithNamespace(namespace string)
WithConfigMapName(configMap string)
WithValuesChecksumsAnnotation(annotation string)
SetKubeGlobalValues(values utils.Values) error
SetKubeModuleValues(moduleName string, values utils.Values) error
Init() error
Start()
Stop()
InitialConfig() *Config
CurrentConfig() *Config
}
type kubeConfigManager struct {
ctx context.Context
cancel context.CancelFunc
KubeClient kube.KubernetesClient
Namespace string
ConfigMapName string
initialConfig *Config
currentConfig *Config
GlobalValuesChecksum string
ModulesValuesChecksum map[string]string
}
// kubeConfigManager should implement KubeConfigManager
var _ KubeConfigManager = &kubeConfigManager{}
type ModuleConfigs map[string]utils.ModuleConfig
func (m ModuleConfigs) Names() []string {
names := make([]string, 0)
for _, newModuleConfig := range m {
names = append(names, fmt.Sprintf("'%s'", newModuleConfig.ModuleName))
}
return names
}
type Config struct {
Values utils.Values
ModuleConfigs ModuleConfigs
}
func NewConfig() *Config {
return &Config{
Values: make(utils.Values),
ModuleConfigs: make(map[string]utils.ModuleConfig),
}
}
var (
VerboseDebug bool
// ConfigUpdated chan receives a new Config when global values are changed
ConfigUpdated chan Config
// ModuleConfigsUpdated chan receives a list of all ModuleConfig in configData. Updated items marked as IsUpdated.
ModuleConfigsUpdated chan ModuleConfigs
)
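// simpleMergeConfigMapData copies every key from newData into data, overwriting existing
// keys, and returns the merged map.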
func simpleMergeConfigMapData(data map[string]string, newData map[string]string) map[string]string {
for k, v := range newData {
data[k] = v
}
return data
}
func (kcm *kubeConfigManager) WithContext(ctx context.Context) {
kcm.ctx, kcm.cancel = context.WithCancel(ctx)
}
func (kcm *kubeConfigManager) Stop() {
if kcm.cancel != nil {
kcm.cancel()
}
}
func (kcm *kubeConfigManager) WithKubeClient(client kube.KubernetesClient) {
kcm.KubeClient = client
}
func (kcm *kubeConfigManager) saveGlobalKubeConfig(globalKubeConfig GlobalKubeConfig) error {
err := kcm.changeOrCreateKubeConfig(func(obj *v1.ConfigMap) error {
obj.Data = simpleMergeConfigMapData(obj.Data, globalKubeConfig.ConfigData)
return nil
})
if err != nil {
return err
}
// If ConfigMap is updated, save checksum for global section.
kcm.GlobalValuesChecksum = globalKubeConfig.Checksum
return nil
}
func (kcm *kubeConfigManager) saveModuleKubeConfig(moduleKubeConfig ModuleKubeConfig) error {
err := kcm.changeOrCreateKubeConfig(func(obj *v1.ConfigMap) error {
obj.Data = simpleMergeConfigMapData(obj.Data, moduleKubeConfig.ConfigData)
return nil
})
if err != nil {
return err
}
// TODO add a mutex for this map? Config patch from hook can run in parallel with ConfigMap editing...
kcm.ModulesValuesChecksum[moduleKubeConfig.ModuleName] = moduleKubeConfig.Checksum
return nil
}
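// changeOrCreateKubeConfig applies configChangeFunc to the data of the existing ConfigMap and
// updates it, or creates a new ConfigMap when none exists yet.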
func (kcm *kubeConfigManager) changeOrCreateKubeConfig(configChangeFunc func(*v1.ConfigMap) error) error {
var err error
obj, err := kcm.getConfigMap()
if err != nil {
		return err
}
if obj != nil {
if obj.Data == nil {
obj.Data = make(map[string]string)
}
err = configChangeFunc(obj)
if err != nil {
return err
}
_, err := kcm.KubeClient.CoreV1().ConfigMaps(kcm.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
if err != nil {
return err
}
return nil
} else {
obj := &v1.ConfigMap{}
obj.Name = kcm.ConfigMapName
obj.Data = make(map[string]string)
err = configChangeFunc(obj)
if err != nil {
return err
}
_, err := kcm.KubeClient.CoreV1().ConfigMaps(kcm.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err != nil {
return err
}
return nil
}
}
func (kcm *kubeConfigManager) WithNamespace(namespace string) {
kcm.Namespace = namespace
}
func (kcm *kubeConfigManager) WithConfigMapName(configMap string) {
kcm.ConfigMapName = configMap
}
// The annotation is no longer needed; this no-op is kept to satisfy the KubeConfigManager interface.
func (kcm *kubeConfigManager) WithValuesChecksumsAnnotation(_ string) {
}
func (kcm *kubeConfigManager) SetKubeGlobalValues(values utils.Values) error {
globalKubeConfig, err := GetGlobalKubeConfigFromValues(values)
if err != nil {
return err
}
if globalKubeConfig != nil {
log.Debugf("Kube config manager: set kube global values:\n%s", values.DebugString())
err := kcm.saveGlobalKubeConfig(*globalKubeConfig)
if err != nil {
return err
}
}
return nil
}
func (kcm *kubeConfigManager) SetKubeModuleValues(moduleName string, values utils.Values) error {
moduleKubeConfig, err := GetModuleKubeConfigFromValues(moduleName, values)
if err != nil {
return err
}
if moduleKubeConfig != nil {
log.Debugf("Kube config manager: set kube module values:\n%s", moduleKubeConfig.ModuleConfig.String())
err := kcm.saveModuleKubeConfig(*moduleKubeConfig)
if err != nil {
return err
}
}
return nil
}
func (kcm *kubeConfigManager) getConfigMap() (*v1.ConfigMap, error) {
list, err := kcm.KubeClient.CoreV1().
ConfigMaps(kcm.Namespace).
List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
objExists := false
for _, obj := range list.Items {
if obj.ObjectMeta.Name == kcm.ConfigMapName {
objExists = true
break
}
}
if objExists {
obj, err := kcm.KubeClient.CoreV1().
ConfigMaps(kcm.Namespace).
Get(context.TODO(), kcm.ConfigMapName, metav1.GetOptions{})
if err != nil {
return nil, err
}
log.Debugf("KUBE_CONFIG_MANAGER: Will use ConfigMap/%s for persistent values", kcm.ConfigMapName)
return obj, nil
} else {
log.Debugf("KUBE_CONFIG_MANAGER: ConfigMap/%s is not created", kcm.ConfigMapName)
return nil, nil
}
}
func (kcm *kubeConfigManager) InitialConfig() *Config {
return kcm.initialConfig
}
func (kcm *kubeConfigManager) CurrentConfig() *Config {
return kcm.currentConfig
}
func NewKubeConfigManager() KubeConfigManager {
return &kubeConfigManager{
initialConfig: NewConfig(),
currentConfig: NewConfig(),
ModulesValuesChecksum: map[string]string{},
}
}
func (kcm *kubeConfigManager) initConfig() error {
obj, err := kcm.getConfigMap()
if err != nil {
return err
}
if obj == nil {
log.Infof("Init config from ConfigMap: cm/%s is not found", kcm.ConfigMapName)
return nil
}
initialConfig := NewConfig()
globalValuesChecksum := ""
modulesValuesChecksum := make(map[string]string)
globalKubeConfig, err := GetGlobalKubeConfigFromConfigData(obj.Data)
if err != nil {
return err
}
if globalKubeConfig != nil {
initialConfig.Values = globalKubeConfig.Values
globalValuesChecksum = globalKubeConfig.Checksum
}
for moduleName := range GetModulesNamesFromConfigData(obj.Data) {
		// every module name returned by GetModulesNamesFromConfigData must have a section in obj.Data
moduleKubeConfig, err := ExtractModuleKubeConfig(moduleName, obj.Data)
if err != nil {
return err
}
initialConfig.ModuleConfigs[moduleKubeConfig.ModuleName] = moduleKubeConfig.ModuleConfig
modulesValuesChecksum[moduleKubeConfig.ModuleName] = moduleKubeConfig.Checksum
}
kcm.initialConfig = initialConfig
kcm.currentConfig = initialConfig
kcm.GlobalValuesChecksum = globalValuesChecksum
kcm.ModulesValuesChecksum = modulesValuesChecksum
return nil
}
func (kcm *kubeConfigManager) Init() error {
log.Debug("INIT: KUBE_CONFIG")
VerboseDebug = false
if os.Getenv("KUBE_CONFIG_MANAGER_DEBUG") != "" {
VerboseDebug = true
}
ConfigUpdated = make(chan Config, 1)
ModuleConfigsUpdated = make(chan ModuleConfigs, 1)
err := kcm.initConfig()
if err != nil {
return err
}
return nil
}
// handleNewCm determines changes in the kube config.
//
// A new Config is sent over the ConfigUpdated channel if the global section is changed.
//
// A map of actual ModuleConfig values is sent over the ModuleConfigsUpdated channel
// if module sections are changed or deleted.
func (kcm *kubeConfigManager) handleNewCm(obj *v1.ConfigMap) error {
globalKubeConfig, err := GetGlobalKubeConfigFromConfigData(obj.Data)
if err != nil {
return err
}
// if global values are changed or deleted then new config should be sent over ConfigUpdated channel
isGlobalUpdated := globalKubeConfig != nil &&
globalKubeConfig.Checksum != kcm.GlobalValuesChecksum
isGlobalDeleted := globalKubeConfig == nil && kcm.GlobalValuesChecksum != ""
if isGlobalUpdated || isGlobalDeleted {
log.Infof("Kube config manager: detect changes in global section")
newConfig := NewConfig()
		// calculate the new checksum of the global section
newGlobalValuesChecksum := ""
if globalKubeConfig != nil {
newConfig.Values = globalKubeConfig.Values
newGlobalValuesChecksum = globalKubeConfig.Checksum
}
kcm.GlobalValuesChecksum = newGlobalValuesChecksum
		// calculate new checksums of the module sections
newModulesValuesChecksum := make(map[string]string)
for moduleName := range GetModulesNamesFromConfigData(obj.Data) {
			// every module name returned by GetModulesNamesFromConfigData must have a section in obj.Data
moduleKubeConfig, err := ExtractModuleKubeConfig(moduleName, obj.Data)
if err != nil {
return err
}
newConfig.ModuleConfigs[moduleKubeConfig.ModuleName] = moduleKubeConfig.ModuleConfig
newModulesValuesChecksum[moduleKubeConfig.ModuleName] = moduleKubeConfig.Checksum
}
kcm.ModulesValuesChecksum = newModulesValuesChecksum
log.Debugf("Kube config manager: global section new values:\n%s",
newConfig.Values.DebugString())
for _, moduleConfig := range newConfig.ModuleConfigs {
log.Debugf("%s", moduleConfig.String())
}
ConfigUpdated <- *newConfig
kcm.currentConfig = newConfig
} else {
actualModulesNames := GetModulesNamesFromConfigData(obj.Data)
moduleConfigsActual := make(ModuleConfigs)
updatedCount := 0
removedCount := 0
// create ModuleConfig for each module in configData
// IsUpdated flag set for updated configs
for moduleName := range actualModulesNames {
			// every module name returned by GetModulesNamesFromConfigData must have a section in obj.Data
moduleKubeConfig, err := ExtractModuleKubeConfig(moduleName, obj.Data)
if err != nil {
return err
}
if moduleKubeConfig.Checksum != kcm.ModulesValuesChecksum[moduleName] {
kcm.ModulesValuesChecksum[moduleName] = moduleKubeConfig.Checksum
moduleKubeConfig.ModuleConfig.IsUpdated = true
updatedCount++
} else {
moduleKubeConfig.ModuleConfig.IsUpdated = false
}
moduleConfigsActual[moduleName] = moduleKubeConfig.ModuleConfig
}
// delete checksums for removed module sections
for module := range kcm.ModulesValuesChecksum {
if _, isActual := actualModulesNames[module]; isActual {
continue
}
delete(kcm.ModulesValuesChecksum, module)
removedCount++
}
if updatedCount > 0 || removedCount > 0 {
log.Infof("KUBE_CONFIG Detect module sections changes: %d updated, %d removed", updatedCount, removedCount)
for _, moduleConfig := range moduleConfigsActual {
log.Debugf("%s", moduleConfig.String())
}
ModuleConfigsUpdated <- moduleConfigsActual
kcm.currentConfig.ModuleConfigs = moduleConfigsActual
}
}
return nil
}
func (kcm *kubeConfigManager) handleCmAdd(obj *v1.ConfigMap) error {
if VerboseDebug {
objYaml, err := yaml.Marshal(obj)
if err != nil {
return err
}
log.Debugf("Kube config manager: informer: handle ConfigMap '%s' add:\n%s", obj.Name, objYaml)
}
return kcm.handleNewCm(obj)
}
func (kcm *kubeConfigManager) handleCmUpdate(_ *v1.ConfigMap, obj *v1.ConfigMap) error {
if VerboseDebug {
objYaml, err := yaml.Marshal(obj)
if err != nil {
return err
}
log.Debugf("Kube config manager: informer: handle ConfigMap '%s' update:\n%s", obj.Name, objYaml)
}
return kcm.handleNewCm(obj)
}
func (kcm *kubeConfigManager) handleCmDelete(obj *v1.ConfigMap) error {
if VerboseDebug {
objYaml, err := yaml.Marshal(obj)
if err != nil {
return err
}
log.Debugf("Kube config manager: handle ConfigMap '%s' delete:\n%s", obj.Name, objYaml)
}
if kcm.GlobalValuesChecksum != "" {
kcm.GlobalValuesChecksum = ""
kcm.ModulesValuesChecksum = make(map[string]string)
ConfigUpdated <- Config{
Values: make(utils.Values),
ModuleConfigs: make(map[string]utils.ModuleConfig),
}
} else {
		// Global values are already known to be empty.
// So check each module values change separately,
// and generate signals per-module.
// Note: Only ModuleName field is needed in ModuleConfig.
moduleConfigsUpdate := make(ModuleConfigs)
updateModulesNames := make([]string, 0)
for module := range kcm.ModulesValuesChecksum {
updateModulesNames = append(updateModulesNames, module)
}
for _, module := range updateModulesNames {
delete(kcm.ModulesValuesChecksum, module)
moduleConfigsUpdate[module] = utils.ModuleConfig{
ModuleName: module,
Values: make(utils.Values),
}
}
ModuleConfigsUpdated <- moduleConfigsUpdate
}
return nil
}
func (kcm *kubeConfigManager) Start() {
log.Debugf("Run kube config manager")
// define resyncPeriod for informer
resyncPeriod := time.Duration(5) * time.Minute
// define indexers for informer
indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
// define tweakListOptions for informer
tweakListOptions := func(options *metav1.ListOptions) {
options.FieldSelector = fields.OneTermEqualSelector("metadata.name", kcm.ConfigMapName).String()
}
cmInformer := corev1.NewFilteredConfigMapInformer(kcm.KubeClient, kcm.Namespace, resyncPeriod, indexers, tweakListOptions)
cmInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
err := kcm.handleCmAdd(obj.(*v1.ConfigMap))
if err != nil {
log.Errorf("Kube config manager: cannot handle ConfigMap add: %s", err)
}
},
UpdateFunc: func(prevObj interface{}, obj interface{}) {
err := kcm.handleCmUpdate(prevObj.(*v1.ConfigMap), obj.(*v1.ConfigMap))
if err != nil {
log.Errorf("Kube config manager: cannot handle ConfigMap update: %s", err)
}
},
DeleteFunc: func(obj interface{}) {
err := kcm.handleCmDelete(obj.(*v1.ConfigMap))
if err != nil {
log.Errorf("Kube config manager: cannot handle ConfigMap delete: %s", err)
}
},
})
cmInformer.Run(kcm.ctx.Done())
}
| [
"\"KUBE_CONFIG_MANAGER_DEBUG\""
]
| []
| [
"KUBE_CONFIG_MANAGER_DEBUG"
]
| [] | ["KUBE_CONFIG_MANAGER_DEBUG"] | go | 1 | 0 | |
unicode/unicode.go | // Copyright 2012 The rspace Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Unicode is a command-line tool for studying Unicode characters.
usage: unicode [-c] [-d] [-n] [-t]
-c: args are hex; output characters (xyz)
-n: args are characters; output hex (23 or 23-44)
-g: args are regular expressions for matching names
-d: output textual description
-t: output plain text, not one char per line
-U: output full Unicode description
Default behavior sniffs the arguments to select -c vs. -n.
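Example invocations (a sketch; the -d and -g forms need the data files described below):
	unicode 0100-0105
	unicode -n ⌘
	unicode -g -d smiling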
For some options you will need UnicodeData.txt installed.
Use curl or wget or your favorite webirific tool to copy
ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
to
$GOPATH/src/code.google.com/p/rspace.cmd/unicode
*/
package main // import "robpike.io/cmd/unicode"
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
)
var (
doNum = flag.Bool("n", false, "output numeric values")
doChar = flag.Bool("c", false, "output characters")
doText = flag.Bool("t", false, "output plain text")
doDesc = flag.Bool("d", false, "describe the characters from the Unicode database, in simple form")
doUnic = flag.Bool("u", false, "describe the characters from the Unicode database, in Unicode form")
doUNIC = flag.Bool("U", false, "describe the characters from the Unicode database, in glorious detail")
doGrep = flag.Bool("g", false, "grep for argument string in data")
)
var printRange = false
var (
unicodeTxt string
unicodeDataTxt string
goroot string
gopath string
)
func init() {
goroot = os.Getenv("GOROOT")
gopath = os.Getenv("GOPATH")
}
func getUnicode() {
if unicodeTxt == "" {
// Discover paths for unicode files.
unicodeTxt = getPath("unicode.txt")
unicodeDataTxt = getPath("UnicodeData.txt")
}
}
func getPath(base string) string {
if goroot != "" {
f := filepath.Join(goroot, "src/robpike.io/cmd/unicode", base)
if _, err := os.Stat(f); err == nil {
return f
}
}
if gopath != "" {
f := filepath.Join(gopath, "src/robpike.io/cmd/unicode", base)
if _, err := os.Stat(f); err == nil {
return f
}
}
fmt.Fprintf(os.Stderr, "unicode: can't find %s\n", base)
os.Exit(1)
return ""
}
func main() {
flag.Usage = usage
flag.Parse()
mode()
getUnicode()
var codes []rune
switch {
case *doGrep:
codes = argsAreRegexps()
case *doChar:
codes = argsAreNumbers()
case *doNum:
codes = argsAreChars()
}
if *doDesc {
desc(codes, unicodeTxt)
return
}
if *doUnic || *doUNIC {
desc(codes, unicodeDataTxt)
return
}
if *doText {
fmt.Printf("%s\n", string(codes))
return
}
b := new(bytes.Buffer)
for i, c := range codes {
switch {
case printRange:
fmt.Fprintf(b, "%.4x %c", c, c)
if i%4 == 3 {
fmt.Fprint(b, "\n")
} else {
fmt.Fprint(b, "\t")
}
case *doChar:
fmt.Fprintf(b, "%c\n", c)
case *doNum:
fmt.Fprintf(b, "%.4x\n", c)
}
}
if b.Len() > 0 && b.Bytes()[b.Len()-1] != '\n' {
fmt.Fprint(b, "\n")
}
fmt.Print(b)
}
func fatalf(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format+"\n", args...)
os.Exit(2)
}
const usageText = `usage: unicode [-c] [-d] [-n] [-t]
-c: args are hex; output characters (xyz)
-n: args are characters; output hex (23 or 23-44)
-g: args are regular expressions for matching names
-d: output textual description
-t: output plain text, not one char per line
-U: output full Unicode description
Default behavior sniffs the arguments to select -c vs. -n.
For some options you will need UnicodeData.txt installed.
Use curl or wget or your favorite webirific tool to copy
ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
to
$GOPATH/src/code.google.com/p/rspace.cmd/unicode`
func usage() {
fatalf(usageText)
}
// Mode determines whether we have numeric or character input.
// If there are no flags, we sniff the first argument.
func mode() {
if len(flag.Args()) == 0 {
usage()
}
// If grepping names, we need an output format defined; default is numeric.
if *doGrep && !(*doNum || *doChar || *doDesc || *doUnic || *doUNIC) {
*doNum = true
}
if *doNum || *doChar {
return
}
// If first arg is a range, print chars from hex.
if strings.ContainsRune(flag.Arg(0), '-') {
*doChar = true
return
}
// If there are non-hex digits, print hex from chars.
for _, r := range strings.Join(flag.Args(), "") {
if !strings.ContainsRune("0123456789abcdefABCDEF", r) {
*doNum = true
return
}
}
*doChar = true
}
func argsAreChars() []rune {
var codes []rune
for i, a := range flag.Args() {
for _, r := range a {
codes = append(codes, r)
}
// Add space between arguments if output is plain text.
if *doText && i < len(flag.Args())-1 {
codes = append(codes, ' ')
}
}
return codes
}
func argsAreNames() []rune {
var codes []rune
for i, a := range flag.Args() {
for _, r := range a {
codes = append(codes, r)
}
// Add space between arguments if output is plain text.
if *doText && i < len(flag.Args())-1 {
codes = append(codes, ' ')
}
}
return codes
}
func parseRune(s string) rune {
r, err := strconv.ParseInt(s, 16, 22)
if err != nil {
fatalf("%s", err)
}
return rune(r)
}
func argsAreNumbers() []rune {
var codes []rune
for _, a := range flag.Args() {
if s := strings.Split(a, "-"); len(s) == 2 {
printRange = true
r1 := parseRune(s[0])
r2 := parseRune(s[1])
if r2 < r1 {
usage()
}
for ; r1 <= r2; r1++ {
codes = append(codes, r1)
}
continue
}
codes = append(codes, parseRune(a))
}
return codes
}
func argsAreRegexps() []rune {
var codes []rune
lines := getFile(unicodeTxt)
for _, a := range flag.Args() {
re, err := regexp.Compile(a)
if err != nil {
fatalf("%s", err)
}
for i, line := range lines {
if re.MatchString(line) {
r, _ := runeOfLine(i, line)
codes = append(codes, r)
}
}
}
return codes
}
var files = make(map[string][]string)
func getFile(file string) []string {
lines := files[file]
if lines != nil {
return lines
}
text, err := ioutil.ReadFile(file)
if err != nil {
fatalf("%s", err)
}
lines = strings.Split(string(text), "\n")
// We get an empty final line; drop it.
if len(lines) > 0 && len(lines[len(lines)-1]) == 0 {
lines = lines[:len(lines)-1]
}
files[file] = lines
return lines
}
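// runeOfLine parses the code point at the start of a database line: the hex
// text before the first tab or semicolon, e.g. "0041;LATIN CAPITAL LETTER A;..."
// parses to U+0041.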
func runeOfLine(i int, line string) (r rune, tab int) {
tab = strings.IndexAny(line, "\t;")
if tab < 0 {
fatalf("malformed database: line %d", i)
}
return parseRune(line[0:tab]), tab
}
func desc(codes []rune, file string) {
lines := getFile(file)
runeData := make(map[rune]string)
for i, l := range lines {
r, tab := runeOfLine(i, l)
runeData[r] = l[tab+1:]
}
if *doUNIC {
for _, r := range codes {
fmt.Printf("%#U %s", r, dumpUnicode(runeData[r]))
}
} else {
for _, r := range codes {
fmt.Printf("%#U %s\n", r, runeData[r])
}
}
}
var prop = [...]string{
"",
"category: ",
"canonical combining classes: ",
"bidirectional category: ",
"character decomposition mapping: ",
"decimal digit value: ",
"digit value: ",
"numeric value: ",
"mirrored: ",
"Unicode 1.0 name: ",
"10646 comment field: ",
"uppercase mapping: ",
"lowercase mapping: ",
"titlecase mapping: ",
}
func dumpUnicode(s string) []byte {
fields := strings.Split(s, ";")
if len(fields) == 0 {
return []byte{'\n'}
}
b := new(bytes.Buffer)
if len(fields) != len(prop) {
fmt.Fprintf(b, "%s: can't print: expected %d fields, got %d\n", s, len(prop), len(fields))
return b.Bytes()
}
for i, f := range fields {
if f == "" {
continue
}
if i > 0 {
b.WriteByte('\t')
}
fmt.Fprintf(b, "%s%s\n", prop[i], f)
}
return b.Bytes()
}
| [
"\"GOROOT\"",
"\"GOPATH\""
]
| []
| [
"GOPATH",
"GOROOT"
]
| [] | ["GOPATH", "GOROOT"] | go | 2 | 0 | |
abstract_demo_web/abstract_demo_web/wsgi.py | """
WSGI config for abstract_demo_web project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'abstract_demo_web.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/python/pants/pantsd/pants_daemon.py | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import os
import sys
import time
import warnings
from pathlib import PurePath
from typing import Any
from setproctitle import setproctitle as set_process_title
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink
from pants.bin.daemon_pants_runner import DaemonPantsRunner
from pants.engine.environment import CompleteEnvironment
from pants.engine.internals import native_engine
from pants.init.engine_initializer import GraphScheduler
from pants.init.logging import initialize_stdio, pants_log_path
from pants.init.util import init_workdir
from pants.option.global_options import GlobalOptions, LocalStoreOptions
from pants.option.option_value_container import OptionValueContainer
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.pantsd.pants_daemon_core import PantsDaemonCore
from pants.pantsd.process_manager import PantsDaemonProcessManager
from pants.pantsd.service.pants_service import PantsServices
from pants.pantsd.service.scheduler_service import SchedulerService
from pants.pantsd.service.store_gc_service import StoreGCService
from pants.util.contextutil import argv_as, hermetic_environment_as
from pants.util.dirutil import safe_open
from pants.util.logging import LogLevel
from pants.util.strutil import ensure_text
class PantsDaemon(PantsDaemonProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
class StartupFailure(Exception):
"""Represents a failure to start pantsd."""
class RuntimeFailure(Exception):
"""Represents a pantsd failure at runtime, usually from an underlying service failure."""
@classmethod
def create(
cls, options_bootstrapper: OptionsBootstrapper, env: CompleteEnvironment
) -> PantsDaemon:
# Any warnings that would be triggered here are re-triggered later per-run of Pants, so we
# silence them.
with warnings.catch_warnings(record=True):
bootstrap_options = options_bootstrapper.bootstrap_options
bootstrap_options_values = bootstrap_options.for_global_scope()
executor = GlobalOptions.create_py_executor(bootstrap_options_values)
core = PantsDaemonCore(options_bootstrapper, env, executor, cls._setup_services)
server = native_engine.nailgun_server_create(
executor,
bootstrap_options_values.pantsd_pailgun_port,
DaemonPantsRunner(core),
)
return PantsDaemon(
work_dir=bootstrap_options_values.pants_workdir,
log_level=bootstrap_options_values.level,
server=server,
core=core,
metadata_base_dir=bootstrap_options_values.pants_subprocessdir,
bootstrap_options=bootstrap_options,
)
@staticmethod
def _setup_services(
bootstrap_options: OptionValueContainer,
graph_scheduler: GraphScheduler,
):
"""Initialize pantsd services.
:returns: A PantsServices instance.
"""
build_root = get_buildroot()
invalidation_globs = GlobalOptions.compute_pantsd_invalidation_globs(
build_root,
bootstrap_options,
)
scheduler_service = SchedulerService(
graph_scheduler=graph_scheduler,
build_root=build_root,
invalidation_globs=invalidation_globs,
pidfile=PantsDaemon.metadata_file_path(
"pantsd", "pid", bootstrap_options.pants_subprocessdir
),
pid=os.getpid(),
max_memory_usage_in_bytes=bootstrap_options.pantsd_max_memory_usage,
)
store_gc_service = StoreGCService(
graph_scheduler.scheduler,
local_store_options=LocalStoreOptions.from_options(bootstrap_options),
)
return PantsServices(services=(scheduler_service, store_gc_service))
def __init__(
self,
work_dir: str,
log_level: LogLevel,
server: Any,
core: PantsDaemonCore,
metadata_base_dir: str,
bootstrap_options: Options,
):
"""
NB: A PantsDaemon instance is generally instantiated via `create`.
:param work_dir: The pants work directory.
:param log_level: The log level to use for daemon logging.
:param server: A native PyNailgunServer instance (not currently a nameable type).
:param core: A PantsDaemonCore.
:param metadata_base_dir: The ProcessManager metadata base dir.
:param bootstrap_options: The bootstrap options.
"""
super().__init__(bootstrap_options, daemon_entrypoint=__name__)
self._build_root = get_buildroot()
self._work_dir = work_dir
self._server = server
self._core = core
self._bootstrap_options = bootstrap_options
self._logger = logging.getLogger(__name__)
def _close_stdio(self, log_path: PurePath):
"""Close stdio and append to a log path instead.
The vast majority of Python-level IO will be re-routed to thread-local destinations by
`initialize_stdio`, but we close stdio to avoid any stray output in the tty that launched
pantsd.
Rather than leaving 0, 1, 2 dangling though, we open replacements as a backstop for fatal
errors or unmodified code (such as Rust panic handlers) that might expect them to be valid
file handles.
"""
for attr, writable in (("stdin", False), ("stdout", True), ("stderr", True)):
# Close the old.
fd = getattr(sys, attr)
fileno = fd.fileno()
fd.flush()
fd.close()
# Open the new.
temp_fd = safe_open(log_path, "w") if writable else open(os.devnull)
os.dup2(temp_fd.fileno(), fileno)
setattr(sys, attr, os.fdopen(fileno, mode=("w" if writable else "r")))
sys.__stdin__, sys.__stdout__, sys.__stderr__ = sys.stdin, sys.stdout, sys.stderr
def _initialize_metadata(self) -> None:
"""Writes out our pid and other metadata.
Order matters a bit here, because technically all that is necessary to connect is the port,
and Services are lazily initialized by the core when a connection is established. Our pid
needs to be on disk before that happens.
"""
# Write the pidfile. The SchedulerService will monitor it after a grace period.
self.write_pid()
self.write_process_name()
self.write_fingerprint(ensure_text(self.options_fingerprint))
self._logger.debug(f"pantsd running with PID: {self.pid}")
self.write_socket(self._server.port())
def run_sync(self):
"""Synchronously run pantsd."""
os.environ.pop("PYTHONPATH")
global_bootstrap_options = self._bootstrap_options.for_global_scope()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title(f"pantsd [{self._build_root}]")
# Switch log output to the daemon's log stream, and empty `env` and `argv` to encourage all
# further usage of those variables to happen via engine APIs and options.
self._close_stdio(pants_log_path(PurePath(global_bootstrap_options.pants_workdir)))
with initialize_stdio(global_bootstrap_options), argv_as(
tuple()
), hermetic_environment_as():
# Install signal and panic handling.
ExceptionSink.install(
log_location=init_workdir(global_bootstrap_options), pantsd_instance=True
)
native_engine.maybe_set_panic_handler()
self._initialize_metadata()
# Check periodically whether the core is valid, and exit if it is not.
while self._core.is_valid():
time.sleep(self.JOIN_TIMEOUT_SECONDS)
# We're exiting: join the server to avoid interrupting ongoing runs.
self._logger.info("Waiting for ongoing runs to complete before exiting...")
native_engine.nailgun_server_await_shutdown(self._server)
self._logger.info("Exiting pantsd")
def launch_new_pantsd_instance():
"""An external entrypoint that spawns a new pantsd instance."""
options_bootstrapper = OptionsBootstrapper.create(
env=os.environ, args=sys.argv, allow_pantsrc=True
)
env = CompleteEnvironment(os.environ)
daemon = PantsDaemon.create(options_bootstrapper, env)
daemon.run_sync()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tools/sdk-tools/sdkcommon/sdkcommon.go | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package sdkcommon
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path/filepath"
"strings"
"go.fuchsia.dev/fuchsia/tools/lib/color"
"go.fuchsia.dev/fuchsia/tools/lib/logger"
)
var (
// ExecCommand exports exec.Command as a variable so it can be mocked.
ExecCommand = exec.Command
// ExecLookPath exported to support mocking.
ExecLookPath = exec.LookPath
// logging support.
logLevel = logger.InfoLevel
log = logger.NewLogger(logLevel, color.NewColor(color.ColorAuto), os.Stdout, os.Stderr, "sdk ")
)
// FuchsiaDevice represent a Fuchsia device.
type FuchsiaDevice struct {
// IPv4 or IPv6 of the Fuchsia device.
IpAddr string
// Nodename of the Fuchsia device.
Name string
}
// Default GCS bucket for prebuilt images and packages.
const defaultGCSbucket string = "fuchsia"
// GCSImage is used to return the bucket, name and version of a prebuilt.
type GCSImage struct {
Bucket string
Name string
Version string
}
// Property keys used to get and set device configuration
const (
DeviceNameKey string = "device-name"
BucketKey string = "bucket"
ImageKey string = "image"
DeviceIPKey string = "device-ip"
SSHPortKey string = "ssh-port"
PackageRepoKey string = "package-repo"
PackagePortKey string = "package-port"
DefaultKey string = "default"
// Top level key for storing data.
deviceConfigurationKey string = "DeviceConfiguration"
defaultDeviceKey string = "_DEFAULT_DEVICE_"
)
const (
defaultBucketName string = "fuchsia"
defaultSSHPort string = "22"
defaultPackagePort string = "8083"
)
var validPropertyNames = [...]string{
DeviceNameKey,
BucketKey,
ImageKey,
DeviceIPKey,
SSHPortKey,
PackageRepoKey,
PackagePortKey,
DefaultKey,
}
// DeviceConfig holds all the properties that are configured
// for a given device.
type DeviceConfig struct {
DeviceName string `json:"device-name"`
Bucket string `json:"bucket"`
Image string `json:"image"`
DeviceIP string `json:"device-ip"`
SSHPort string `json:"ssh-port"`
PackageRepo string `json:"package-repo"`
PackagePort string `json:"package-port"`
IsDefault bool `json:"default"`
}
// SDKProperties holds the common data for SDK tools.
// These values should be set or initialized by calling
// New().
type SDKProperties struct {
dataPath string
version string
globalPropertiesFilename string
}
func (sdk SDKProperties) setDeviceDefaults(deviceConfig *DeviceConfig) DeviceConfig {
// no reasonable default for device-name
if deviceConfig.Bucket == "" {
deviceConfig.Bucket = defaultBucketName
}
// no reasonable default for image
// no reasonable default for device-ip
if deviceConfig.SSHPort == "" {
deviceConfig.SSHPort = defaultSSHPort
}
if deviceConfig.PackageRepo == "" {
deviceConfig.PackageRepo = sdk.getDefaultPackageRepoDir(deviceConfig.DeviceName)
}
if deviceConfig.PackagePort == "" {
deviceConfig.PackagePort = defaultPackagePort
}
return *deviceConfig
}
// Builds the data key for the given segments.
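// For example, getDeviceDataKey([]string{"my-device", "bucket"}) returns
// "DeviceConfiguration.my-device.bucket" (the device name here is illustrative).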
func getDeviceDataKey(segments []string) string {
var fullKey = []string{deviceConfigurationKey}
return strings.Join(append(fullKey, segments...), ".")
}
// DefaultGetUserHomeDir is the default implementation of GetUserHomeDir()
// to allow mocking of user.Current()
func DefaultGetUserHomeDir() (string, error) {
	usr, err := user.Current()
	if err != nil {
		return "", err
	}
	return usr.HomeDir, nil
}
// DefaultGetUsername is the default implementation of GetUsername()
// to allow mocking of user.Current()
func DefaultGetUsername() (string, error) {
	usr, err := user.Current()
	if err != nil {
		return "", err
	}
	return usr.Username, nil
}
// DefaultGetHostname is the default implementation of GetHostname()
// to allow mocking of os.Hostname()
func DefaultGetHostname() (string, error) {
return os.Hostname()
}
// GetUserHomeDir to allow mocking.
var GetUserHomeDir = DefaultGetUserHomeDir
// GetUsername to allow mocking.
var GetUsername = DefaultGetUsername
// GetHostname to allow mocking.
var GetHostname = DefaultGetHostname
func NewWithDataPath(dataPath string) (SDKProperties, error) {
sdk := SDKProperties{}
if dataPath != "" {
sdk.dataPath = dataPath
} else {
homeDir, err := GetUserHomeDir()
if err != nil {
return sdk, err
}
sdk.dataPath = filepath.Join(homeDir, ".fuchsia")
}
toolsDir, err := sdk.GetToolsDir()
if err != nil {
return sdk, err
}
manifestFile, err := filepath.Abs(filepath.Join(toolsDir, "..", "..", "meta", "manifest.json"))
if err != nil {
return sdk, err
}
// If this is running in-tree, the manifest may not exist.
if FileExists(manifestFile) {
if sdk.version, err = readSDKVersion(manifestFile); err != nil {
return sdk, err
}
}
sdk.globalPropertiesFilename = filepath.Join(sdk.dataPath, "global_ffx_props.json")
err = initFFXGlobalConfig(sdk)
return sdk, err
}
// New creates an initialized SDKProperties using the default location
// for the data directory.
func New() (SDKProperties, error) {
return NewWithDataPath("")
}
// GetSDKVersion returns the version of the SDK or empty if not set.
// Use sdkcommon.New() to create an initialized SDKProperties struct.
func (sdk SDKProperties) GetSDKVersion() string {
return sdk.version
}
// GetSDKDataPath returns the path to the directory for storing SDK related data,
// or empty if not set.
// Use sdkcommon.New() to create an initialized SDKProperties struct.
func (sdk SDKProperties) GetSDKDataPath() string {
return sdk.dataPath
}
// readSDKVersion reads the manifest JSON file and returns the "id" property.
func readSDKVersion(manifestFilePath string) (string, error) {
manifestFile, err := os.Open(manifestFilePath)
	// if os.Open returns an error, handle it
if err != nil {
return "", err
}
defer manifestFile.Close()
data, err := ioutil.ReadAll(manifestFile)
if err != nil {
return "", err
}
var result map[string]interface{}
if err := json.Unmarshal([]byte(data), &result); err != nil {
return "", err
}
version, _ := result["id"].(string)
return version, nil
}
// getDefaultPackageRepoDir returns the path to the package repository.
// If the value has been set with `fconfig`, use that value.
// Otherwise if there is a default target defined, return the target
// specific path.
// Lastly, if there is nothing, return the default repo path.
func (sdk SDKProperties) getDefaultPackageRepoDir(deviceName string) string {
if deviceName != "" {
return filepath.Join(sdk.GetSDKDataPath(), deviceName,
"packages", "amber-files")
}
// As a last resort, `ffx` and the data are working as intended,
// but no default has been configured, so fall back to the generic
// legacy path.
return filepath.Join(sdk.GetSDKDataPath(), "packages", "amber-files")
}
// GetDefaultDeviceName returns the name of the target device to use by default.
func (sdk SDKProperties) GetDefaultDeviceName() (string, error) {
dataKey := getDeviceDataKey([]string{defaultDeviceKey})
data, err := getDeviceConfigurationData(sdk, dataKey)
if err != nil {
return "", err
}
if name, ok := data[dataKey].(string); ok {
return name, nil
} else if len(data) == 0 {
return "", nil
}
return "", fmt.Errorf("Cannot parse default device from %v", data)
}
// GetToolsDir returns the path to the SDK tools for the current
// CPU architecture. By default this is implemented by getting the
// directory of the currently executing binary.
func (sdk SDKProperties) GetToolsDir() (string, error) {
exePath, err := os.Executable()
if err != nil {
return "", fmt.Errorf("Could not currently running file: %v", err)
}
dir, err := filepath.Abs(filepath.Dir(exePath))
if err != nil {
return "", fmt.Errorf("could not get directory of currently running file: %s", err)
}
	// This could be a symlink in a directory, so look for another common
	// tool (ffx) next to it. If it is not found there, try using the dir from argv[0].
if FileExists(filepath.Join(dir, "ffx")) {
return dir, nil
}
dir, err = filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return "", fmt.Errorf("Could not get path of argv[0]: %v", err)
}
return dir, nil
}
// GetAvailableImages returns the images available for the given version and bucket. If
// bucket is not the default bucket, the images in the default bucket are also returned.
func (sdk SDKProperties) GetAvailableImages(version string, bucket string) ([]GCSImage, error) {
var buckets []string
var images []GCSImage
if bucket == "" || bucket == defaultGCSbucket {
buckets = []string{defaultGCSbucket}
} else {
buckets = []string{bucket, defaultGCSbucket}
}
for _, b := range buckets {
url := fmt.Sprintf("gs://%v/development/%v/images*", b, version)
args := []string{"ls", url}
output, err := runGSUtil(args)
if err != nil {
return images, err
}
for _, line := range strings.Split(strings.TrimSuffix(string(output), "\n"), "\n") {
if len(filepath.Base(line)) >= 4 {
bucketVersion := filepath.Base(filepath.Dir(filepath.Dir(line)))
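				// Strip the 4-character file extension (assumed to be ".tgz") from the image name.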
name := filepath.Base(line)[:len(filepath.Base(line))-4]
images = append(images, GCSImage{Bucket: b, Version: bucketVersion, Name: name})
} else {
log.Warningf("Could not parse image name: %v", line)
}
}
}
return images, nil
}
// GetPackageSourcePath returns the GCS path for the given values.
func (sdk SDKProperties) GetPackageSourcePath(version string, bucket string, image string) string {
return fmt.Sprintf("gs://%s/development/%s/packages/%s.tar.gz", bucket, version, image)
}
// RunFFXDoctor runs common checks for the ffx tool and host environment and returns
// the stdout.
func (sdk SDKProperties) RunFFXDoctor() (string, error) {
args := []string{"doctor"}
return sdk.RunFFX(args, false)
}
// GetAddressByName returns the IPv6 address of the device.
func (sdk SDKProperties) GetAddressByName(deviceName string) (string, error) {
	// Uses the ffx discovery workflow by default. The legacy device-finder
// workflow can be enabled by setting the environment variable FUCHSIA_DISABLED_ffx_discovery=1.
FUCHSIA_DISABLED_FFX_DISCOVERY := os.Getenv("FUCHSIA_DISABLED_ffx_discovery")
if FUCHSIA_DISABLED_FFX_DISCOVERY == "1" {
toolsDir, err := sdk.GetToolsDir()
if err != nil {
return "", fmt.Errorf("Could not determine tools directory %v", err)
}
cmd := filepath.Join(toolsDir, "device-finder")
args := []string{"resolve", "-device-limit", "1", "-ipv4=false", deviceName}
output, err := ExecCommand(cmd, args...).Output()
if err != nil {
var exitError *exec.ExitError
if errors.As(err, &exitError) {
return "", fmt.Errorf("%v: %v", string(exitError.Stderr), exitError)
} else {
return "", err
}
}
return strings.TrimSpace(string(output)), nil
}
// TODO(fxb/69008): use ffx json output.
args := []string{"target", "list", "--format", "a", deviceName}
output, err := sdk.RunFFX(args, false)
if err != nil {
var exitError *exec.ExitError
if errors.As(err, &exitError) {
return "", fmt.Errorf("%v: %v", string(exitError.Stderr), exitError)
} else {
return "", err
}
}
return strings.TrimSpace(output), nil
}
func (f *FuchsiaDevice) String() string {
return fmt.Sprintf("%s %s", f.IpAddr, f.Name)
}
// FindDeviceByName returns a fuchsia device matching a specific device name.
func (sdk SDKProperties) FindDeviceByName(deviceName string) (*FuchsiaDevice, error) {
devices, err := sdk.ListDevices()
if err != nil {
return nil, err
}
for _, device := range devices {
if device.Name == deviceName {
return device, nil
}
}
return nil, fmt.Errorf("no device with device name %s found", deviceName)
}
// FindDeviceByIP returns a fuchsia device matching a specific ip address.
func (sdk SDKProperties) FindDeviceByIP(ipAddr string) (*FuchsiaDevice, error) {
devices, err := sdk.ListDevices()
if err != nil {
return nil, err
}
for _, device := range devices {
if device.IpAddr == ipAddr {
return device, nil
}
}
return nil, fmt.Errorf("no device with IP address %s found", ipAddr)
}
// ListDevices returns all available fuchsia devices.
func (sdk SDKProperties) ListDevices() ([]*FuchsiaDevice, error) {
var devices []*FuchsiaDevice
var err error
var output string
	// Uses the ffx discovery workflow by default. The legacy device-finder
// workflow can be enabled by setting the environment variable FUCHSIA_DISABLED_ffx_discovery=1.
FUCHSIA_DISABLED_FFX_DISCOVERY := os.Getenv("FUCHSIA_DISABLED_ffx_discovery")
if FUCHSIA_DISABLED_FFX_DISCOVERY == "1" {
toolsDir, err := sdk.GetToolsDir()
if err != nil {
return nil, fmt.Errorf("Could not determine tools directory %v", err)
}
cmd := filepath.Join(toolsDir, "device-finder")
args := []string{"list", "--full", "-ipv4=false"}
outputAsBytes, err := ExecCommand(cmd, args...).Output()
if err != nil {
var exitError *exec.ExitError
if errors.As(err, &exitError) {
return nil, fmt.Errorf("%v: %v", string(exitError.Stderr), exitError)
}
return nil, err
}
output = string(outputAsBytes)
} else {
args := []string{"target", "list", "--format", "s"}
output, err = sdk.RunFFX(args, false)
if err != nil {
return nil, err
}
}
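	// Each output line is expected to be "<ip-address> <node-name>"; parse accordingly.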
for _, line := range strings.Split(output, "\n") {
parts := strings.Split(line, " ")
if len(parts) == 2 {
devices = append(devices, &FuchsiaDevice{
IpAddr: strings.TrimSpace(parts[0]),
Name: strings.TrimSpace(parts[1]),
})
}
}
if len(devices) < 1 {
return nil, fmt.Errorf("no devices found")
}
return devices, nil
}
func getCommonSSHArgs(sdk SDKProperties, customSSHConfig string, privateKey string,
sshPort string) []string {
var cmdArgs []string
if customSSHConfig != "" {
cmdArgs = append(cmdArgs, "-F", customSSHConfig)
} else {
cmdArgs = append(cmdArgs, "-F", getFuchsiaSSHConfigFile(sdk))
}
if privateKey != "" {
cmdArgs = append(cmdArgs, "-i", privateKey)
}
if sshPort != "" {
cmdArgs = append(cmdArgs, "-p", sshPort)
}
return cmdArgs
}
// RunSFTPCommand runs sftp (one of SSH's file copy tools).
// Setting to_target to true will copy file SRC from host to DST on the target.
// Otherwise it will copy file from SRC from target to DST on the host.
// sshPort if non-empty will use this port to connect to the device.
// The return value is the error if any.
func (sdk SDKProperties) RunSFTPCommand(targetAddress string, customSSHConfig string, privateKey string,
sshPort string, to_target bool, src string, dst string) error {
commonArgs := []string{"-q", "-b", "-"}
if customSSHConfig == "" || privateKey == "" {
if err := checkSSHConfig(sdk); err != nil {
return err
}
}
cmdArgs := getCommonSSHArgs(sdk, customSSHConfig, privateKey, sshPort)
cmdArgs = append(cmdArgs, commonArgs...)
if targetAddress == "" {
return errors.New("target address must be specified")
}
// SFTP needs the [] around the ipv6 address, which is different than ssh.
if strings.Contains(targetAddress, ":") {
targetAddress = fmt.Sprintf("[%v]", targetAddress)
}
cmdArgs = append(cmdArgs, targetAddress)
stdin := ""
if to_target {
stdin = fmt.Sprintf("put %v %v", src, dst)
} else {
stdin = fmt.Sprintf("get %v %v", src, dst)
}
return runSFTP(cmdArgs, stdin)
}
// RunSSHCommand runs the command provided in args on the given target device.
// The customSSHconfig is optional and overrides the SSH configuration defined by the SDK.
// privateKey is optional to specify a private key to use to access the device.
// verbose adds the -v flag to ssh.
// sshPort if non-empty is used as the custom ssh port on the commandline.
// The return value is the stdout.
func (sdk SDKProperties) RunSSHCommand(targetAddress string, customSSHConfig string,
privateKey string, sshPort string, verbose bool, args []string) (string, error) {
cmdArgs, err := buildSSHArgs(sdk, targetAddress, customSSHConfig, privateKey, sshPort, verbose, args)
if err != nil {
return "", err
}
return runSSH(cmdArgs, false)
}
// RunSSHShell runs the command provided in args on the given target device and
// uses the system stdin, stdout, stderr.
// Returns when the ssh process exits.
// The customSSHconfig is optional and overrides the SSH configuration defined by the SDK.
// privateKey is optional to specify a private key to use to access the device.
// sshPort if non-empty is used as the custom ssh port on the commandline.
// verbose adds the -v flag to ssh.
// The return value is the stdout.
func (sdk SDKProperties) RunSSHShell(targetAddress string, customSSHConfig string,
privateKey string, sshPort string, verbose bool, args []string) error {
cmdArgs, err := buildSSHArgs(sdk, targetAddress, customSSHConfig, privateKey,
sshPort, verbose, args)
if err != nil {
return err
}
_, err = runSSH(cmdArgs, true)
return err
}
func buildSSHArgs(sdk SDKProperties, targetAddress string, customSSHConfig string,
privateKey string, sshPort string, verbose bool, args []string) ([]string, error) {
if customSSHConfig == "" || privateKey == "" {
if err := checkSSHConfig(sdk); err != nil {
return []string{}, err
}
}
cmdArgs := getCommonSSHArgs(sdk, customSSHConfig, privateKey, sshPort)
if verbose {
cmdArgs = append(cmdArgs, "-v")
}
if targetAddress == "" {
return cmdArgs, errors.New("target address must be specified")
}
cmdArgs = append(cmdArgs, targetAddress)
cmdArgs = append(cmdArgs, args...)
return cmdArgs, nil
}
func getFuchsiaSSHConfigFile(sdk SDKProperties) string {
return filepath.Join(sdk.GetSDKDataPath(), "sshconfig")
}
/* This function creates the ssh keys needed to
work with devices running Fuchsia. There are two parts, the keys and the config.
There is a key for Fuchsia that is placed in a well-known location so that applications
which need to access the Fuchsia device can all use the same key. This is stored in
${HOME}/.ssh/fuchsia_ed25519.
The authorized key file used for paving is in ${HOME}/.ssh/fuchsia_authorized_keys.
The private key used when ssh'ing to the device is in ${HOME}/.ssh/fuchsia_ed25519.
The second part is the sshconfig file used by the SDK when using SSH.
This is stored in the Fuchsia SDK data directory, in a file named sshconfig.
This function checks that the private key file is referenced in the sshconfig and
that the version tag matches. If not, the sshconfig file is regenerated.
*/
const sshConfigTag = "Fuchsia SDK config version 5 tag"
func checkSSHConfig(sdk SDKProperties) error {
// The ssh configuration should not be modified.
homeDir, err := GetUserHomeDir()
if err != nil {
return fmt.Errorf("SSH configuration requires a $HOME directory: %v", err)
}
userName, err := GetUsername()
if err != nil {
return fmt.Errorf("SSH configuration requires a user name: %v", err)
}
var (
sshDir = filepath.Join(homeDir, ".ssh")
authFile = filepath.Join(sshDir, "fuchsia_authorized_keys")
keyFile = filepath.Join(sshDir, "fuchsia_ed25519")
sshConfigFile = getFuchsiaSSHConfigFile(sdk)
)
// If the public and private key pair exist, and the sshconfig
// file is up to date, then our work here is done, return success.
if FileExists(authFile) && FileExists(keyFile) && FileExists(sshConfigFile) {
config, err := ioutil.ReadFile(sshConfigFile)
if err == nil {
if strings.Contains(string(config), sshConfigTag) {
return nil
}
}
// The version tag does not match, so remove the old config file.
os.Remove(sshConfigFile)
}
if err := os.MkdirAll(sshDir, 0755); err != nil {
return fmt.Errorf("Could not create %v: %v", sshDir, err)
}
// Check to migrate keys from old location
if !FileExists(authFile) || !FileExists(keyFile) {
if err := moveLegacyKeys(sdk, authFile, keyFile); err != nil {
return fmt.Errorf("Could not migrate legacy SSH keys: %v", err)
}
}
// Create keys if needed
if !FileExists(authFile) || !FileExists(keyFile) {
if !FileExists(keyFile) {
hostname, _ := GetHostname()
if hostname == "" {
hostname = "unknown"
}
if err := generateSSHKey(keyFile, userName, hostname); err != nil {
return fmt.Errorf("Could generate private SSH key: %v", err)
}
}
if err := generatePublicSSHKeyfile(keyFile, authFile); err != nil {
return fmt.Errorf("Could get public keys from private SSH key: %v", err)
}
}
if err := writeSSHConfigFile(sshConfigFile, sshConfigTag, keyFile); err != nil {
return fmt.Errorf("Could write sshconfig file %v: %v", sshConfigFile, err)
}
return nil
}
func generateSSHKey(keyFile string, username string, hostname string) error {
path, err := ExecLookPath("ssh-keygen")
if err != nil {
return fmt.Errorf("could not find ssh-keygen on path: %v", err)
}
args := []string{
"-P", "",
"-t", "ed25519",
"-f", keyFile,
"-C", fmt.Sprintf("%v@%v generated by Fuchsia GN SDK", username, hostname),
}
cmd := ExecCommand(path, args...)
_, err = cmd.Output()
if err != nil {
var exitError *exec.ExitError
if errors.As(err, &exitError) {
return fmt.Errorf("%v: %v", string(exitError.Stderr), exitError)
} else {
return err
}
}
return nil
}
func generatePublicSSHKeyfile(keyFile string, authFile string) error {
path, err := ExecLookPath("ssh-keygen")
if err != nil {
return fmt.Errorf("could not find ssh-keygen on path: %v", err)
}
args := []string{
"-y",
"-f", keyFile,
}
cmd := ExecCommand(path, args...)
publicKey, err := cmd.Output()
if err != nil {
var exitError *exec.ExitError
if errors.As(err, &exitError) {
return fmt.Errorf("%v: %v", string(exitError.Stderr), exitError)
} else {
return err
}
}
if err := os.MkdirAll(filepath.Dir(authFile), 0755); err != nil {
return err
}
output, err := os.Create(authFile)
if err != nil {
return err
}
defer output.Close()
	fmt.Fprintln(output, string(publicKey))
return nil
}
func writeSSHConfigFile(sshConfigFile string, versionTag string, keyFile string) error {
if err := os.MkdirAll(filepath.Dir(sshConfigFile), 0755); err != nil {
return err
}
output, err := os.Create(sshConfigFile)
if err != nil {
return err
}
defer output.Close()
fmt.Fprintf(output, "# %s\n", versionTag)
fmt.Fprintf(output,
`# Configure port 8022 for connecting to a device with the local address.
# This makes it possible to forward 8022 to a device connected remotely.
# The fuchsia private key is used for the identity.
Host 127.0.0.1
Port 8022
Host ::1
Port 8022
Host *
# Turn off refusing to connect to hosts whose key has changed
StrictHostKeyChecking no
CheckHostIP no
# Disable recording the known hosts
UserKnownHostsFile=/dev/null
# Do not forward auth agent connection to remote, no X11
ForwardAgent no
ForwardX11 no
# Connection timeout in seconds
ConnectTimeout=10
# Check for server alive in seconds, max count before disconnecting
ServerAliveInterval 1
ServerAliveCountMax 10
# Try to keep the master connection open to speed reconnecting.
ControlMaster auto
ControlPersist yes
# When expanded, the ControlPath below cannot have more than 90 characters
# (total of 108 minus 18 used by a random suffix added by ssh).
# '%%C' expands to 40 chars and there are 9 fixed chars, so '~' can expand to
# up to 41 chars, which is a reasonable limit for a user's home in most
# situations. If '~' expands to more than 41 chars, the ssh connection
# will fail with an error like:
# unix_listener: path "..." too long for Unix domain socket
# A possible solution is to use /tmp instead of ~, but it has
# its own security concerns.
ControlPath=~/.ssh/fx-%%C
# Connect with user, use the identity specified.
User fuchsia
IdentitiesOnly yes
IdentityFile "%v"
GSSAPIDelegateCredentials no
`, keyFile)
return nil
}
func moveLegacyKeys(sdk SDKProperties, destAuthFile string, destKeyFile string) error {
// Check for legacy GN SDK key and copy it to the new location.
var (
legacySSHDir = filepath.Join(sdk.GetSDKDataPath(), ".ssh")
legacyKeyFile = filepath.Join(legacySSHDir, "pkey")
legacyAuthFile = filepath.Join(legacySSHDir, "authorized_keys")
)
if FileExists(legacyKeyFile) {
fmt.Fprintf(os.Stderr, "Migrating legacy key file %v to %v\n", legacyKeyFile, destKeyFile)
if err := os.Rename(legacyKeyFile, destKeyFile); err != nil {
return err
}
if FileExists(legacyAuthFile) {
if err := os.Rename(legacyAuthFile, destAuthFile); err != nil {
return err
}
}
}
return nil
}
// GetValidPropertyNames returns the list of valid properties for a
// device configuration.
func (sdk SDKProperties) GetValidPropertyNames() []string {
return validPropertyNames[:]
}
// IsValidProperty returns true if the property is a valid
// property name.
func (sdk SDKProperties) IsValidProperty(property string) bool {
for _, item := range validPropertyNames {
if item == property {
return true
}
}
return false
}
// GetFuchsiaProperty returns the value for the given property for the given device.
// If the device name is empty, the default device is used via GetDefaultDeviceName().
// It is an error if the property cannot be found.
func (sdk SDKProperties) GetFuchsiaProperty(device string, property string) (string, error) {
var err error
deviceName := device
if deviceName == "" {
if deviceName, err = sdk.GetDefaultDeviceName(); err != nil {
return "", err
}
}
deviceConfig, err := sdk.GetDeviceConfiguration(deviceName)
if err != nil {
return "", fmt.Errorf("Could not read configuration data for %v : %v", deviceName, err)
}
switch property {
case BucketKey:
return deviceConfig.Bucket, nil
case DeviceIPKey:
return deviceConfig.DeviceIP, nil
case DeviceNameKey:
return deviceConfig.DeviceName, nil
case ImageKey:
return deviceConfig.Image, nil
case PackagePortKey:
return deviceConfig.PackagePort, nil
case PackageRepoKey:
return deviceConfig.PackageRepo, nil
case SSHPortKey:
return deviceConfig.SSHPort, nil
}
return "", fmt.Errorf("Could not find property %v.%v", deviceName, property)
}
// GetDeviceConfigurations returns a list of all device configurations.
func (sdk SDKProperties) GetDeviceConfigurations() ([]DeviceConfig, error) {
var configs []DeviceConfig
// Get all config data.
configData, err := getDeviceConfigurationData(sdk, deviceConfigurationKey)
if err != nil {
return configs, fmt.Errorf("Could not read configuration data : %v", err)
}
if len(configData) == 0 {
return configs, nil
}
defaultDeviceName, err := sdk.GetDefaultDeviceName()
if err != nil {
return configs, err
}
if deviceConfigMap, ok := configData[deviceConfigurationKey].(map[string]interface{}); ok {
for k, v := range deviceConfigMap {
if !isReservedProperty(k) {
if device, ok := mapToDeviceConfig(v); ok {
device.IsDefault = defaultDeviceName == device.DeviceName
configs = append(configs, device)
}
}
}
return configs, nil
}
return configs, fmt.Errorf("Could not read configuration data: %v", configData)
}
// GetDeviceConfiguration returns the configuration for the device with the given name.
func (sdk SDKProperties) GetDeviceConfiguration(name string) (DeviceConfig, error) {
var deviceConfig DeviceConfig
dataKey := getDeviceDataKey([]string{name})
configData, err := getDeviceConfigurationData(sdk, dataKey)
if err != nil {
return deviceConfig, fmt.Errorf("Could not read configuration data : %v", err)
}
if len(configData) == 0 {
sdk.setDeviceDefaults(&deviceConfig)
return deviceConfig, nil
}
if deviceData, ok := configData[dataKey]; ok {
if deviceConfig, ok := mapToDeviceConfig(deviceData); ok {
defaultDeviceName, err := sdk.GetDefaultDeviceName()
if err != nil {
return deviceConfig, err
}
deviceConfig.IsDefault = deviceConfig.DeviceName == defaultDeviceName
// Set the default values for the device, even if not set explicitly
// This centralizes the configuration into 1 place.
sdk.setDeviceDefaults(&deviceConfig)
return deviceConfig, nil
}
return deviceConfig, fmt.Errorf("Cannot parse DeviceConfig from %v", configData)
}
return deviceConfig, fmt.Errorf("Cannot parse DeviceData.%v from %v", name, configData)
}
// SaveDeviceConfiguration persists the given device configuration properties.
func (sdk SDKProperties) SaveDeviceConfiguration(newConfig DeviceConfig) error {
// Create a map of key to value to store. Only write out values that are explicitly set to something
// that is not the default.
origConfig, err := sdk.GetDeviceConfiguration(newConfig.DeviceName)
if err != nil {
return err
}
var defaultConfig = DeviceConfig{}
sdk.setDeviceDefaults(&defaultConfig)
dataMap := make(map[string]string)
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, DeviceNameKey})] = newConfig.DeviceName
	// if the value changed from the original, write it out.
if origConfig.Bucket != newConfig.Bucket {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, BucketKey})] = newConfig.Bucket
} else if defaultConfig.Bucket == newConfig.Bucket {
// if the new value is the default value, then write the empty string.
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, BucketKey})] = ""
}
if origConfig.DeviceIP != newConfig.DeviceIP {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, DeviceIPKey})] = newConfig.DeviceIP
} else if defaultConfig.DeviceIP == newConfig.DeviceIP {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, DeviceIPKey})] = ""
}
if origConfig.Image != newConfig.Image {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, ImageKey})] = newConfig.Image
} else if defaultConfig.Image == newConfig.Image {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, ImageKey})] = ""
}
if origConfig.PackagePort != newConfig.PackagePort {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, PackagePortKey})] = newConfig.PackagePort
} else if defaultConfig.PackagePort == newConfig.PackagePort {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, PackagePortKey})] = ""
}
if origConfig.PackageRepo != newConfig.PackageRepo {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, PackageRepoKey})] = newConfig.PackageRepo
} else if defaultConfig.PackageRepo == newConfig.PackageRepo {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, PackageRepoKey})] = ""
}
if origConfig.SSHPort != newConfig.SSHPort {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, SSHPortKey})] = newConfig.SSHPort
} else if defaultConfig.SSHPort == newConfig.SSHPort {
dataMap[getDeviceDataKey([]string{newConfig.DeviceName, SSHPortKey})] = ""
}
if newConfig.IsDefault {
dataMap[getDeviceDataKey([]string{defaultDeviceKey})] = newConfig.DeviceName
}
for key, value := range dataMap {
err := writeConfigurationData(sdk, key, value)
if err != nil {
return err
}
}
return nil
}
// RemoveDeviceConfiguration removes the device settings for the given name.
func (sdk SDKProperties) RemoveDeviceConfiguration(deviceName string) error {
dataKey := getDeviceDataKey([]string{deviceName})
args := []string{"config", "remove", "--level", "global", dataKey}
if _, err := sdk.RunFFX(args, false); err != nil {
return fmt.Errorf("Error removing %s configuration: %v", deviceName, err)
}
defaultDeviceName, err := sdk.GetDefaultDeviceName()
if err != nil {
return err
}
if defaultDeviceName == deviceName {
err := writeConfigurationData(sdk, getDeviceDataKey([]string{defaultDeviceKey}), "")
if err != nil {
return err
}
}
return nil
}
// ResolveTargetAddress evaluates the deviceIP and deviceName passed in
// to determine the target IP address. This includes consulting the configuration
// information set via `fconfig`.
func (sdk SDKProperties) ResolveTargetAddress(deviceIP string, deviceName string) (string, error) {
var (
targetAddress string
err error
)
helpfulTipMsg := `Try running "ffx target list --format s" and then "fconfig set-device <device_name> --image <image_name> --default".`
// If there is a deviceIP address, use it.
if deviceIP != "" {
targetAddress = deviceIP
} else {
// No explicit address, use the name
if deviceName == "" {
// No name passed in, use the default name.
if deviceName, err = sdk.GetDefaultDeviceName(); err != nil {
return "", fmt.Errorf("could not determine default device name.\n%v %v", helpfulTipMsg, err)
}
}
if deviceName == "" {
// No address specified, no device name specified, and no device configured as the default.
return "", fmt.Errorf("invalid arguments. Need to specify --device-ip or --device-name or use fconfig to configure a default device.\n%v", helpfulTipMsg)
}
// look up a configured address by devicename
targetAddress, err = sdk.GetFuchsiaProperty(deviceName, DeviceIPKey)
if err != nil {
return "", fmt.Errorf("could not read configuration information for %v.\n%v %v", deviceName, helpfulTipMsg, err)
}
// if still nothing, resolve the device address by name
if targetAddress == "" {
if targetAddress, err = sdk.GetAddressByName(deviceName); err != nil {
return "", fmt.Errorf(`cannot get target address for %v.
Try running "ffx target list --format s" and verify the name matches in "fconfig get-all". %v`, deviceName, err)
}
}
}
if targetAddress == "" {
return "", fmt.Errorf(`could not get target device IP address for %v.
Try running "ffx target list --format s" and verify the name matches in "fconfig get-all".`, deviceName)
}
return targetAddress, nil
}
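// initFFXGlobalConfig ensures ffx has a global config level: it scans the output
// of `ffx config env` for a "Global:" entry, and if the referenced file does not
// exist, points the global level at sdk.globalPropertiesFilename.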
func initFFXGlobalConfig(sdk SDKProperties) error {
args := []string{"config", "env"}
var (
err error
output string
line string
)
if output, err = sdk.RunFFX(args, false); err != nil {
return fmt.Errorf("Error getting config environment %v", err)
}
reader := bufio.NewReader(bytes.NewReader([]byte(output)))
hasGlobal := false
for !hasGlobal {
line, err = reader.ReadString('\n')
if err != nil {
if err.Error() == "EOF" {
break
} else {
return err
}
}
if strings.HasPrefix(strings.TrimSpace(line), "Global") {
break
}
}
doSetEnv := len(line) == 0
if len(line) > 0 {
const (
prefix = "Global:"
prefixLen = len(prefix)
)
		index := strings.Index(line, prefix)
		if index < 0 {
return fmt.Errorf("Cannot parse `Global:` prefix from %v", line)
}
filename := strings.TrimSpace(line[index+prefixLen:])
_, err := os.Stat(filename)
doSetEnv = os.IsNotExist(err)
}
if doSetEnv {
// Create the global config level
if len(sdk.globalPropertiesFilename) == 0 {
return fmt.Errorf("Cannot initialize property config, global file name is empty: %v", sdk)
}
args := []string{"config", "env", "set", sdk.globalPropertiesFilename, "--level", "global"}
if _, err := sdk.RunFFX(args, false); err != nil {
var exitError *exec.ExitError
if errors.As(err, &exitError) {
return fmt.Errorf("Error initializing global properties environment: %v %v: %v", args, string(exitError.Stderr), exitError)
} else {
return fmt.Errorf("Error initializing global properties environment: %v %v", args, err)
}
}
}
return nil
}
// writeConfigurationData calls `ffx` to store the value at the specified key.
func writeConfigurationData(sdk SDKProperties, key string, value string) error {
args := []string{"config", "set", "--level", "global", key, value}
if output, err := sdk.RunFFX(args, false); err != nil {
return fmt.Errorf("Error writing %v = %v: %v %v", key, value, err, output)
}
return nil
}
// getDeviceConfigurationData calls `ffx` to read the data at the specified key.
func getDeviceConfigurationData(sdk SDKProperties, key string) (map[string]interface{}, error) {
var (
data map[string]interface{}
err error
output string
)
args := []string{"config", "get", key}
if output, err = sdk.RunFFX(args, false); err != nil {
// Exit code of 2 means no value was found.
if exiterr, ok := err.(*exec.ExitError); ok && exiterr.ExitCode() == 2 {
return data, nil
}
return data, fmt.Errorf("Error reading %v: %v %v", key, err, output)
}
if len(output) > 0 {
jsonString := string(output)
// wrap the response in {} and double quote the key so it is suitable for json unmarshaling.
fullJSONString := "{\"" + key + "\": " + jsonString + "}"
err := json.Unmarshal([]byte(fullJSONString), &data)
if err != nil {
return data, fmt.Errorf("Error parsing configuration data %v: %s", err, fullJSONString)
}
}
return data, nil
}
// RunFFX executes ffx with the given args, returning stdout. If there is an error,
// the error will usually be of type *ExitError.
func (sdk SDKProperties) RunFFX(args []string, interactive bool) (string, error) {
toolsDir, err := sdk.GetToolsDir()
if err != nil {
return "", fmt.Errorf("Could not determine tools directory %v", err)
}
cmd := filepath.Join(toolsDir, "ffx")
ffx := ExecCommand(cmd, args...)
if interactive {
ffx.Stderr = os.Stderr
ffx.Stdout = os.Stdout
ffx.Stdin = os.Stdin
return "", ffx.Run()
}
output, err := ffx.Output()
if err != nil {
return "", err
}
return string(output), err
}
// isReservedProperty is used to differentiate between properties used
// internally and device names.
func isReservedProperty(property string) bool {
switch property {
case defaultDeviceKey:
return true
}
return false
}
// mapToDeviceConfig converts the map returned by json into a DeviceConfig struct.
func mapToDeviceConfig(data interface{}) (DeviceConfig, bool) {
var (
device DeviceConfig
deviceData map[string]interface{}
ok bool
value string
)
if deviceData, ok = data.(map[string]interface{}); ok {
for _, key := range validPropertyNames {
			// the Default flag is stored elsewhere, so don't try to
			// read it from the map.
if key == DefaultKey {
continue
}
// Use Sprintf to convert the value into a string.
// This is done since some values are numeric and are
// not unmarshalled as strings.
if val, ok := deviceData[key]; ok {
value = fmt.Sprintf("%v", val)
} else {
fmt.Fprintf(os.Stderr, "Cannot get %v from %v", key, deviceData)
continue
}
switch key {
case BucketKey:
device.Bucket = value
case DeviceIPKey:
device.DeviceIP = value
case DeviceNameKey:
device.DeviceName = value
case ImageKey:
device.Image = value
case PackagePortKey:
device.PackagePort = value
case PackageRepoKey:
device.PackageRepo = value
case SSHPortKey:
device.SSHPort = value
}
}
}
return device, ok
}
| [
"\"FUCHSIA_DISABLED_ffx_discovery\"",
"\"FUCHSIA_DISABLED_ffx_discovery\""
]
| []
| [
"FUCHSIA_DISABLED_ffx_discovery"
]
| [] | ["FUCHSIA_DISABLED_ffx_discovery"] | go | 1 | 0 | |
pkg/monitors/statuscake/statuscake-monitor_test.go | package statuscake
import (
"os"
"testing"
"github.com/stakater/IngressMonitorController/pkg/config"
"github.com/stakater/IngressMonitorController/pkg/models"
"github.com/stakater/IngressMonitorController/pkg/util"
"github.com/stretchr/testify/assert"
)
func TestAddMonitorWithCorrectValues(t *testing.T) {
config := config.GetControllerConfig()
service := StatusCakeMonitorService{}
provider := util.GetProviderWithName(config, "StatusCake")
if provider == nil {
panic("Failed to find provider")
}
service.Setup(*provider)
m := models.Monitor{Name: "google-test", URL: "https://google1.com"}
service.Add(m)
mRes, err := service.GetByName("google-test")
if err != nil {
t.Error("Error: " + err.Error())
}
if mRes.Name != m.Name || mRes.URL != m.URL {
t.Error("URL and name should be the same")
}
service.Remove(*mRes)
monitor, err := service.GetByName(mRes.Name)
if monitor != nil {
t.Error("Monitor should've been deleted ", monitor, err)
}
}
func TestUpdateMonitorWithCorrectValues(t *testing.T) {
config := config.GetControllerConfig()
service := StatusCakeMonitorService{}
provider := util.GetProviderWithName(config, "StatusCake")
if provider == nil {
panic("Failed to find provider")
}
service.Setup(*provider)
m := models.Monitor{Name: "google-test", URL: "https://google.com"}
service.Add(m)
mRes, err := service.GetByName("google-test")
if err != nil {
t.Error("Error: " + err.Error())
}
if mRes.Name != m.Name || mRes.URL != m.URL {
t.Error("URL and name should be the same")
}
mRes.URL = "https://facebook.com"
service.Update(*mRes)
mRes, err = service.GetByName("google-test")
if err != nil {
t.Error("Error: " + err.Error())
}
if mRes.URL != "https://facebook.com" {
t.Error("URL and name should be the same")
}
service.Remove(*mRes)
monitor, err := service.GetByName(mRes.Name)
if monitor != nil {
t.Error("Monitor should've been deleted ", monitor, err)
}
}
func TestBuildUpsertFormAnnotations(t *testing.T) {
m := models.Monitor{Name: "google-test", URL: "https://google.com"}
m.Annotations = map[string]string{
"statuscake.monitor.stakater.com/check-rate": "60",
"statuscake.monitor.stakater.com/test-type": "TCP",
"statuscake.monitor.stakater.com/paused": "true",
"statuscake.monitor.stakater.com/ping-url": "",
"statuscake.monitor.stakater.com/follow-redirect": "true",
"statuscake.monitor.stakater.com/port": "7070",
"statuscake.monitor.stakater.com/trigger-rate": "1",
"statuscake.monitor.stakater.com/contact-group": "123456,654321",
"statuscake.monitor.stakater.com/basic-auth-user": "testuser",
"statuscake.monitor.stakater.com/node-locations": "",
"statuscake.monitor.stakater.com/status-codes": "500,501,502,503,504,505",
"statuscake.monitor.stakater.com/confirmation": "2",
"statuscake.monitor.stakater.com/enable-ssl-alert": "true",
"statuscake.monitor.stakater.com/test-tags": "test,testrun,uptime",
"statuscake.monitor.stakater.com/real-browser": "true",
}
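	// The basic-auth password is expected to be resolved from an environment
	// variable named after the basic-auth user annotation (here "testuser").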
oldEnv := os.Getenv("testuser")
os.Setenv("testuser", "testpass")
defer os.Setenv("testuser", oldEnv)
vals := buildUpsertForm(m, "")
assert.Equal(t, "testuser", vals.Get("BasicUser"))
assert.Equal(t, "testpass", vals.Get("BasicPass"))
assert.Equal(t, "60", vals.Get("CheckRate"))
assert.Equal(t, "2", vals.Get("Confirmation"))
assert.Equal(t, "123456,654321", vals.Get("ContactGroup"))
assert.Equal(t, "1", vals.Get("EnableSSLAlert"))
assert.Equal(t, "1", vals.Get("FollowRedirect"))
assert.Equal(t, "", vals.Get("NodeLocations"))
assert.Equal(t, "1", vals.Get("Paused"))
assert.Equal(t, "", vals.Get("PingURL"))
assert.Equal(t, "7070", vals.Get("Port"))
assert.Equal(t, "1", vals.Get("RealBrowser"))
assert.Equal(t, "500,501,502,503,504,505", vals.Get("StatusCodes"))
assert.Equal(t, "test,testrun,uptime", vals.Get("TestTags"))
assert.Equal(t, "TCP", vals.Get("TestType"))
assert.Equal(t, "1", vals.Get("TriggerRate"))
}
| [
"\"testuser\""
]
| []
| [
"testuser"
]
| [] | ["testuser"] | go | 1 | 0 | |
main.go | package main
import (
"errors"
"fmt"
"log"
"math/rand"
"os"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/asaskevich/govalidator"
"github.com/denverdino/aliyungo/common"
dns "github.com/honwen/aliyun-ddns-cli/alidns"
"github.com/honwen/ip2loc"
"github.com/urfave/cli"
)
// AccessKey from https://ak-console.aliyun.com/#/accesskey
type AccessKey struct {
ID string
Secret string
client *dns.Client
}
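// splitDomain splits a full domain name into its record (RR) part and its
// registered-domain part, e.g. "www.example.com" -> ("www", "example.com"),
// "example.com" -> ("@", "example.com"), "*.example.com" -> ("*", "example.com").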
func splitDomain(fulldomain string) (rr, domain string) {
wildCard := false
if strings.HasPrefix(fulldomain, `*.`) {
wildCard = true
fulldomain = fulldomain[2:]
}
for len(fulldomain) > 0 && strings.HasSuffix(fulldomain, `.`) {
fulldomain = fulldomain[:len(fulldomain)-1]
}
subs := strings.Split(fulldomain, `.`)
if !govalidator.IsDNSName(fulldomain) || len(subs) < 2 {
log.Fatal("Not a Vaild Domain")
}
rrSubs := subs[:len(subs)-2]
domainSubs := subs[len(subs)-2:]
if wildCard {
rr = strings.Join(append([]string{`*`}, rrSubs...), `.`)
} else {
rr = strings.Join(rrSubs, `.`)
}
if len(rr) == 0 {
rr = `@`
}
domain = strings.Join(domainSubs, `.`)
// fmt.Println(rr, domain)
return
}
func (ak *AccessKey) getClient() *dns.Client {
if len(ak.ID) <= 0 && len(ak.Secret) <= 0 {
return nil
}
if ak.client == nil {
ak.client = dns.NewClient(ak.ID, ak.Secret)
ak.client.SetEndpoint(dns.DNSDefaultEndpointNew)
}
return ak.client
}
func (ak AccessKey) String() string {
return fmt.Sprintf("Access Key: [ ID: %s ;\t Secret: %s ]", ak.ID, ak.Secret)
}
func (ak *AccessKey) ListRecord(domain string) (dnsRecords []dns.RecordTypeNew, err error) {
_, domain = splitDomain(domain)
var resp *dns.DescribeDomainRecordsNewResponse
for idx := 1; idx <= 99; idx++ {
resp, err = ak.getClient().DescribeDomainRecordsNew(
&dns.DescribeDomainRecordsNewArgs{
DomainName: domain,
Pagination: common.Pagination{PageNumber: idx, PageSize: 50},
})
if err != nil {
return
}
dnsRecords = append(dnsRecords, resp.DomainRecords.Record...)
if len(dnsRecords) >= resp.PaginationResult.TotalCount {
return
}
}
return
}
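// DelRecord deletes every record of fulldomain whose RR part matches, regardless of record type.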
func (ak *AccessKey) DelRecord(fulldomain string) (err error) {
rr, domain := splitDomain(fulldomain)
var target *dns.RecordTypeNew
if dnsRecords, err := ak.ListRecord(domain); err == nil {
for i := range dnsRecords {
if dnsRecords[i].RR == rr {
target = &dnsRecords[i]
_, err = ak.getClient().DeleteDomainRecord(
&dns.DeleteDomainRecordArgs{
RecordId: target.RecordId,
},
)
}
}
} else {
return err
}
return
}
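// UpdateRecord updates the existing record identified by recordID with the given RR, type and value.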
func (ak *AccessKey) UpdateRecord(recordID, rr, dmType, value string) (err error) {
_, err = ak.getClient().UpdateDomainRecord(
&dns.UpdateDomainRecordArgs{
RecordId: recordID,
RR: rr,
Value: value,
Type: dmType,
})
return
}
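// AddRecord creates a new DNS record under domain with the given RR, type and value.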
func (ak *AccessKey) AddRecord(domain, rr, dmType, value string) (err error) {
_, err = ak.getClient().AddDomainRecord(
&dns.AddDomainRecordArgs{
DomainName: domain,
RR: rr,
Type: dmType,
Value: value,
})
return err
}
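// CheckAndUpdateRecord ensures fulldomain resolves to ipaddr: it is a no-op when DNS already
// matches, otherwise it adds or updates the A/AAAA record, removing duplicates when more than
// one matching record exists and retrying once on a DomainRecordDuplicate error.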
func (ak *AccessKey) CheckAndUpdateRecord(fulldomain, ipaddr string, ipv6 bool) (err error) {
rr, domain := splitDomain(fulldomain)
fulldomain = strings.Join([]string{rr, domain}, `.`)
if getDNS(fulldomain, ipv6) == ipaddr {
return // Skip
}
recordType := "A"
if ipv6 {
recordType = "AAAA"
}
targetCnt := 0
var target *dns.RecordTypeNew
if dnsRecords, err := ak.ListRecord(domain); err == nil {
for i := range dnsRecords {
if dnsRecords[i].RR == rr && dnsRecords[i].Type == recordType {
target = &dnsRecords[i]
targetCnt++
}
}
} else {
return err
}
if targetCnt > 1 {
ak.DelRecord(fulldomain)
target = nil
}
if target == nil {
err = ak.AddRecord(domain, rr, recordType, ipaddr)
} else if target.Value != ipaddr {
if target.Type != recordType {
return fmt.Errorf("record type error! oldType=%s, targetType=%s", target.Type, recordType)
}
err = ak.UpdateRecord(target.RecordId, target.RR, target.Type, ipaddr)
}
if err != nil && strings.Contains(err.Error(), `DomainRecordDuplicate`) {
ak.DelRecord(fulldomain)
return ak.CheckAndUpdateRecord(fulldomain, ipaddr, ipv6)
}
return err
}
var (
accessKey AccessKey
version = "MISSING build version [git hash]"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
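// ip2locCN looks up the IP's geolocation via ip2loc and formats it as "[Country Region City ISP]";
// it returns an empty string when the lookup fails.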
func ip2locCN(ip string) (str string) {
if loc, err := ip2loc.IP2loc(ip); err != nil {
log.Printf("%+v", err)
} else {
str = fmt.Sprintf("[%s %s %s %s]", loc.CountryName, loc.RegionName, loc.CityName, loc.IspDomain)
}
return
}
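// Typical invocation of the auto-update command (key and domain values below are illustrative):
//
//	AKID=<AccessKeyID> AKSCT=<AccessKeySecret> aliddns auto-update -d ddns.example.com -r 600R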
func main() {
app := cli.NewApp()
app.Name = "aliddns"
app.Usage = "aliyun-ddns-cli"
app.Version = fmt.Sprintf("Git:[%s] (%s)", strings.ToUpper(version), runtime.Version())
app.Commands = []cli.Command{
{
Name: "list",
Category: "DDNS",
Usage: "List AliYun's DNS DomainRecords Record",
Flags: []cli.Flag{
cli.StringFlag{
Name: "domain, d",
Usage: "Specific `DomainName`. like aliyun.com",
},
},
Action: func(c *cli.Context) error {
if err := appInit(c); err != nil {
return err
}
// fmt.Println(c.Command.Name, "task: ", accessKey, c.String("domain"))
if dnsRecords, err := accessKey.ListRecord(c.String("domain")); err != nil {
fmt.Printf("%+v", err)
} else {
for _, v := range dnsRecords {
fmt.Printf("%20s %-8s %s\n", v.RR+`.`+v.DomainName, v.Type, v.Value)
}
}
return nil
},
},
{
Name: "delete",
Category: "DDNS",
Usage: "Delete AliYun's DNS DomainRecords Record",
Flags: []cli.Flag{
cli.StringFlag{
Name: "domain, d",
Usage: "Specific `FullDomainName`. like ddns.aliyun.com",
},
},
Action: func(c *cli.Context) error {
if err := appInit(c); err != nil {
return err
}
// fmt.Println(c.Command.Name, "task: ", accessKey, c.String("domain"))
if err := accessKey.DelRecord(c.String("domain")); err != nil {
fmt.Printf("%+v", err)
} else {
fmt.Println(c.String("domain"), "Deleted")
}
return nil
},
},
{
Name: "update",
Category: "DDNS",
Usage: "Update AliYun's DNS DomainRecords Record, Create Record if not exist",
Flags: []cli.Flag{
cli.StringFlag{
Name: "domain, d",
Usage: "Specific `DomainName`. like ddns.aliyun.com",
},
cli.StringFlag{
Name: "ipaddr, i",
Usage: "Specific `IP`. like 1.2.3.4",
},
cli.BoolFlag{
Name: "ipv6, 6",
Usage: "update IPv6 address",
},
},
Action: func(c *cli.Context) error {
if err := appInit(c); err != nil {
return err
}
fmt.Println(c.Command.Name, "task: ", accessKey, c.String("domain"), c.String("ipaddr"))
if err := accessKey.CheckAndUpdateRecord(c.String("domain"), c.String("ipaddr"), c.Bool("ipv6")); err != nil {
log.Printf("%+v", err)
} else {
log.Println(c.String("domain"), c.String("ipaddr"), ip2locCN(c.String("ipaddr")))
}
return nil
},
},
{
Name: "auto-update",
Category: "DDNS",
Usage: "Auto-Update AliYun's DNS DomainRecords Record, Get IP using its getip",
Flags: []cli.Flag{
cli.StringFlag{
Name: "domain, d",
Usage: "Specific `DomainName`. like ddns.aliyun.com",
},
cli.StringFlag{
Name: "redo, r",
Value: "",
Usage: "redo Auto-Update, every N `Seconds`; Disable if N less than 10; End with [Rr] enable random delay: [N, 2N]",
},
cli.BoolFlag{
Name: "ipv6, 6",
Usage: "update IPv6 address",
},
},
Action: func(c *cli.Context) error {
if err := appInit(c); err != nil {
return err
}
// fmt.Println(c.Command.Name, "task: ", accessKey, c.String("domain"), c.Int64("redo"))
redoDurtionStr := c.String("redo")
if len(redoDurtionStr) > 0 && !regexp.MustCompile(`\d+[Rr]?$`).MatchString(redoDurtionStr) {
return errors.New(`redo format: [0-9]+[Rr]?$`)
}
randomDelay := regexp.MustCompile(`\d+[Rr]$`).MatchString(redoDurtionStr)
redoDurtion := 0
if randomDelay {
// Print Version if exist
if !strings.HasPrefix(version, "MISSING") {
fmt.Fprintf(os.Stderr, "%s %s\n", strings.ToUpper(c.App.Name), c.App.Version)
}
redoDurtion, _ = strconv.Atoi(redoDurtionStr[:len(redoDurtionStr)-1])
} else {
redoDurtion, _ = strconv.Atoi(redoDurtionStr)
}
for {
autoip := getIP()
if c.Bool("ipv6") {
autoip = getIP6()
}
if err := accessKey.CheckAndUpdateRecord(c.String("domain"), autoip, c.Bool("ipv6")); err != nil {
log.Printf("%+v", err)
} else {
log.Println(c.String("domain"), autoip, ip2locCN(autoip))
}
if redoDurtion < 10 {
break // Disable if N less than 10
}
if randomDelay {
time.Sleep(time.Duration(redoDurtion+rand.Intn(redoDurtion)) * time.Second)
} else {
time.Sleep(time.Duration(redoDurtion) * time.Second)
}
}
return nil
},
},
{
Name: "getip",
Category: "GET-IP",
Usage: fmt.Sprintf(" Get IP Combine %d different Web-API", len(ipAPI)),
Flags: []cli.Flag{
cli.BoolFlag{
Name: "ipv6, 6",
Usage: "IPv6",
},
},
Action: func(c *cli.Context) error {
// fmt.Println(c.Command.Name, "task: ", c.Command.Usage)
if c.Bool("ipv6") {
ip := getIP6()
fmt.Println(ip)
} else {
ip := getIP()
fmt.Println(ip, ip2locCN(ip))
}
return nil
},
},
{
Name: "resolve",
Category: "GET-IP",
Usage: fmt.Sprintf(" Get DNS-IPv4 Combine %d DNS Upstream", len(dnsUpStream)),
Flags: []cli.Flag{
cli.StringFlag{
Name: "domain, d",
Usage: "Specific `DomainName`. like ddns.aliyun.com",
},
cli.BoolFlag{
Name: "ipv6, 6",
Usage: "IPv6",
},
},
Action: func(c *cli.Context) error {
// fmt.Println(c.Command.Name, "task: ", c.Command.Usage)
ip := getDNS(c.String("domain"), c.Bool("ipv6"))
if len(ip) < 1 {
return nil
}
if c.Bool("ipv6") {
fmt.Println(ip)
} else {
fmt.Println(ip, ip2locCN(ip))
}
return nil
},
},
}
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "access-key-id, id",
Usage: "AliYun's Access Key ID",
},
cli.StringFlag{
Name: "access-key-secret, secret",
Usage: "AliYun's Access Key Secret",
},
cli.StringSliceFlag{
Name: "ipapi, api",
Usage: "Web-API to Get IP, like: http://myip.ipip.net",
},
}
app.Action = func(c *cli.Context) error {
return appInit(c)
}
app.Run(os.Args)
}
func appInit(c *cli.Context) error {
akids := []string{c.GlobalString("access-key-id"), os.Getenv("AKID"), os.Getenv("AccessKeyID")}
akscts := []string{c.GlobalString("access-key-secret"), os.Getenv("AKSCT"), os.Getenv("AccessKeySecret")}
sort.Sort(sort.Reverse(sort.StringSlice(akids)))
sort.Sort(sort.Reverse(sort.StringSlice(akscts)))
accessKey.ID = akids[0]
accessKey.Secret = akscts[0]
if accessKey.getClient() == nil {
cli.ShowAppHelp(c)
return errors.New("access-key is empty")
}
newIPAPI := make([]string, 0)
for _, api := range c.GlobalStringSlice("ipapi") {
if !regexp.MustCompile(`^https?://.*`).MatchString(api) {
api = "http://" + api
}
if regexp.MustCompile(`(https?|ftp|file)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]`).MatchString(api) {
newIPAPI = append(newIPAPI, api)
}
}
if len(newIPAPI) > 0 {
ipAPI = newIPAPI
}
return nil
}
| [
"\"AKID\"",
"\"AccessKeyID\"",
"\"AKSCT\"",
"\"AccessKeySecret\""
]
| []
| [
"AKSCT",
"AccessKeySecret",
"AccessKeyID",
"AKID"
]
| [] | ["AKSCT", "AccessKeySecret", "AccessKeyID", "AKID"] | go | 4 | 0 | |
lama_cleaner/server.py | #!/usr/bin/env python3
import io
import logging
import multiprocessing
import os
import time
import imghdr
from pathlib import Path
from typing import Union
import cv2
import torch
import numpy as np
from loguru import logger
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import Config
try:
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
except:
pass
from flask import Flask, request, send_file, cli
# Disable ability for Flask to display warning about using a development server in a production environment.
# https://gist.github.com/jerblack/735b9953ba1ab6234abb43174210d356
cli.show_server_banner = lambda *_: None
from flask_cors import CORS
from lama_cleaner.helper import (
load_img,
numpy_to_bytes,
resize_max_size,
)
NUM_THREADS = str(multiprocessing.cpu_count())
os.environ["OMP_NUM_THREADS"] = NUM_THREADS
os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
os.environ["MKL_NUM_THREADS"] = NUM_THREADS
os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
if os.environ.get("CACHE_DIR"):
os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"]
BUILD_DIR = os.environ.get("LAMA_CLEANER_BUILD_DIR", "app/build")
class NoFlaskwebgui(logging.Filter):
def filter(self, record):
return "GET //flaskwebgui-keep-server-alive" not in record.getMessage()
logging.getLogger("werkzeug").addFilter(NoFlaskwebgui())
app = Flask(__name__, static_folder=os.path.join(BUILD_DIR, "static"))
app.config["JSON_AS_ASCII"] = False
CORS(app, expose_headers=["Content-Disposition"])
model: ModelManager = None
device = None
input_image_path: str = None
def get_image_ext(img_bytes):
w = imghdr.what("", img_bytes)
if w is None:
w = "jpeg"
return w
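# /inpaint: read the image and mask from the multipart form, build the strategy Config,
# run the current model and stream the result image back to the client.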
@app.route("/inpaint", methods=["POST"])
def process():
input = request.files
# RGB
origin_image_bytes = input["image"].read()
image, alpha_channel = load_img(origin_image_bytes)
original_shape = image.shape
interpolation = cv2.INTER_CUBIC
form = request.form
size_limit: Union[int, str] = form.get("sizeLimit", "1080")
if size_limit == "Original":
size_limit = max(image.shape)
else:
size_limit = int(size_limit)
config = Config(
ldm_steps=form["ldmSteps"],
ldm_sampler=form["ldmSampler"],
hd_strategy=form["hdStrategy"],
hd_strategy_crop_margin=form["hdStrategyCropMargin"],
hd_strategy_crop_trigger_size=form["hdStrategyCropTrigerSize"],
hd_strategy_resize_limit=form["hdStrategyResizeLimit"],
)
logger.info(f"Origin image shape: {original_shape}")
image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
logger.info(f"Resized image shape: {image.shape}")
mask, _ = load_img(input["mask"].read(), gray=True)
mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
start = time.time()
res_np_img = model(image, mask, config)
logger.info(f"process time: {(time.time() - start) * 1000}ms")
torch.cuda.empty_cache()
if alpha_channel is not None:
if alpha_channel.shape[:2] != res_np_img.shape[:2]:
alpha_channel = cv2.resize(
alpha_channel, dsize=(res_np_img.shape[1], res_np_img.shape[0])
)
res_np_img = np.concatenate(
(res_np_img, alpha_channel[:, :, np.newaxis]), axis=-1
)
ext = get_image_ext(origin_image_bytes)
return send_file(
io.BytesIO(numpy_to_bytes(res_np_img, ext)),
mimetype=f"image/{ext}",
)
@app.route("/model")
def current_model():
return model.name, 200
@app.route("/model_downloaded/<name>")
def model_downloaded(name):
return str(model.is_downloaded(name)), 200
@app.route("/model", methods=["POST"])
def switch_model():
new_name = request.form.get("name")
if new_name == model.name:
return "Same model", 200
try:
model.switch(new_name)
except NotImplementedError:
return f"{new_name} not implemented", 403
return f"ok, switch to {new_name}", 200
@app.route("/")
def index():
return send_file(os.path.join(BUILD_DIR, "index.html"))
@app.route("/inputimage")
def set_input_photo():
if input_image_path:
with open(input_image_path, "rb") as f:
image_in_bytes = f.read()
return send_file(
input_image_path,
as_attachment=True,
attachment_filename=Path(input_image_path).name,
mimetype=f"image/{get_image_ext(image_in_bytes)}",
)
else:
return "No Input Image"
def main(args):
global model
global device
global input_image_path
device = torch.device(args.device)
input_image_path = args.input
model = ModelManager(name=args.model, device=device)
if args.gui:
app_width, app_height = args.gui_size
from flaskwebgui import FlaskUI
ui = FlaskUI(
app, width=app_width, height=app_height, host=args.host, port=args.port
)
ui.run()
else:
app.run(host=args.host, port=args.port, debug=args.debug)
| []
| []
| [
"LAMA_CLEANER_BUILD_DIR",
"TORCH_HOME",
"MKL_NUM_THREADS",
"VECLIB_MAXIMUM_THREADS",
"CACHE_DIR",
"OPENBLAS_NUM_THREADS",
"NUMEXPR_NUM_THREADS",
"OMP_NUM_THREADS"
]
| [] | ["LAMA_CLEANER_BUILD_DIR", "TORCH_HOME", "MKL_NUM_THREADS", "VECLIB_MAXIMUM_THREADS", "CACHE_DIR", "OPENBLAS_NUM_THREADS", "NUMEXPR_NUM_THREADS", "OMP_NUM_THREADS"] | python | 8 | 0 | |
qa/pull-tester/rpc-tests.py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "BOTCOIND" not in os.environ:
os.environ["BOTCOIND"] = buildDir + '/src/botcoind' + EXEEXT
if "BOTCOINCLI" not in os.environ:
os.environ["BOTCOINCLI"] = buildDir + '/src/botcoin-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print "Win tests currently disabled by default. Use -win option to enable"
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
sys.exit(0)
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
raise e
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'wallet-hd.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'fundrawtransaction-hd.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py', # NOTE: needs botcoin_hash to pass
'blockchain.py',
'disablewallet.py',
'sendheaders.py', # NOTE: needs botcoin_hash to pass
'keypool.py',
'keypool-hd.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs botcoin_hash to pass
'invalidtxrequest.py', # NOTE: needs botcoin_hash to pass
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs botcoin_hash to pass
'bip68-sequence.py',
'bipdersig-p2p.py', # NOTE: needs botcoin_hash to pass
'bipdersig.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
# 'pruning.py', # Prune mode is incompatible with -txindex.
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs botcoin_hash to pass
'mempool_packages.py',
'maxuploadtarget.py',
# 'replace-by-fee.py', # RBF is disabled in Botcoin Core
]
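# Run every selected test script (plus the extended suite when -extended is passed) and report
# RPC coverage when it is enabled.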
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| []
| []
| [
"BOTCOINCLI",
"BOTCOIND"
]
| [] | ["BOTCOINCLI", "BOTCOIND"] | python | 2 | 0 | |
Released2019June06/RobotSystem/control_robot_system.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Description
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "CONG-MINH NGUYEN"
__copyright__ = "Copyright (C) 2019, HANDBOOK"
__credits__ = ["CONG-MINH NGUYEN"]
__license__ = "GPL"
__version__ = "1.0.1"
__date__ = "5/10/2019"
__maintainer__ = "CONG-MINH NGUYEN"
__email__ = "[email protected]"
__status__ = "Development" # ["Prototype", "Development", or "Production"]
# Project Style: https://dev.to/codemouse92/dead-simple-python-project-structure-and-imports-38c6
# Code Style: http://web.archive.org/web/20111010053227/http://jaynes.colorado.edu/PythonGuidelines.html#module_formatting
#==============================================================================
# Imported Modules
#==============================================================================
import argparse
from pathlib import Path
import os.path
import sys
import time
import copy
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "" # The GPU id to use, usually either "0" or "1"
import json
import numpy as np
import cv2
import requests
from Camera.OrbbecAstraS.camera import Camera, rgbd_to_pointcloud
from GeneralUtils import List, Tuple, Dict, Union, Generic, TypeVar
from GeneralUtils import sample_arrays, stack_list_horizontal
from PointCloudUtils import visualize_pc, points_to_pc, coords_labels_to_pc, load_ply_as_pc, load_ply_as_points
from PointCloudUtils import adjust_pc_coords, global_icp
from PointCloudUtils import radian2degree, degree2radian, m2mm, mm2m, create_rotx_matrix, create_roty_matrix, create_rotz_matrix, create_tranl_matrix
from Segmentation.PointNet.learner import PointNetLearner
#==============================================================================
# Constant Definitions
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
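# Pose unit helpers: mpose2mmpose/mmpose2mpose scale the translational part (x, y, z) of a pose
# between meters and millimeters, leaving the rotation and gripper fields unchanged.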
def mpose2mmpose(pose: np.ndarray):
tarr = np.ones(len(pose))
tarr[:3] *= 1000
return pose * tarr
def mmpose2mpose(pose: np.ndarray):
tarr = np.ones(len(pose))
tarr[:3] *= 0.001
return pose * tarr
def load_object_models(model_path='./obj_models/modelMay10/'):
"""
Description:
:param model_path: str, path to the reference models of known objects
:return: pc_models, List[2L ndarrays], list of points of target surface
:return: centroid_models, List[Vector(3 floats)], the list of centroids of model
:return: pose_models, List[List[Vector(6 floats)]], the list of pose list of each model(each model has a list of poses)
"""
pc_models = []
centroid_models = []
pose_models = []
files = os.listdir(path=os.path.join(model_path, 'pc/'))
for _, file in enumerate(files):
filename, _ = os.path.splitext(file)
pc_model = load_ply_as_points(file_path=os.path.join(model_path, 'pc/', file))
centroid, grasping_pose = np.load(os.path.join(model_path, 'info/', filename + '.npy'), allow_pickle=True)
grasping_pose = np.array(grasping_pose).astype(float)
grasping_pose[:, :3] = mm2m(grasping_pose[:, :3])
pc_models.append(pc_model)
centroid_models.append(centroid)
pose_models.append(grasping_pose)
return pc_models, centroid_models, pose_models
def measure_xtran_params(neutral_point, transformation):
"""
Description: Assume that the transformation from robot coord to camera coord is: RotX -> RotY -> RotZ -> Tranl
In this case: RotX = 180, RotY = 0; RotZ = -90; Tranl: unknown
But we know coords of a determined neutral point in 2 coord systems,
hence we can measure Transl from robot centroid to camera centroid.(Step 2)
:param neutral_point : Dict, list of 2 coords of neutral_point in 2 coord systems
:param transformation : Dict, list of 3 rotating transformations
:return: r2c_xtran : Matrix 4x4 floats, transformation from robot coord to camera coord
:return: c2r_xtran : Matrix 4x4 floats, transformation from camera coord to robot coord
# :return: tranl : Matrix 4x4 floats, translation from robot coord to camera coord
"""
# 1: Load coords of the neutral point
neutral_robot = mm2m(coords=np.array(neutral_point['robot_coord'])) # neutral point coord in robot coord system
neutral_camera = mm2m(coords=np.array(neutral_point['camera_coord'])) # neutral point coord in camera coord system
rotx = create_rotx_matrix(theta=-transformation['rotx']) # load transformation matrix of rotation around x
roty = create_roty_matrix(theta=-transformation['roty']) # load transformation matrix of rotation around y
rotz = create_rotz_matrix(theta=-transformation['rotz']) # load transformation matrix of rotation around z
# 2: Find transformation between robot coord centroid and camera coord centroid
    rotxyz = np.dot(np.dot(rotz, roty), rotx)  # determine transformation matrix after rotating sequentially around x, y, z
neutral_robot3 = np.dot(rotxyz, np.append(neutral_robot, 1))[:3] # find coord of neutral point after RotXYZ
Oc_in_3 = neutral_robot3 - neutral_camera # find coord of robot centroid in camera coord system
tranl = create_tranl_matrix(vector=-Oc_in_3)
# 3: Find transformation matrix from robot to camera
# r2c_xtran = np.dot(np.dot(np.dot(tranl, rotz), roty), rotx)
# c2r_xtran = np.linalg.inv(r2c_xtran)
return rotx, roty, rotz, tranl
def input_cli():
user_input = input("Enter CLI commands such as (--NAME VALUE ...): ")
custom_parser = argparse.ArgumentParser()
custom_parser.add_argument('-vb', '--verbose', type=bool, help='show detail results')
custom_parser.add_argument('-vs', '--voxel_size', type=float, help='adjust voxel size')
custom_parser.add_argument('-ft', '--fitness_threshold', type=float, help='adjust voxel size')
custom_parser.add_argument('-pi', '--selected_pose_id', type=int, help='select pose id that will execute grasp')
custom_args = custom_parser.parse_args(user_input.split())
return custom_args
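# Offset the z coordinate by a fixed 0.677 m and scale the RGB channels to [0, 1] before feeding
# points to the segmentation service.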
def normalize_pc(points: np.ndarray):
new_points = copy.deepcopy(points)
new_points[:, 2] -= 0.677
new_points[:, 3:6] /= 255.
return new_points
def segment_obj_in_scene(scene_points, n_points: int=16384, n_channels: int=6, url='http://127.0.0.1:5000/api/'):
"""
Description: segment the point clouds of wrench and pipe out of scene
    :param url : str, endpoint of the remote segmentation service (a PointNet model served via REST API)
:param scene_points : 2L ndarray(shape=(n_points, n_channels)), list of points
:param n_points : int > 0, number input points of PointNet Learner
:param n_channels : int > 0, number channels of input points of PointNet Learner
:return: wrench_points : 2L ndarray, points of wrench
:return: pipe_points : 2L ndarray, points of pipe
"""
# Shuffle points to distribute the points equally in arrays(useful for next step, cut scene into parts to segment)
n_scene_points = len(scene_points)
scene_points = sample_arrays(arrs=scene_points, n_samples=n_scene_points)
# Do segment(cut scene into 2 parts, segment each part then unify results of 2 parts to get overall picture)
wrench_points = []
pipe_points = []
for i in range(2):
# sample the points to fit the network
cur_scene_points = scene_points[i * n_scene_points // 2:(i + 1) * n_scene_points // 2]
cur_scene_points = sample_arrays(arrs=cur_scene_points, n_samples=n_points)
# predict segment labels(send data to remote server through RESTful API)
# pred_labels = learner.predict(x=normalize_pc(points=cur_scene_points[:, :n_channels]))
data = {'points': normalize_pc(points=cur_scene_points[:, :n_channels]).tolist()}
j_data = json.dumps(data)
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
res = requests.post(url=url, data=j_data, headers=headers)
pred_labels = np.asarray(json.loads(res.text))
# extract the points in the scene of each object by labels
wrench_points.append(cur_scene_points[pred_labels == 2])
pipe_points.append(cur_scene_points[pred_labels == 3])
wrench_points = np.vstack(wrench_points) # get entire points of wrench
pipe_points = np.vstack(pipe_points) # get entire points of pipe
# visualize_pc(coords_labels_to_pc(coords=cur_scene_points[:, :3], labels=pred_labels))
return wrench_points, pipe_points
def match_object_surface(surface: np.ndarray, model: np.ndarray, model_centroid: Tuple[float, float, float],
voxel_size: float, n_channel: int=6, verbose: bool=False):
"""
    Description: align the reference object model to the observed surface patch using global ICP
    :param surface : 2L ndarray(shape=(n_points, n_channels)), list of points of the observed target surface
    :param model : 2L ndarray(shape=(n_points, n_channels)), list of points of the reference object model
    :param model_centroid : Vector(3 floats), the centroid of `model`
    :param voxel_size : float, downsampling size of point cloud in `global_icp` algorithm
    :param n_channel : int > 0, number of channels of the input points
    :param verbose : bool, show detailed results and notifications or not
    :return: registration result of `global_icp`, carrying the fitness score and the 4x4 transformation from model to surface
"""
point_cloud_model = adjust_pc_coords(point_cloud=points_to_pc(model[:, :n_channel]), coord=model_centroid)
point_cloud_target = adjust_pc_coords(point_cloud=points_to_pc(surface[:, :n_channel]), coord=model_centroid)
xtran = global_icp(source=points_to_pc(point_cloud_model), target=points_to_pc(point_cloud_target),
voxel_size=voxel_size, verbose=verbose)
print(xtran)
return xtran
def interpolate_pose(ref_pose, surf_xtran, rotx, roty, rotz, tranl, pc_centroid):
"""
Description: match reference_pose of (x, y, z) (rx, ry, rz) and (mode, aperture) from reference source to target point cloud
:param ref_pose : Vector(8 floats), the pose of the reference model
:param surf_xtran : Matrix(4x4 floats), the transformation matrix from source model to target point cloud
:param rotx : Matrix(4x4 floats), the transformation matrix of rotation around x axis of robot coord
:param roty : Matrix(4x4 floats), the transformation matrix of rotation around y axis of robot coord
:param rotz : Matrix(4x4 floats), the transformation matrix of rotation around z axis of robot coord
:param tranl : Matrix(4x4 floats), the transformation matrix of translation from robot origin to the camera origin
    :param pc_centroid : Vector(3 floats), the centroid of the considered point cloud
    :return: Vector(8 floats), the interpolated pose (x, y, z, rx, ry, rz, mode, aperture) in the robot coordinate system
"""
# transformation matrix of robot origin to point cloud center, xyz elements
tranl2 = create_tranl_matrix(vector=-np.array(pc_centroid))
r2pc_xyz_xtran = np.dot(np.dot(np.dot(np.dot(tranl2, tranl), rotz), roty), rotx)
pc2r_xyz_xtran = np.linalg.inv(r2pc_xyz_xtran)
# measure xyz
new_xyz = np.append(arr=ref_pose[:3], values=1, axis=None)
new_xyz = np.dot(r2pc_xyz_xtran, new_xyz)
new_xyz = np.dot(surf_xtran, new_xyz)
new_xyz = np.dot(pc2r_xyz_xtran, new_xyz)
# measure roll-pitch-yaw
# new_rpy = ref_pose[3:6] + radian2degree(rotation_matrix_to_euler_angles(surf_xtran[:3, :3]))
new_yaw = ref_pose[5] + radian2degree(rotation_matrix_to_euler_angles(surf_xtran[:3, :3]))[2] # restrict rx, ry because of real robot problem
new_pose = copy.deepcopy(ref_pose)
new_pose[:3] = new_xyz[:3]
# new_pose[3:6] = new_rpy[:3]
new_pose[5] = new_yaw
return new_pose
def rgbd_to_pointcloud1(rgb, depth, scale=0.001, focal_length_x=520, focal_length_y=513, label=False, offset=0, **kwargs):
"""
Convert single RGBD image to point cloud
:param rgb: 3L ndarray of int, RGB image
:param depth: 1L ndarray of any, depth image
:param scale: a float value, scale=0.001->scale into Meter unit, scale=1->scale into miliMeter unit
:param focal_length_x: a float value, focal_length of x axis
:param focal_length_y: a float value, focal_length of y axis
:param label: a bool value, enable or disable labeling data
:param **kwargs: a list of 3L ndarray of int, list of label tables
this arguments are only used when 'label' is set True
size(h, w) of each label table must equal size of rgb image
    :return: a list of points as [X, Y, Z, R, G, B, row, col, label(optional)]
"""
center_y, center_x = (rgb.shape[0] - 1) / 2, (rgb.shape[1] - 1) / 2
points = []
for row in range(rgb.shape[0]):
for col in range(rgb.shape[1]):
R, G, B = rgb[row, col]
# obtain z value and ignore the un-obtained point(z=0)
Z = depth[row, col]
if Z == 0: continue
# measure world coordinates in Meter(scale=0.001) or miliMeter(scale=1)
Z = Z * scale
X = (col - center_x) * Z / focal_length_x
Y = (row - center_y) * Z / focal_length_y
# label the point if input the label table(in kwargs)
if label:
label_point = offset
for i, (mask_name, label_table) in enumerate(list(kwargs.items())):
if label_table[row, col] > 0:
label_point += i+1
points.append([X, Y, Z, R, G, B, row, col, label_point])
else:
points.append([X, Y, Z, R, G, B, row, col])
return np.asarray(points)
import math
# Checks if a matrix is a valid rotation matrix.
def is_rotation_matrix(R: np.array) -> bool:
"""
    Check whether R is a valid rotation matrix (orthonormal: R^T.R is close to the identity)
    :param R: a 3x3 rotation matrix
    :return: a boolean, True if R is a valid rotation matrix
"""
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype=R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
def rotation_matrix_to_euler_angles(R):
"""
Measure rotations around x, y and z from transformation matrix
:param R: a rotation matrix
:return: an array of 3 values that describe rotations around x, y and z axis, unit is "radian"
"""
assert (is_rotation_matrix(R))
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
singular = sy < 1e-6
if not singular:
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
else:
x = math.atan2(-R[1, 2], R[1, 1])
y = math.atan2(-R[2, 0], sy)
z = 0
return np.array([x, y, z])
#==============================================================================
# Main function
#==============================================================================
def _main_(args):
print('Hello World! This is {:s}'.format(args.desc))
'''**************************************************************
I. Set parameters
'''
## Read config
config_path = args.conf
with open(config_path) as config_buffer:
config = json.loads(config_buffer.read())
## Load config to variables
lbl_names = config['seg_net']['labels']
n_points = config['seg_net']['n_points']
n_channels = config['seg_net']['n_channels']
ptn_model = config['seg_net']['ptn_model']
redist = config['camera']['astra_redist']
## Interpolate some other variables
n_classes = len(lbl_names)
input_shape = [n_points, n_channels]
print("Show parameters \n\tlabels: {} \n\tn_classes: {} \n\tn_points: {} \n\tn_channels: {} \n\tptn_model: {} ".format(lbl_names, n_classes, n_points, n_channels, ptn_model))
'''**************************************************************
II. Load segmentation network model
'''
# learner = PointNetLearner(input_shape=input_shape, labels=lbl_names)
# learner.load_weight(weight_path=ptn_model)
'''**************************************************************
    III. Initialize hardware: camera and robot
'''
# Initialize camera
camera = Camera(resolution=(640, 480), fps=30, redist=redist)
url_tx40 = 'http://localhost:5001/v1/tx40/'
url_tfg = 'http://localhost:5002/v1/tfg/'
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'} # config header of REST API
res = requests.put(url=url_tx40 + 'state/', data=json.dumps(True), headers=headers)
res = requests.put(url=url_tfg + 'state/', data=json.dumps(True), headers=headers)
'''**************************************************************
IV. Load object models
'''
pc_models, centroid_models, pose_models = load_object_models(model_path=config['grasp_infer']['model_path'])
rotx, roty, rotz, tranl = measure_xtran_params(neutral_point=config['grasp_infer']['coords']['neutral_point'],
transformation=config['grasp_infer']['transformation'])
'''**************************************************************
V. Record
'''
from OpenCVUtils import draw_xy_axes
camera.start()
done = False
verbose = True
voxel_size = 0.008
fitness_threshold = 0.8
selected_pose_id = 0
detected_poses = []
origin = []
while not done:
rgb, depth_distance = camera.get_stream(crop=(180, 200))
camera.display(crop=(180, 200))
key = cv2.waitKey(1) & 255
if key == 27 or chr(key) == 'q': # terminate system
print("\tESC key detected!")
done = True
elif chr(key) == 'v': # on/off verbose
verbose = not verbose
print("Verbose was toggled {}".format('ON' if verbose else 'OFF'))
elif chr(key) == 'a': # adjust parameters
custom_args = input_cli()
if custom_args.verbose: verbose = custom_args.verbose
if custom_args.voxel_size: voxel_size = custom_args.voxel_size
if custom_args.fitness_threshold: fitness_threshold = custom_args.fitness_threshold
if custom_args.selected_pose_id: selected_pose_id = custom_args.selected_pose_id
elif chr(key) == 's': # segment objects and infer grasp poses
print("==================================================")
#STEP: 1. Record and convert to point cloud
start_time = time.time()
scene_points = rgbd_to_pointcloud1(rgb=rgb, depth=depth_distance, scale=0.001,
focal_length_x=520, focal_length_y=513, label=False, offset=1)
print("Time recording: {}".format(time.time() - start_time))
# scene_point_cloud = PointCloud(points=scene_points[:, :3]); scene_point_cloud.visualize()
#STEP: 2. Segment object
start_time = time.time()
wrench_points, pipe_points = segment_obj_in_scene(scene_points=scene_points,
n_points=n_points, n_channels=n_channels)
print("Time segmenting: {}".format(time.time() - start_time))
#STEP: 3. Match surface and interpolate poses
start_time = time.time()
detected_poses = []
while (detected_poses == []) and ((len(wrench_points) > 1000) or (len(pipe_points) > 1000)):
#* Wrench
if len(wrench_points) > 1000: # the area of detected wrench must be big enough
# Match surface
surf_xtran = match_object_surface(surface=wrench_points[:, :n_channels], model=pc_models[0][:, :n_channels],
model_centroid=centroid_models[0], voxel_size=voxel_size, verbose=verbose)
# Interpolate grasp poses
if surf_xtran.fitness > fitness_threshold:
for i in range(len(pose_models[0])):
print(mpose2mmpose(pose_models[0][i]).astype(int))
pose = interpolate_pose(ref_pose=pose_models[0][i], surf_xtran=surf_xtran.transformation,
rotx=rotx, roty=roty, rotz=rotz, tranl=tranl, pc_centroid=centroid_models[0])
print(mpose2mmpose(pose).astype(int))
pose_camera_coord = np.dot(np.dot(np.dot(np.dot(tranl, rotz), roty), rotx), np.append(arr=pose[:3], values=1, axis=None))
dis = scene_points[:, :3] - pose_camera_coord[:3]
dis = np.sum(dis * dis, axis=1)
if (origin == []): origin = scene_points[np.argmin(dis), 6:8]
detected_poses.append([pose, 1])
#* Pipe
if len(pipe_points) > 1000: # the area of detected pipe must be big enough
# Match surface
surf_xtran = match_object_surface(surface=pipe_points[:, :n_channels], model=pc_models[1][:, :n_channels],
model_centroid=centroid_models[1], voxel_size=voxel_size, verbose=verbose)
# Interpolate grasp poses
if surf_xtran.fitness > fitness_threshold:
for i in range(len(pose_models[1])):
print(mpose2mmpose(pose_models[1][i]).astype(int))
pose = interpolate_pose(ref_pose=pose_models[1][i], surf_xtran=surf_xtran.transformation,
rotx=rotx, roty=roty, rotz=rotz, tranl=tranl, pc_centroid=centroid_models[1])
print(mpose2mmpose(pose).astype(int))
detected_poses.append([pose, 2])
break
print("Time matching: {}".format(time.time() - start_time))
elif chr(key) == 'g': # execute grasping
if len(detected_poses) > 0:
x, y, z, rx, ry, rz, mode, aperture = mpose2mmpose(detected_poses[selected_pose_id][0])
if rx > 180: rx = 180
if ry > 180: ry = 180
res = requests.put(url=url_tfg + 'mode/', data=json.dumps(mode), headers=headers)
res = requests.put(url=url_tfg + 'position/',
data=json.dumps({"pos": aperture, "speed": 110, "force": 20}),
headers=headers)
res = requests.put(url=url_tx40 + 'position/',
data=json.dumps({'x': -250, 'y': 250, 'z': 50, 'rx': 180, 'ry': 0, 'rz': 0}),
headers=headers)
res = requests.put(url=url_tx40 + 'position/',
data=json.dumps({'x': x, 'y': y, 'z': 50, 'rx': rx, 'ry': ry, 'rz': rz}),
headers=headers)
res = requests.put(url=url_tx40 + 'position/',
data=json.dumps({'x': x, 'y': y, 'z': z, 'rx': rx, 'ry': ry, 'rz': rz}),
headers=headers)
res = requests.put(url=url_tfg + 'position/',
data=json.dumps({"pos": 255, "speed": 110, "force": 20}),
headers=headers)
res = requests.put(url=url_tx40 + 'position/',
data=json.dumps({'x': x, 'y': y, 'z': 50, 'rx': rx, 'ry': ry, 'rz': rz}),
headers=headers)
res = requests.put(url=url_tx40 + 'position/',
data=json.dumps({'x': -320, 'y': -75, 'z': 50, 'rx': 180, 'ry': 0, 'rz': 0}),
headers=headers)
res = requests.put(url=url_tx40 + 'position/',
data=json.dumps({'x': -320, 'y': -75, 'z': 20, 'rx': 180, 'ry': 0, 'rz': 0}),
headers=headers)
res = requests.put(url=url_tfg + 'position/',
data=json.dumps({"pos": 0, "speed": 110, "force": 20}),
headers=headers)
res = requests.put(url=url_tx40 + 'position/',
data=json.dumps({'x': -320, 'y': -75, 'z': 50, 'rx': 180, 'ry': 0, 'rz': 0}),
headers=headers)
else:
print("There is no viable pose to grasp yet.")
res = requests.put(url=url_tx40 + 'state/', data=json.dumps(False), headers=headers)
res = requests.put(url=url_tfg + 'state/', data=json.dumps(False), headers=headers)
camera.stop()
print("System Terminated.")
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Your program name!!!')
argparser.add_argument('-d', '--desc', help='description of the program', default='HANDBOOK')
argparser.add_argument('-c', '--conf', default='./config.json', help='path to configuration file')
args = argparser.parse_args()
_main_(args)
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
skel/skel.go | package skel
import (
"encoding/json"
"fmt"
"os"
"github.com/Sirupsen/logrus"
)
const NAME string = "skel"
const PROVIDER string = "skel" //we might want to make this an env tied to nginx version or app name maybe...
const PROTOCOL_VERSION string = "1"
//SkelConfig is the keeper of the config
type SkelConfig struct {
SkelHost string
}
// InventoryData is the data type for inventory data produced by a plugin data
// source and emitted to the agent's inventory data store
type InventoryData map[string]interface{}
// MetricData is the data type for events produced by a plugin data source and
// emitted to the agent's metrics data store
type MetricData map[string]interface{}
// EventData is the data type for single shot events
type EventData map[string]interface{}
// PluginData defines the format of the output JSON that plugins will return
type PluginData struct {
Name string `json:"name"`
ProtocolVersion string `json:"protocol_version"`
PluginVersion string `json:"plugin_version"`
Metrics []MetricData `json:"metrics"`
Inventory map[string]InventoryData `json:"inventory"`
Events []EventData `json:"events"`
Status string `json:"status"`
}
// OutputJSON takes an object and prints it as a JSON string to the stdout.
// If the pretty attribute is set to true, the JSON will be indented for easy reading.
func OutputJSON(data interface{}, pretty bool) error {
var output []byte
var err error
if pretty {
output, err = json.MarshalIndent(data, "", "\t")
} else {
output, err = json.Marshal(data)
}
if err != nil {
return fmt.Errorf("Error outputting JSON: %s", err)
}
if string(output) == "null" {
fmt.Println("[]")
} else {
fmt.Println(string(output))
}
return nil
}
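// Run builds the plugin payload: it reads the target host from the KEY environment variable,
// validates it, collects one LoadBalancerSample metric and prints everything as JSON.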
func Run(log *logrus.Logger, prettyPrint bool, version string) {
// Initialize the output structure
var data = PluginData{
Name: NAME,
ProtocolVersion: PROTOCOL_VERSION,
PluginVersion: version,
Inventory: make(map[string]InventoryData),
Metrics: make([]MetricData, 0),
Events: make([]EventData, 0),
}
var config = SkelConfig{
SkelHost: os.Getenv("KEY"),
}
validateConfig(log, config)
var metric = getMetric(log, config)
data.Metrics = append(data.Metrics, metric)
fatalIfErr(log, OutputJSON(data, prettyPrint))
}
func getMetric(log *logrus.Logger, config SkelConfig) map[string]interface{} {
return map[string]interface{}{
"event_type": "LoadBalancerSample",
"provider": PROVIDER,
"skel.stat": 1,
}
}
func validateConfig(log *logrus.Logger, config SkelConfig) {
if config.SkelHost == "" {
log.Fatal("Config Yaml is missing values. Please check the config to continue")
}
}
func fatalIfErr(log *logrus.Logger, err error) {
if err != nil {
log.WithError(err).Fatal("can't continue")
}
}
| [
"\"KEY\""
]
| []
| [
"KEY"
]
| [] | ["KEY"] | go | 1 | 0 | |
z_conncut_test.go | // +build !posix
// Copyright 2020 The Godror Authors
//
//
// SPDX-License-Identifier: UPL-1.0 OR Apache-2.0
package godror_test
import (
"bytes"
"context"
"database/sql"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"testing"
"time"
"github.com/godror/godror"
)
// TestConnCut tests prepared statements handling connection cutting.
//
// WARNING: this won't work if the remote needs TLS !!!
func TestConnCut(t *testing.T) {
if os.Getenv("GODROR_TEST_DB") == "" {
t.Skip("TestConnCut does not work with the default TLS'd Cloud DB")
}
// First, find out the remote address of the connection
rem1 := make(map[string]net.TCPAddr)
if err := getRemotes(rem1); err != nil {
t.Fatal(err)
}
P, err := godror.ParseConnString(testConStr)
if err != nil {
t.Fatal(err)
}
P.StandaloneConnection = true
db, err := sql.Open("godror", P.StringWithPassword())
if err != nil {
t.Fatal(err)
}
defer db.Close()
ctx, cancel := context.WithTimeout(testContext("ConnCut"), 10*time.Second)
defer cancel()
const qry = "SELECT SYS_CONTEXT('userenv', 'service_name') FROM all_objects"
var upstream net.TCPAddr
rem2 := make(map[string]net.TCPAddr)
var serviceName string
for i := 0; i < 10; i++ {
for k := range rem2 {
delete(rem2, k)
}
shortCtx, shortCancel := context.WithTimeout(ctx, 3*time.Second)
rows, err := db.QueryContext(shortCtx, qry)
if err != nil {
t.Fatal(err)
}
defer rows.Close()
rows.Next()
err = rows.Scan(&serviceName)
shortCancel()
if err != nil {
t.Fatal(err)
}
err = getRemotes(rem2)
if err != nil {
t.Fatal(err)
}
t.Log("service:", serviceName)
for k := range rem2 {
if _, ok := rem1[k]; ok {
delete(rem2, k)
continue
}
upstream = rem2[k]
}
if len(rem2) == 1 {
break
}
}
db.Close()
if len(rem2) != 1 {
t.Skipf("cannot find remote address of %q: when connecting to it, %v connections has been created",
testConStr, rem2)
}
t.Log("upstream:", upstream.String())
// Second, create proxy for it
px, err := newTCPProxy(ctx, upstream, t)
if err != nil {
t.Fatal(err)
}
pxCtx, pxCancel := context.WithCancel(ctx)
defer pxCancel()
go func() { px.Serve(pxCtx) }()
P.DSN = px.ListenAddr() + "/" + serviceName
db, err = sql.Open("godror", P.StringWithPassword())
if err != nil {
t.Fatal(err)
}
defer db.Close()
db.SetMaxOpenConns(2)
db.SetMaxIdleConns(2)
t.Log("pinging", P.String())
time.Sleep(100 * time.Millisecond)
shortCtx, shortCancel := context.WithTimeout(ctx, 3*time.Second)
done := make(chan struct{})
go func() {
select {
case <-ctx.Done():
pxCancel()
case <-done:
return
}
}()
err = db.PingContext(shortCtx)
close(done)
shortCancel()
if err != nil {
t.Skip(err)
}
// Now the real test
// 1. prepare statement
stmt, err := db.PrepareContext(ctx, "SELECT object_name FROM all_objects WHERE ROWNUM <= :2")
if err != nil {
t.Fatal(err)
}
defer stmt.Close()
// force both connections to be in use
rows1, err := stmt.QueryContext(ctx, 99)
if err != nil {
t.Fatal(err)
}
rows2, err := stmt.QueryContext(ctx, 99)
if err != nil {
rows1.Close()
t.Fatal(err)
}
rows2.Close()
rows1.Close()
for i := 0; i < 10; i++ {
shortCtx, shortCancel = context.WithTimeout(ctx, 3*time.Second)
var s string
err = stmt.QueryRowContext(shortCtx, 1).Scan(&s)
shortCancel()
if err != nil {
if i <= 3 {
t.Errorf("%d. %v", i+1, err)
} else {
t.Logf("%d. %v", i+1, err)
}
}
t.Log(s)
shortCtx, shortCancel = context.WithTimeout(ctx, 3*time.Second)
err := db.PingContext(shortCtx)
shortCancel()
if err != nil {
if i <= 3 {
t.Error(err)
} else {
t.Log(err)
}
}
if i == 3 {
t.Log("canceling proxy")
go func() {
pxCancel()
}()
}
}
}
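// tcpProxy relays TCP traffic between the driver and the real database so the test can cut the
// connection at a controlled moment.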
type tcpProxy struct {
upstream net.TCPAddr
lsnr *net.TCPListener
*testing.T
}
func (px tcpProxy) ListenAddr() string { return px.lsnr.Addr().String() }
func newTCPProxy(ctx context.Context, upstream net.TCPAddr, t *testing.T) (*tcpProxy, error) {
var d net.Dialer
ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
conn, err := d.DialContext(ctx, "tcp", upstream.String())
cancel()
if err != nil {
return nil, err
}
conn.Close()
px := tcpProxy{upstream: upstream, T: t}
px.lsnr, err = net.ListenTCP("tcp", &net.TCPAddr{IP: net.ParseIP("127.0.0.1")}) // random port on localhost
return &px, err
}
func (px *tcpProxy) Serve(ctx context.Context) error {
go func() {
<-ctx.Done()
px.lsnr.Close()
}()
for {
down, err := px.lsnr.AcceptTCP()
if err != nil {
if px.T != nil {
px.Log(err)
}
var tErr interface{ Temporary() bool }
if errors.As(err, &tErr) && !tErr.Temporary() {
return err
}
continue
}
go px.handleConn(ctx, down)
}
}
func (px *tcpProxy) handleConn(ctx context.Context, down *net.TCPConn) error {
defer down.Close()
up, err := net.DialTCP("tcp", nil, &px.upstream)
if err != nil {
if px.T != nil {
px.Log(err)
}
return err
}
defer up.Close()
go func() {
<-ctx.Done()
up.Close()
down.Close()
}()
pipe := func(ctx context.Context, dst, src *net.TCPConn) error {
buf := make([]byte, 512)
var consecEOF int
//remote := src.RemoteAddr()
for {
if err := ctx.Err(); err != nil {
dst.Close()
return err
}
n, err := src.Read(buf)
if n != 0 {
if _, writeErr := dst.Write(buf[:n]); writeErr != nil {
return writeErr
}
}
if err == nil {
consecEOF = 0
} else if err == io.EOF {
consecEOF++
if consecEOF > 3 {
return err
}
time.Sleep(time.Second)
continue
} else {
consecEOF = 0
if px.T != nil {
px.Logf("Copy from %s to %s: %v", src.RemoteAddr(), dst.RemoteAddr(), err)
}
return err
}
}
}
slowCtx, slowCancel := context.WithCancel(context.Background())
defer slowCancel()
go func() {
pipe(ctx, down, up)
time.Sleep(2 * time.Second)
slowCancel()
}()
return pipe(slowCtx, up, down)
}
// /proc/self/net/tcp 3. col is rem_addr:port
func getRemotes(dest map[string]net.TCPAddr) error {
for _, nm := range []string{"/proc/self/net/tcp", "/proc/self/net/tcp6"} {
b, err := ioutil.ReadFile(nm)
if err != nil {
return err
}
lines := bytes.Split(b, []byte{'\n'})
if len(lines) < 1 {
return errors.New("empty " + nm)
} else if len(lines) < 2 {
return nil
}
for _, line := range lines[1:] {
fields := bytes.Fields(line)
if len(fields) <= 2 {
continue
}
var local, remote net.TCPAddr
if _, err := fmt.Sscanf(string(fields[1])+" "+string(fields[2]), "%X:%X %X:%X",
&local.IP, &local.Port, &remote.IP, &remote.Port,
); err != nil {
return err
}
if remote.Port != 0 {
reverseBytes(local.IP)
reverseBytes(remote.IP)
dest[local.String()] = remote
}
}
}
return nil
}
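// reverseBytes reverses p in place; it fixes the byte order of addresses parsed from /proc/self/net/tcp.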
func reverseBytes(p []byte) {
for i, j := 0, len(p)-1; i < j; i, j = i+1, j-1 {
p[i], p[j] = p[j], p[i]
}
}
| [
"\"GODROR_TEST_DB\""
]
| []
| [
"GODROR_TEST_DB"
]
| [] | ["GODROR_TEST_DB"] | go | 1 | 0 | |
frameworks/Go/go/setup.py | import subprocess
import sys
import os
import setup_util
def start(args, logfile, errfile):
setup_util.replace_text("go/src/hello/hello.go", "tcp\(.*:3306\)", "tcp(" + args.database_host + ":3306)")
if os.name == 'nt':
#subprocess.call("rmdir /s /q pkg\\windows_amd64", shell=True, cwd="go")
#subprocess.call("rmdir /s /q src\\github.com", shell=True, cwd="go")
#subprocess.call("del /s /q /f bin\\hello.exe", shell=True, cwd="go")
subprocess.call("set GOPATH=C:\\FrameworkBenchmarks\\go&& go get ./...", shell=True, cwd="go", stderr=errfile, stdout=logfile)
subprocess.Popen("setup.bat", shell=True, cwd="go", stderr=errfile, stdout=logfile)
return 0
# os.environ["GOPATH"] = os.path.expanduser('~/FrameworkBenchmarks/go')
subprocess.call("go get ./...", shell=True, cwd="go", stderr=errfile, stdout=logfile)
subprocess.Popen("go run src/hello/hello.go".rsplit(" "), cwd="go", stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
if os.name == 'nt':
subprocess.call("taskkill /f /im go.exe > NUL", shell=True, stderr=errfile, stdout=logfile)
subprocess.call("taskkill /f /im hello.exe > NUL", shell=True, stderr=errfile, stdout=logfile)
return 0
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'hello' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 15)
return 0
| []
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | python | 1 | 0 | |
pkg/networkservice/up/option.go | // Copyright (c) 2021 Doc.ai and/or its affiliates.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package up
import (
"context"
"github.com/edwarnicke/govpp/binapi/interface_types"
)
type options struct {
loadIfIndex ifIndexFunc
}
// Option is an option pattern for upClient/Server
type Option func(o *options)
// ifIndexFunc is a function to load the interface index
type ifIndexFunc func(ctx context.Context, isClient bool) (value interface_types.InterfaceIndex, ok bool)
// WithLoadSwIfIndex - sets function to load the interface index
func WithLoadSwIfIndex(f ifIndexFunc) Option {
return func(o *options) {
o.loadIfIndex = f
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
s3update.go | package s3update
import (
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/mitchellh/ioprogress"
)
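// Updater describes where the release binaries and the version file live on S3.
// A minimal, illustrative configuration (bucket, region and keys below are placeholders) looks like:
//
//	err := s3update.AutoUpdate(s3update.Updater{
//		CurrentVersion: Version, // e.g. set at build time with -ldflags "-X main.Version=42"
//		S3Bucket:       "my-bucket",
//		S3Region:       "us-east-1",
//		S3ReleaseKey:   "cli/releases/cli-{{OS}}-{{ARCH}}",
//		S3VersionKey:   "cli/releases/current-version",
//	})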
type Updater struct {
// CurrentVersion represents the current binary version.
// This is generally set at the compilation time with -ldflags "-X main.Version=42"
// See the README for additional information
CurrentVersion string
// S3Bucket represents the S3 bucket containing the different files used by s3update.
S3Bucket string
// S3Region represents the S3 region you want to work in.
S3Region string
// S3ReleaseKey represents the raw key on S3 to download new versions.
// The value can be something like `cli/releases/cli-{{OS}}-{{ARCH}}`
S3ReleaseKey string
// S3VersionKey represents the key on S3 to download the current version
S3VersionKey string
// AWSCredentials represents the config to use to connect to s3
AWSCredentials *credentials.Credentials
}
// validate ensures every required field is correctly set. Otherwise an error is returned.
func (u Updater) validate() error {
if u.CurrentVersion == "" {
return fmt.Errorf("no version set")
}
if u.S3Bucket == "" {
return fmt.Errorf("no bucket set")
}
if u.S3Region == "" {
return fmt.Errorf("no s3 region")
}
if u.S3ReleaseKey == "" {
return fmt.Errorf("no s3ReleaseKey set")
}
if u.S3VersionKey == "" {
return fmt.Errorf("no s3VersionKey set")
}
return nil
}
// AutoUpdate synchronously verifies that the binary is up-to-date.
// If a new version has been released, it is downloaded automatically.
// It's possible to bypass this mechanism by setting the S3UPDATE_DISABLED environment variable.
func AutoUpdate(u Updater) error {
if os.Getenv("S3UPDATE_DISABLED") != "" {
fmt.Println("s3update: autoupdate disabled")
return nil
}
if err := u.validate(); err != nil {
fmt.Printf("s3update: %s - skipping auto update\n", err.Error())
return err
}
return runAutoUpdate(u)
}
// generateS3ReleaseKey dynamically builds the S3 key depending on the os and architecture.
func generateS3ReleaseKey(path string) string {
path = strings.Replace(path, "{{OS}}", runtime.GOOS, -1)
path = strings.Replace(path, "{{ARCH}}", runtime.GOARCH, -1)
return path
}
func runAutoUpdate(u Updater) error {
localVersion, err := strconv.ParseInt(u.CurrentVersion, 10, 64)
if err != nil || localVersion == 0 {
return fmt.Errorf("invalid local version")
}
svc := s3.New(session.New(), &aws.Config{
Region: aws.String(u.S3Region),
Credentials: u.AWSCredentials,
})
resp, err := svc.GetObject(&s3.GetObjectInput{Bucket: aws.String(u.S3Bucket), Key: aws.String(u.S3VersionKey)})
if err != nil {
return err
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
remoteVersion, err := strconv.ParseInt(string(b), 10, 64)
if err != nil || remoteVersion == 0 {
return fmt.Errorf("invalid remote version")
}
fmt.Printf("s3update: Local Version %d - Remote Version: %d\n", localVersion, remoteVersion)
if localVersion < remoteVersion {
fmt.Printf("s3update: version outdated ... \n")
s3Key := generateS3ReleaseKey(u.S3ReleaseKey)
resp, err := svc.GetObject(&s3.GetObjectInput{Bucket: aws.String(u.S3Bucket), Key: aws.String(s3Key)})
if err != nil {
return err
}
defer resp.Body.Close()
progressR := &ioprogress.Reader{
Reader: resp.Body,
Size: *resp.ContentLength,
DrawInterval: 500 * time.Millisecond,
DrawFunc: ioprogress.DrawTerminalf(os.Stdout, func(progress, total int64) string {
bar := ioprogress.DrawTextFormatBar(40)
return fmt.Sprintf("%s %20s", bar(progress, total), ioprogress.DrawTextFormatBytes(progress, total))
}),
}
dest, err := os.Executable()
if err != nil {
return err
}
// Move the old version to a backup path that we can recover from
// in case the upgrade fails
destBackup := dest + ".bak"
if _, err := os.Stat(dest); err == nil {
os.Rename(dest, destBackup)
}
// Use the same flags that ioutil.WriteFile uses
f, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
os.Rename(destBackup, dest)
return err
}
defer f.Close()
fmt.Printf("s3update: downloading new version to %s\n", dest)
if _, err := io.Copy(f, progressR); err != nil {
os.Rename(destBackup, dest)
return err
}
// The file must be closed already so we can execute it in the next step
f.Close()
// Removing backup
os.Remove(destBackup)
fmt.Printf("s3update: updated with success to version %d\nRestarting application\n", remoteVersion)
// The update completed, we can now restart the application without requiring any user action.
if err := syscall.Exec(dest, os.Args, os.Environ()); err != nil {
return err
}
os.Exit(0)
}
return nil
}
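// autoUpdateExample is an illustrative sketch, not part of the original file.
// It shows how a CLI built on this package might invoke AutoUpdate; the bucket,
// keys and version value below are hypothetical placeholders, and credentials
// are read from the environment via aws-sdk-go's NewEnvCredentials helper.
func autoUpdateExample(version string) {
	err := AutoUpdate(Updater{
		CurrentVersion: version, // typically injected at build time, e.g. -ldflags "-X main.Version=42"
		S3Bucket:       "my-release-bucket",
		S3Region:       "us-east-1",
		S3ReleaseKey:   "cli/releases/cli-{{OS}}-{{ARCH}}", // {{OS}}/{{ARCH}} expand to runtime.GOOS/GOARCH
		S3VersionKey:   "cli/releases/VERSION",
		AWSCredentials: credentials.NewEnvCredentials(),
	})
	if err != nil {
		fmt.Printf("s3update: %v\n", err)
	}
}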
| [
"\"S3UPDATE_DISABLED\""
]
| []
| [
"S3UPDATE_DISABLED"
]
| [] | ["S3UPDATE_DISABLED"] | go | 1 | 0 | |
clients/horizonclient/operation_request_test.go | package horizonclient
import (
"context"
"testing"
"github.com/stellar/go/protocols/horizon/operations"
"github.com/stellar/go/support/http/httptest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestOperationRequestBuildUrl(t *testing.T) {
op := OperationRequest{endpoint: "operations"}
endpoint, err := op.BuildURL()
// It should return valid all operations endpoint and no errors
require.NoError(t, err)
assert.Equal(t, "operations", endpoint)
op = OperationRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", endpoint: "operations"}
endpoint, err = op.BuildURL()
// It should return valid account operations endpoint and no errors
require.NoError(t, err)
assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/operations", endpoint)
op = OperationRequest{ForLedger: 123, endpoint: "operations"}
endpoint, err = op.BuildURL()
// It should return valid ledger operations endpoint and no errors
require.NoError(t, err)
assert.Equal(t, "ledgers/123/operations", endpoint)
op = OperationRequest{forOperationID: "123", endpoint: "operations"}
endpoint, err = op.BuildURL()
// It should return valid operation operations endpoint and no errors
require.NoError(t, err)
assert.Equal(t, "operations/123", endpoint)
op = OperationRequest{ForTransaction: "123", endpoint: "payments"}
endpoint, err = op.BuildURL()
// It should return valid transaction payments endpoint and no errors
require.NoError(t, err)
assert.Equal(t, "transactions/123/payments", endpoint)
op = OperationRequest{ForLedger: 123, forOperationID: "789", endpoint: "operations"}
_, err = op.BuildURL()
// error case: too many parameters for building any operation endpoint
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "invalid request: too many parameters")
}
op = OperationRequest{Cursor: "123456", Limit: 30, Order: OrderAsc, endpoint: "operations", Join: "transactions"}
endpoint, err = op.BuildURL()
// It should return valid all operations endpoint with query params and no errors
require.NoError(t, err)
assert.Equal(t, "operations?cursor=123456&join=transactions&limit=30&order=asc", endpoint)
op = OperationRequest{Cursor: "123456", Limit: 30, Order: OrderAsc, endpoint: "payments", Join: "transactions"}
endpoint, err = op.BuildURL()
// It should return valid all operations endpoint with query params and no errors
require.NoError(t, err)
assert.Equal(t, "payments?cursor=123456&join=transactions&limit=30&order=asc", endpoint)
op = OperationRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", endpoint: "payments", Join: "transactions"}
endpoint, err = op.BuildURL()
// It should return valid all operations endpoint with query params and no errors
require.NoError(t, err)
assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/payments?join=transactions", endpoint)
op = OperationRequest{forOperationID: "1234", endpoint: "payments", Join: "transactions"}
endpoint, err = op.BuildURL()
// It should return valid all operations endpoint with query params and no errors
require.NoError(t, err)
assert.Equal(t, "operations/1234?join=transactions", endpoint)
}
func TestNextOperationsPage(t *testing.T) {
hmock := httptest.NewClient()
client := &Client{
HorizonURL: "https://localhost/",
HTTP: hmock,
}
operationRequest := OperationRequest{Limit: 2}
hmock.On(
"GET",
"https://localhost/operations?limit=2",
).ReturnString(200, firstOperationsPage)
ops, err := client.Operations(operationRequest)
if assert.NoError(t, err) {
assert.Equal(t, len(ops.Embedded.Records), 2)
}
hmock.On(
"GET",
"https://horizon-testnet.stellar.org/operations?cursor=661424967682&limit=2&order=asc",
).ReturnString(200, emptyOperationsPage)
nextPage, err := client.NextOperationsPage(ops)
if assert.NoError(t, err) {
assert.Equal(t, len(nextPage.Embedded.Records), 0)
}
}
func TestOperationRequestStreamOperations(t *testing.T) {
hmock := httptest.NewClient()
client := &Client{
HorizonURL: "https://localhost/",
HTTP: hmock,
}
// All operations
operationRequest := OperationRequest{}
ctx, cancel := context.WithCancel(context.Background())
hmock.On(
"GET",
"https://localhost/operations?cursor=now",
).ReturnString(200, operationStreamResponse)
operationStream := make([]operations.Operation, 1)
err := client.StreamOperations(ctx, operationRequest, func(op operations.Operation) {
operationStream[0] = op
cancel()
})
if assert.NoError(t, err) {
assert.Equal(t, operationStream[0].GetType(), "create_account")
}
// Account payments
operationRequest = OperationRequest{ForAccount: "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR"}
ctx, cancel = context.WithCancel(context.Background())
hmock.On(
"GET",
"https://localhost/accounts/GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR/payments?cursor=now",
).ReturnString(200, operationStreamResponse)
err = client.StreamPayments(ctx, operationRequest, func(op operations.Operation) {
operationStream[0] = op
cancel()
})
if assert.NoError(t, err) {
payment, ok := operationStream[0].(operations.CreateAccount)
assert.Equal(t, ok, true)
assert.Equal(t, payment.Funder, "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR")
}
// test connection error
operationRequest = OperationRequest{}
ctx, cancel = context.WithCancel(context.Background())
hmock.On(
"GET",
"https://localhost/operations?cursor=now",
).ReturnString(500, operationStreamResponse)
err = client.StreamOperations(ctx, operationRequest, func(op operations.Operation) {
operationStream[0] = op
cancel()
})
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "got bad HTTP status code 500")
}
}
func TestManageSellManageBuyOfferOfferID(t *testing.T) {
hmock := httptest.NewClient()
client := &Client{
HorizonURL: "https://localhost/",
HTTP: hmock,
}
testCases := []struct {
desc string
payload string
}{
{
desc: "offer_id as a string",
payload: manageSellBuyOfferOperationsPage,
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
hmock.On(
"GET",
"https://localhost/operations",
).ReturnString(200, tc.payload)
operationRequest := OperationRequest{}
ops, err := client.Operations(operationRequest)
if assert.NoError(t, err) {
assert.Equal(t, len(ops.Embedded.Records), 2)
}
mso, ok := ops.Embedded.Records[0].(operations.ManageSellOffer)
assert.True(t, ok)
assert.Equal(t, int64(127538671), mso.OfferID)
mbo, ok := ops.Embedded.Records[1].(operations.ManageBuyOffer)
assert.True(t, ok)
assert.Equal(t, int64(127538672), mbo.OfferID)
})
}
}
var operationStreamResponse = `data: {"_links":{"self":{"href":"https://horizon-testnet.stellar.org/operations/4934917427201"},"transaction":{"href":"https://horizon-testnet.stellar.org/transactions/1c1449106a54cccd8a2ec2094815ad9db30ae54c69c3309dd08d13fdb8c749de"},"effects":{"href":"https://horizon-testnet.stellar.org/operations/4934917427201/effects"},"succeeds":{"href":"https://horizon-testnet.stellar.org/effects?order=desc\u0026cursor=4934917427201"},"precedes":{"href":"https://horizon-testnet.stellar.org/effects?order=asc\u0026cursor=4934917427201"}},"id":"4934917427201","paging_token":"4934917427201","transaction_successful":true,"source_account":"GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR","type":"create_account","type_i":0,"created_at":"2019-02-27T11:32:39Z","transaction_hash":"1c1449106a54cccd8a2ec2094815ad9db30ae54c69c3309dd08d13fdb8c749de","starting_balance":"10000.0000000","funder":"GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR","account":"GDBLBBDIUULY3HGIKXNK6WVBISY7DCNCDA45EL7NTXWX5R4UZ26HGMGS"}
`
var firstOperationsPage = `{
"_links": {
"self": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=&limit=2&order=asc"
},
"next": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967682&limit=2&order=asc"
},
"prev": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967681&limit=2&order=desc"
}
},
"_embedded": {
"records": [
{
"_links": {
"self": {
"href": "https://horizon-testnet.stellar.org/operations/661424967681"
},
"transaction": {
"href": "https://horizon-testnet.stellar.org/transactions/749e4f8933221b9942ef38a02856803f379789ec8d971f1f60535db70135673e"
},
"effects": {
"href": "https://horizon-testnet.stellar.org/operations/661424967681/effects"
},
"succeeds": {
"href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=661424967681"
},
"precedes": {
"href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=661424967681"
}
},
"id": "661424967681",
"paging_token": "661424967681",
"transaction_successful": true,
"source_account": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H",
"type": "create_account",
"type_i": 0,
"created_at": "2019-04-24T09:16:14Z",
"transaction_hash": "749e4f8933221b9942ef38a02856803f379789ec8d971f1f60535db70135673e",
"starting_balance": "10000000000.0000000",
"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H",
"account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR"
},
{
"_links": {
"self": {
"href": "https://horizon-testnet.stellar.org/operations/661424967682"
},
"transaction": {
"href": "https://horizon-testnet.stellar.org/transactions/749e4f8933221b9942ef38a02856803f379789ec8d971f1f60535db70135673e"
},
"effects": {
"href": "https://horizon-testnet.stellar.org/operations/661424967682/effects"
},
"succeeds": {
"href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=661424967682"
},
"precedes": {
"href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=661424967682"
}
},
"id": "661424967682",
"paging_token": "661424967682",
"transaction_successful": true,
"source_account": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H",
"type": "create_account",
"type_i": 0,
"created_at": "2019-04-24T09:16:14Z",
"transaction_hash": "749e4f8933221b9942ef38a02856803f379789ec8d971f1f60535db70135673e",
"starting_balance": "10000.0000000",
"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H",
"account": "GDO34SQXVOSNODK7JCTAXLZUPSAF3JIH4ADQELVIKOQJUWQ3U4BMSCSH"
}
]
}
}`
var emptyOperationsPage = `{
"_links": {
"self": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967682&limit=2&order=asc"
},
"next": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967684&limit=2&order=asc"
},
"prev": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967683&limit=2&order=desc"
}
},
"_embedded": {
"records": []
}
}`
var numberManageSellBuyOfferOperations = `{
"_links": {
"self": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967682&limit=2&order=asc"
},
"next": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967684&limit=2&order=asc"
},
"prev": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967683&limit=2&order=desc"
}
},
"_embedded": {
"records": [
{
"_links": {
"self": {
"href": "https://horizon-testnet.stellar.org/operations/972702718365697"
},
"transaction": {
"href": "https://horizon-testnet.stellar.org/transactions/cfe9eba317025dd0cff111967a3709358153e9ee97472e67c17e42837dd50a52"
},
"effects": {
"href": "https://horizon-testnet.stellar.org/operations/972702718365697/effects"
},
"succeeds": {
"href": "https://horizon-testnet.stellar.org/effects?order=desc\u0026cursor=972702718365697"
},
"precedes": {
"href": "https://horizon-testnet.stellar.org/effects?order=asc\u0026cursor=972702718365697"
}
},
"id": "972702718365697",
"paging_token": "972702718365697",
"transaction_successful": true,
"source_account": "GBPPEHGF322UNA62WHRHBCUBCVOIT3SLUY7U7XQEEISZ5B2JLZ3AYTDC",
"type": "manage_offer",
"type_i": 3,
"created_at": "2019-11-13T16:46:36Z",
"transaction_hash": "cfe9eba317025dd0cff111967a3709358153e9ee97472e67c17e42837dd50a52",
"amount": "1000.0000000",
"price": "0.1312531",
"price_r": {
"n": 265,
"d": 2019
},
"buying_asset_type": "credit_alphanum4",
"buying_asset_code": "BAT",
"buying_asset_issuer": "GBBJMSXCTLXVOYRL7SJ5ABLJ3GGCUFQXCFIXYUOHZZUDAZJKLXCO32AU",
"selling_asset_type": "native",
"offer_id": 127538671
},
{
"_links": {
"self": {
"href": "https://horizon-testnet.stellar.org/operations/158041911595009"
},
"transaction": {
"href": "https://horizon-testnet.stellar.org/transactions/8a4db87e4749130ba32924943c2f219de497fe2d4f3e074187c5d2159ca2d134"
},
"effects": {
"href": "https://horizon-testnet.stellar.org/operations/158041911595009/effects"
},
"succeeds": {
"href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=158041911595009"
},
"precedes": {
"href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=158041911595009"
}
},
"id": "158041911595009",
"paging_token": "158041911595009",
"transaction_successful": true,
"source_account": "GBBXM7GVMXZMQWDEKSWGEW6GT6XMPBLEVEPLYWIQF3SRS43AIJVU3QES",
"type": "manage_buy_offer",
"type_i": 12,
"created_at": "2019-11-01T17:06:47Z",
"transaction_hash": "8a4db87e4749130ba32924943c2f219de497fe2d4f3e074187c5d2159ca2d134",
"amount": "1.0000000",
"price": "0.5000000",
"price_r": {
"n": 1,
"d": 2
},
"buying_asset_type": "credit_alphanum12",
"buying_asset_code": "MosaiRMBA",
"buying_asset_issuer": "GBBWA24VLGPVMMFMF2OJHW3QHFVSILK2UJSNTORRC6QHK6EPTUADAJFA",
"selling_asset_type": "native",
"offer_id": 127538672
}
]
}
}`
var manageSellBuyOfferOperationsPage = `{
"_links": {
"self": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967682&limit=2&order=asc"
},
"next": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967684&limit=2&order=asc"
},
"prev": {
"href": "https://horizon-testnet.stellar.org/operations?cursor=661424967683&limit=2&order=desc"
}
},
"_embedded": {
"records": [
{
"_links": {
"self": {
"href": "https://horizon-testnet.stellar.org/operations/972702718365697"
},
"transaction": {
"href": "https://horizon-testnet.stellar.org/transactions/cfe9eba317025dd0cff111967a3709358153e9ee97472e67c17e42837dd50a52"
},
"effects": {
"href": "https://horizon-testnet.stellar.org/operations/972702718365697/effects"
},
"succeeds": {
"href": "https://horizon-testnet.stellar.org/effects?order=desc\u0026cursor=972702718365697"
},
"precedes": {
"href": "https://horizon-testnet.stellar.org/effects?order=asc\u0026cursor=972702718365697"
}
},
"id": "972702718365697",
"paging_token": "972702718365697",
"transaction_successful": true,
"source_account": "GBPPEHGF322UNA62WHRHBCUBCVOIT3SLUY7U7XQEEISZ5B2JLZ3AYTDC",
"type": "manage_offer",
"type_i": 3,
"created_at": "2019-11-13T16:46:36Z",
"transaction_hash": "cfe9eba317025dd0cff111967a3709358153e9ee97472e67c17e42837dd50a52",
"amount": "1000.0000000",
"price": "0.1312531",
"price_r": {
"n": 265,
"d": 2019
},
"buying_asset_type": "credit_alphanum4",
"buying_asset_code": "BAT",
"buying_asset_issuer": "GBBJMSXCTLXVOYRL7SJ5ABLJ3GGCUFQXCFIXYUOHZZUDAZJKLXCO32AU",
"selling_asset_type": "native",
"offer_id": "127538671"
},
{
"_links": {
"self": {
"href": "https://horizon-testnet.stellar.org/operations/158041911595009"
},
"transaction": {
"href": "https://horizon-testnet.stellar.org/transactions/8a4db87e4749130ba32924943c2f219de497fe2d4f3e074187c5d2159ca2d134"
},
"effects": {
"href": "https://horizon-testnet.stellar.org/operations/158041911595009/effects"
},
"succeeds": {
"href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=158041911595009"
},
"precedes": {
"href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=158041911595009"
}
},
"id": "158041911595009",
"paging_token": "158041911595009",
"transaction_successful": true,
"source_account": "GBBXM7GVMXZMQWDEKSWGEW6GT6XMPBLEVEPLYWIQF3SRS43AIJVU3QES",
"type": "manage_buy_offer",
"type_i": 12,
"created_at": "2019-11-01T17:06:47Z",
"transaction_hash": "8a4db87e4749130ba32924943c2f219de497fe2d4f3e074187c5d2159ca2d134",
"amount": "1.0000000",
"price": "0.5000000",
"price_r": {
"n": 1,
"d": 2
},
"buying_asset_type": "credit_alphanum12",
"buying_asset_code": "MosaiRMBA",
"buying_asset_issuer": "GBBWA24VLGPVMMFMF2OJHW3QHFVSILK2UJSNTORRC6QHK6EPTUADAJFA",
"selling_asset_type": "native",
"offer_id": "127538672"
}
]
}
}`
| []
| []
| []
| [] | [] | go | null | null | null |
hanger/main.go | package main
import (
"fmt"
"net"
"os"
)
func main() {
fmt.Println("Starting...")
l, err := net.Listen("tcp", fmt.Sprintf(":%s", os.Getenv("PORT")))
if err != nil {
panic(err)
}
fmt.Println("Past the listen... :)")
for {
fmt.Println("about to accept connections")
conn, err := l.Accept()
fmt.Println("i've accepted connections")
if err != nil {
panic(err)
}
fmt.Println("ain't gonna do anything")
go func(c net.Conn) {
fmt.Println("writing nothing")
c.Write([]byte{})
}(conn)
}
} | [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
backend/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dubzchat_32347.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/xia2/cli/xia2_main.py | import logging
import os
import sys
import time
import traceback
from dials.util import Sorry
from dials.util.version import dials_version
from libtbx import group_args
import xia2.Driver.timing
import xia2.Handlers.Streams
import xia2.XIA2Version
from xia2.Applications.xia2_helpers import process_one_sweep
from xia2.Applications.xia2_main import (
check_environment,
get_command_line,
help,
write_citations,
)
from xia2.Handlers.Citations import Citations
from xia2.Handlers.Files import cleanup
from xia2.Schema.XProject import XProject
from xia2.Schema.XSweep import XSweep
logger = logging.getLogger("xia2.cli.xia2_main")
def get_ccp4_version():
CCP4 = os.environ.get("CCP4")
if CCP4 is not None:
version_file = os.path.join(CCP4, "lib", "ccp4", "MAJOR_MINOR")
if os.path.exists(version_file):
with open(version_file) as fh:
return fh.read().strip()
def xia2_main(stop_after=None):
"""Actually process something..."""
Citations.cite("xia2")
# print versions of related software
logger.info(dials_version())
ccp4_version = get_ccp4_version()
if ccp4_version:
logger.info("CCP4 %s", ccp4_version)
start_time = time.time()
CommandLine = get_command_line()
# check that something useful has been assigned for processing...
xtals = CommandLine.get_xinfo().get_crystals()
for name, xtal in xtals.items():
if not xtal.get_all_image_names():
logger.info("-----------------------------------" + "-" * len(name))
logger.info("| No images assigned for crystal %s |", name)
logger.info("-----------------------------------" + "-" * len(name))
from xia2.Handlers.Phil import PhilIndex
params = PhilIndex.get_python_object()
mp_params = params.xia2.settings.multiprocessing
njob = mp_params.njob
xinfo = CommandLine.get_xinfo()
logger.info("Project directory: %s", xinfo.path)
if (
params.xia2.settings.developmental.continue_from_previous_job
and os.path.exists("xia2.json")
):
logger.debug("==== Starting from existing xia2.json ====")
xinfo_new = xinfo
xinfo = XProject.from_json(filename="xia2.json")
crystals = xinfo.get_crystals()
crystals_new = xinfo_new.get_crystals()
for crystal_id in crystals_new:
if crystal_id not in crystals:
crystals[crystal_id] = crystals_new[crystal_id]
continue
crystals[crystal_id]._scaler = None # reset scaler
for wavelength_id in crystals_new[crystal_id].get_wavelength_names():
wavelength_new = crystals_new[crystal_id].get_xwavelength(wavelength_id)
if wavelength_id not in crystals[crystal_id].get_wavelength_names():
crystals[crystal_id].add_wavelength(
crystals_new[crystal_id].get_xwavelength(wavelength_new)
)
continue
wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
sweeps_new = wavelength_new.get_sweeps()
sweeps = wavelength.get_sweeps()
sweep_names = {s.get_name() for s in sweeps}
sweep_keys = {
(s.get_directory(), s.get_template(), s.get_image_range())
for s in sweeps
}
for sweep in sweeps_new:
if (
sweep.get_directory(),
sweep.get_template(),
sweep.get_image_range(),
) not in sweep_keys:
if sweep.get_name() in sweep_names:
i = 1
while "SWEEEP%i" % i in sweep_names:
i += 1
sweep._name = "SWEEP%i" % i
break
wavelength.add_sweep(
name=sweep.get_name(),
sample=sweep.sample,
directory=sweep.get_directory(),
image=sweep.get_image(),
beam=sweep.get_beam_centre(),
reversephi=sweep.get_reversephi(),
distance=sweep.get_distance(),
gain=sweep.get_gain(),
dmin=sweep.get_resolution_high(),
dmax=sweep.get_resolution_low(),
polarization=sweep.get_polarization(),
frames_to_process=sweep.get_frames_to_process(),
user_lattice=sweep.get_user_lattice(),
user_cell=sweep.get_user_cell(),
epoch=sweep._epoch,
ice=sweep._ice,
excluded_regions=sweep._excluded_regions,
)
sweep_names.add(sweep.get_name())
crystals = xinfo.get_crystals()
failover = params.xia2.settings.failover
with cleanup(xinfo.path):
if mp_params.mode == "parallel" and njob > 1:
driver_type = mp_params.type
command_line_args = CommandLine.get_argv()[1:]
jobs = []
for crystal_id in crystals:
for wavelength_id in crystals[crystal_id].get_wavelength_names():
wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
sweeps = wavelength.get_sweeps()
for sweep in sweeps:
sweep._get_indexer()
sweep._get_refiner()
sweep._get_integrater()
jobs.append(
(
group_args(
driver_type=driver_type,
stop_after=stop_after,
failover=failover,
command_line_args=command_line_args,
nproc=mp_params.nproc,
crystal_id=crystal_id,
wavelength_id=wavelength_id,
sweep_id=sweep.get_name(),
),
)
)
from xia2.Driver.DriverFactory import DriverFactory
default_driver_type = DriverFactory.get_driver_type()
# run every nth job on the current computer (no need to submit to qsub)
for i_job, arg in enumerate(jobs):
if (i_job % njob) == 0:
arg[0].driver_type = default_driver_type
nproc = mp_params.nproc
qsub_command = mp_params.qsub_command or "qsub"
qsub_command = "%s -V -cwd -pe smp %d" % (qsub_command, nproc)
from libtbx import easy_mp
results = easy_mp.parallel_map(
process_one_sweep,
jobs,
processes=njob,
method="multiprocessing",
qsub_command=qsub_command,
preserve_order=True,
preserve_exception_message=True,
)
# Hack to update sweep with the serialized indexers/refiners/integraters
i_sweep = 0
for crystal_id in crystals:
for wavelength_id in crystals[crystal_id].get_wavelength_names():
wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
remove_sweeps = []
sweeps = wavelength.get_sweeps()
for sweep in sweeps:
success, output, xsweep_dict = results[i_sweep]
if output is not None:
logger.info(output)
if not success:
logger.info("Sweep failed: removing %s", sweep.get_name())
remove_sweeps.append(sweep)
else:
assert xsweep_dict is not None
logger.info("Loading sweep: %s", sweep.get_name())
new_sweep = XSweep.from_dict(xsweep_dict)
sweep._indexer = new_sweep._indexer
sweep._refiner = new_sweep._refiner
sweep._integrater = new_sweep._integrater
i_sweep += 1
for sweep in remove_sweeps:
wavelength.remove_sweep(sweep)
sample = sweep.sample
sample.remove_sweep(sweep)
else:
for crystal_id in list(crystals.keys()):
for wavelength_id in crystals[crystal_id].get_wavelength_names():
wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
remove_sweeps = []
sweeps = wavelength.get_sweeps()
for sweep in sweeps:
from dials.command_line.show import show_experiments
from dxtbx.model.experiment_list import ExperimentListFactory
logger.debug(sweep.get_name())
logger.debug(
show_experiments(
ExperimentListFactory.from_imageset_and_crystal(
sweep.get_imageset(), None
)
)
)
Citations.cite("dials")
try:
if stop_after == "index":
sweep.get_indexer_cell()
else:
sweep.get_integrater_intensities()
sweep.serialize()
except Exception as e:
if failover:
logger.info(
"Processing sweep %s failed: %s",
sweep.get_name(),
str(e),
)
remove_sweeps.append(sweep)
else:
raise
for sweep in remove_sweeps:
wavelength.remove_sweep(sweep)
sample = sweep.sample
sample.remove_sweep(sweep)
# save intermediate xia2.json file in case scaling step fails
xinfo.as_json(filename="xia2.json")
if stop_after not in ("index", "integrate"):
logger.info(xinfo.get_output())
for crystal in list(crystals.values()):
crystal.serialize()
# save final xia2.json file in case report generation fails
xinfo.as_json(filename="xia2.json")
if stop_after not in ("index", "integrate"):
# and the summary file
with open("xia2-summary.dat", "w") as fh:
for record in xinfo.summarise():
fh.write("%s\n" % record)
            # Note: this import overwrites the initial command-line Phil overrides;
            # see https://github.com/xia2/xia2/issues/150
from .xia2_html import generate_xia2_html
if params.xia2.settings.small_molecule:
params.xia2.settings.report.xtriage_analysis = False
params.xia2.settings.report.include_radiation_damage = False
with xia2.Driver.timing.record_step("xia2.report"):
generate_xia2_html(
xinfo, filename="xia2.html", params=params.xia2.settings.report
)
duration = time.time() - start_time
# write out the time taken in a human readable way
logger.info(
"Processing took %s", time.strftime("%Hh %Mm %Ss", time.gmtime(duration))
)
write_citations()
def run():
if len(sys.argv) < 2 or "-help" in sys.argv or "--help" in sys.argv:
help()
sys.exit()
if "-version" in sys.argv or "--version" in sys.argv:
print(xia2.XIA2Version.Version)
print(dials_version())
ccp4_version = get_ccp4_version()
if ccp4_version:
print("CCP4 %s" % ccp4_version)
sys.exit()
xia2.Handlers.Streams.setup_logging(logfile="xia2.txt", debugfile="xia2-debug.txt")
try:
check_environment()
except Exception as e:
traceback.print_exc(file=open("xia2-error.txt", "w"))
logger.debug(traceback.format_exc())
logger.error("Error setting up xia2 environment: %s" % str(e))
logger.warning(
"Please send the contents of xia2.txt, xia2-error.txt and xia2-debug.txt to:"
)
logger.warning("[email protected]")
sys.exit(1)
wd = os.getcwd()
try:
xia2_main()
logger.debug("\nTiming report:")
logger.debug("\n".join(xia2.Driver.timing.report()))
logger.info("Status: normal termination")
return
except Sorry as s:
logger.error("Error: %s", str(s))
sys.exit(1)
except Exception as e:
with open(os.path.join(wd, "xia2-error.txt"), "w") as fh:
traceback.print_exc(file=fh)
logger.debug(traceback.format_exc())
logger.error("Error: %s", str(e))
logger.warning(
"Please send the contents of xia2.txt, xia2-error.txt and xia2-debug.txt to:"
)
logger.warning("[email protected]")
sys.exit(1)
| []
| []
| [
"CCP4"
]
| [] | ["CCP4"] | python | 1 | 0 | |
run-command/src/main/java/jp/gr/java_conf/uzresk/aws/ope/runcommand/RunScriptFunction.java | package jp.gr.java_conf.uzresk.aws.ope.runcommand;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.regions.RegionUtils;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.LambdaLogger;
import com.amazonaws.services.simplesystemsmanagement.AWSSimpleSystemsManagementAsync;
import com.amazonaws.services.simplesystemsmanagement.AWSSimpleSystemsManagementAsyncClient;
import com.amazonaws.services.simplesystemsmanagement.model.CommandStatus;
import com.amazonaws.services.simplesystemsmanagement.model.NotificationConfig;
import com.amazonaws.services.simplesystemsmanagement.model.SendCommandRequest;
import com.amazonaws.services.simplesystemsmanagement.model.SendCommandResult;
import jp.gr.java_conf.uzresk.aws.lambda.LambdaLock;
import jp.gr.java_conf.uzresk.aws.ope.runcommand.model.RunScriptRequest;
public class RunScriptFunction {
private ClientConfiguration cc;
public void execute(RunScriptRequest rc, Context context) {
LambdaLogger logger = context.getLogger();
logger.log("Run Command Start. Run Configuration:[" + rc + "]");
List<String> instanceIds = rc.getInstanceIds();
boolean isLockAcquisition = new LambdaLock().lock(instanceIds.toString(), context);
if (!isLockAcquisition) {
logger.log("[ERROR][RunScript][" + instanceIds + "]You can not acquire a lock.");
return;
}
String regionName = System.getenv("AWS_DEFAULT_REGION");
AWSSimpleSystemsManagementAsync client = RegionUtils.getRegion(regionName).createClient(
AWSSimpleSystemsManagementAsyncClient.class, new DefaultAWSCredentialsProviderChain(),
getClientConfiguration());
try {
SendCommandRequest req = new SendCommandRequest();
req.setInstanceIds(rc.getInstanceIds());
req.setDocumentName(rc.getDocumentName());
req.setParameters(rc.getParameters());
req.setOutputS3BucketName(rc.getOutputS3BucketName());
req.setOutputS3KeyPrefix(rc.getOutputS3KeyPrefix());
// SNS settings
if (isValidSNSSettings(rc, context)) {
req.setServiceRoleArn(rc.getServiceRoleArn());
NotificationConfig nc = new NotificationConfig();
nc.setNotificationArn(rc.getNotificationArn());
nc.setNotificationEvents(rc.getNotificationEvents());
req.setNotificationConfig(nc);
}
Future<SendCommandResult> result = client.sendCommandAsync(req);
SendCommandResult r;
while (!result.isDone()) {
Thread.sleep(100);
}
r = result.get();
if (CommandStatus.Failed.name().equals(r.getCommand().getStatus())) {
logger.log("[ERROR] execution failure. Commands[" + r.toString() + "]");
} else {
logger.log("[SUCCESS]Execution of RunCommand has completed successfully.[" + rc + "]");
}
} catch (InterruptedException | ExecutionException e) {
logger.log("[ERROR] execution run commands." + e.getMessage());
} finally {
client.shutdown();
}
}
void setClientConfiguration(ClientConfiguration cc) {
this.cc = cc;
}
ClientConfiguration getClientConfiguration() {
if (this.cc == null) {
return new ClientConfiguration();
}
return this.cc;
}
private boolean isValidSNSSettings(RunScriptRequest rc, Context context) {
LambdaLogger logger = context.getLogger();
if (rc.getServiceRoleArn() != null && rc.getNotificationArn() != null && rc.getNotificationEvents() != null) {
return true;
} else {
logger.log("Since the A setting of is missing, skip the SNS[" + rc + "]");
return false;
}
}
}
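// Illustrative sketch, not part of the original source: a hypothetical Lambda
// input event that would be deserialized into RunScriptRequest. Field names
// mirror the getters used above; every value is a placeholder.
//
// {
//   "instanceIds": ["i-0123456789abcdef0"],
//   "documentName": "AWS-RunShellScript",
//   "parameters": {"commands": ["systemctl restart my-app"]},
//   "outputS3BucketName": "example-runcommand-logs",
//   "outputS3KeyPrefix": "run-script/",
//   "serviceRoleArn": "arn:aws:iam::123456789012:role/example-ssm-notification-role",
//   "notificationArn": "arn:aws:sns:ap-northeast-1:123456789012:example-topic",
//   "notificationEvents": ["Success", "Failed"]
// }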
| [
"\"AWS_DEFAULT_REGION\""
]
| []
| [
"AWS_DEFAULT_REGION"
]
| [] | ["AWS_DEFAULT_REGION"] | java | 1 | 0 | |
tfx/components/transform/executor_test.py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.transform.executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tempfile
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform.beam import tft_unit
from tfx import types
from tfx.components.testdata.module_file import transform_module
from tfx.components.transform import executor
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from google.protobuf import json_format
class _TempPath(types.Artifact):
TYPE_NAME = 'TempPath'
# TODO(b/122478841): Add more detailed tests.
class ExecutorTest(tft_unit.TransformTestCase):
def _get_source_data_dir(self):
return os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
def _get_output_data_dir(self, sub_dir=None):
test_dir = self._testMethodName
if sub_dir is not None:
test_dir = os.path.join(test_dir, sub_dir)
return os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
test_dir)
def _make_base_do_params(self, source_data_dir, output_data_dir):
# Create input dict.
examples = standard_artifacts.Examples()
examples.uri = os.path.join(source_data_dir, 'csv_example_gen')
examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
schema_artifact = standard_artifacts.Schema()
schema_artifact.uri = os.path.join(source_data_dir, 'schema_gen')
self._input_dict = {
executor.EXAMPLES_KEY: [examples],
executor.SCHEMA_KEY: [schema_artifact],
}
# Create output dict.
self._transformed_output = standard_artifacts.TransformGraph()
self._transformed_output.uri = os.path.join(output_data_dir,
'transformed_graph')
self._transformed_examples = standard_artifacts.Examples()
self._transformed_examples.uri = os.path.join(output_data_dir,
'transformed_examples')
temp_path_output = _TempPath()
temp_path_output.uri = tempfile.mkdtemp()
self._updated_analyzer_cache_artifact = standard_artifacts.TransformCache()
self._updated_analyzer_cache_artifact.uri = os.path.join(
self._output_data_dir, 'CACHE')
self._output_dict = {
executor.TRANSFORM_GRAPH_KEY: [self._transformed_output],
executor.TRANSFORMED_EXAMPLES_KEY: [self._transformed_examples],
executor.TEMP_PATH_KEY: [temp_path_output],
executor.UPDATED_ANALYZER_CACHE_KEY: [
self._updated_analyzer_cache_artifact
],
}
# Create exec properties skeleton.
self._exec_properties = {}
def setUp(self):
super(ExecutorTest, self).setUp()
self._source_data_dir = self._get_source_data_dir()
self._output_data_dir = self._get_output_data_dir()
self._make_base_do_params(self._source_data_dir, self._output_data_dir)
# Create exec properties skeleton.
self._module_file = os.path.join(self._source_data_dir,
'module_file/transform_module.py')
self._preprocessing_fn = '%s.%s' % (
transform_module.preprocessing_fn.__module__,
transform_module.preprocessing_fn.__name__)
self._exec_properties['splits_config'] = None
# Executor for test.
self._transform_executor = executor.Executor()
def _verify_transform_outputs(self, materialize=True, store_cache=True):
expected_outputs = ['transformed_graph']
if store_cache:
expected_outputs.append('CACHE')
self.assertNotEqual(
0,
len(tf.io.gfile.listdir(self._updated_analyzer_cache_artifact.uri)))
if materialize:
expected_outputs.append('transformed_examples')
train_pattern = os.path.join(self._transformed_examples.uri, 'train', '*')
train_files = tf.io.gfile.glob(train_pattern)
self.assertNotEqual(0, len(train_files))
train_dataset = tf.data.TFRecordDataset(
train_files, compression_type='GZIP')
train_count = sum(1 for record in train_dataset)
eval_pattern = os.path.join(self._transformed_examples.uri, 'eval', '*')
eval_files = tf.io.gfile.glob(eval_pattern)
self.assertNotEqual(0, len(eval_files))
eval_dataset = tf.data.TFRecordDataset(
eval_files, compression_type='GZIP')
eval_count = sum(1 for record in eval_dataset)
self.assertGreater(train_count, eval_count)
# Depending on `materialize` and `store_cache`, check that
# expected outputs are exactly correct. If either flag is False, its
# respective output should not be present.
self.assertCountEqual(expected_outputs,
tf.io.gfile.listdir(self._output_data_dir))
path_to_saved_model = os.path.join(
self._transformed_output.uri, tft.TFTransformOutput.TRANSFORM_FN_DIR,
tf.saved_model.SAVED_MODEL_FILENAME_PB)
self.assertTrue(tf.io.gfile.exists(path_to_saved_model))
def _run_pipeline_get_metrics(self):
pipelines = []
def _create_pipeline_wrapper(*_):
result = self._makeTestPipeline()
pipelines.append(result)
return result
with tft_unit.mock.patch.object(
executor.Executor,
'_CreatePipeline',
autospec=True,
side_effect=_create_pipeline_wrapper):
transform_executor = executor.Executor()
transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
assert len(pipelines) == 1
return pipelines[0].metrics
def test_do_with_module_file(self):
self._exec_properties['module_file'] = self._module_file
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_preprocessing_fn(self):
self._exec_properties['preprocessing_fn'] = self._preprocessing_fn
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_materialization_disabled(self):
self._exec_properties['preprocessing_fn'] = self._preprocessing_fn
del self._output_dict[executor.TRANSFORMED_EXAMPLES_KEY]
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs(materialize=False)
def test_do_with_cache_materialization_disabled(self):
self._exec_properties['preprocessing_fn'] = self._preprocessing_fn
del self._output_dict[executor.UPDATED_ANALYZER_CACHE_KEY]
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs(store_cache=False)
def test_do_with_preprocessing_fn_custom_config(self):
self._exec_properties['preprocessing_fn'] = '%s.%s' % (
transform_module.preprocessing_fn.__module__,
transform_module.preprocessing_fn.__name__)
self._exec_properties['custom_config'] = json.dumps({
'VOCAB_SIZE': 1000,
'OOV_SIZE': 10
})
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_preprocessing_fn_and_none_custom_config(self):
self._exec_properties['preprocessing_fn'] = '%s.%s' % (
transform_module.preprocessing_fn.__module__,
transform_module.preprocessing_fn.__name__)
self._exec_properties['custom_config'] = json.dumps(None)
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_no_preprocessing_fn(self):
with self.assertRaises(ValueError):
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
def test_do_with_duplicate_preprocessing_fn(self):
self._exec_properties['module_file'] = self._module_file
self._exec_properties['preprocessing_fn'] = self._preprocessing_fn
with self.assertRaises(ValueError):
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
def test_do_with_custom_splits(self):
self._exec_properties['splits_config'] = json_format.MessageToJson(
transform_pb2.SplitsConfig(
analyze=['train'], transform=['train', 'eval']),
preserving_proto_field_name=True)
self._exec_properties['module_file'] = self._module_file
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs()
def test_do_with_empty_analyze_splits(self):
self._exec_properties['splits_config'] = json_format.MessageToJson(
transform_pb2.SplitsConfig(analyze=[], transform=['train', 'eval']),
preserving_proto_field_name=True)
self._exec_properties['module_file'] = self._module_file
with self.assertRaises(ValueError):
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
def test_do_with_empty_transform_splits(self):
self._exec_properties['splits_config'] = json_format.MessageToJson(
transform_pb2.SplitsConfig(analyze=['train'], transform=[]),
preserving_proto_field_name=True)
self._exec_properties['module_file'] = self._module_file
self._transformed_examples.split_names = artifact_utils.encode_split_names(
[])
self._output_dict[executor.TRANSFORMED_EXAMPLES_KEY] = [
self._transformed_examples
]
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self.assertFalse(
tf.io.gfile.exists(
os.path.join(self._transformed_examples.uri, 'train')))
self.assertFalse(
tf.io.gfile.exists(
os.path.join(self._transformed_examples.uri, 'eval')))
path_to_saved_model = os.path.join(self._transformed_output.uri,
tft.TFTransformOutput.TRANSFORM_FN_DIR,
tf.saved_model.SAVED_MODEL_FILENAME_PB)
self.assertTrue(tf.io.gfile.exists(path_to_saved_model))
def test_counters(self):
self._exec_properties['preprocessing_fn'] = self._preprocessing_fn
metrics = self._run_pipeline_get_metrics()
    # The test data has 9909 instances in the train dataset and 5091 instances
    # in the eval dataset (obtained by running:
    # gqui third_party/py/tfx/components/testdata/csv_example_gen/train/data* \
    #   'select count(*)'
    # )
    # Since the analysis dataset (train) is read twice (once for analysis and
    # once for transform), the expected value of the num_instances counter is:
    # 9909 * 2 + 5091 = 24909.
self.assertMetricsCounterEqual(metrics, 'num_instances', 24909)
# We expect 2 saved_models to be created because this is a 1 phase analysis
# preprocessing_fn.
self.assertMetricsCounterEqual(metrics, 'saved_models_created', 2)
# This should be the size of the preprocessing_fn's inputs dictionary which
# is 18 according to the schema.
self.assertMetricsCounterEqual(metrics, 'total_columns_count', 18)
# There are 9 features that are passed into tft analyzers in the
# preprocessing_fn.
self.assertMetricsCounterEqual(metrics, 'analyze_columns_count', 9)
# In addition, 7 features go through a pure TF map, not including the label,
# so we expect 9 + 7 + 1 = 17 transform columns.
self.assertMetricsCounterEqual(metrics, 'transform_columns_count', 17)
# There should be 1 path used for analysis since that's what input_dict
# specifies.
self.assertMetricsCounterEqual(metrics, 'analyze_paths_count', 1)
def test_do_with_cache(self):
# First run that creates cache.
self._exec_properties['module_file'] = self._module_file
metrics = self._run_pipeline_get_metrics()
    # The test data has 9909 instances in the train dataset and 5091 instances
    # in the eval dataset. Since the analysis dataset (train) is read twice when
    # no input cache is present (once for analysis and once for transform), the
    # expected value of the num_instances counter is: 9909 * 2 + 5091 = 24909.
self.assertMetricsCounterEqual(metrics, 'num_instances', 24909)
self._verify_transform_outputs(store_cache=True)
# Second run from cache.
self._output_data_dir = self._get_output_data_dir('2nd_run')
analyzer_cache_artifact = standard_artifacts.TransformCache()
analyzer_cache_artifact.uri = self._updated_analyzer_cache_artifact.uri
self._make_base_do_params(self._source_data_dir, self._output_data_dir)
self._input_dict[executor.ANALYZER_CACHE_KEY] = [analyzer_cache_artifact]
self._exec_properties['module_file'] = self._module_file
metrics = self._run_pipeline_get_metrics()
# Since input cache should now cover all analysis (train) paths, the train
# and eval sets are each read exactly once for transform. Thus, the
    # expected value of the num_instances counter is: 9909 + 5091 = 15000.
self.assertMetricsCounterEqual(metrics, 'num_instances', 15000)
self._verify_transform_outputs(store_cache=True)
@tft_unit.mock.patch.object(executor, '_MAX_ESTIMATED_STAGES_COUNT', 21)
def test_do_with_cache_disabled_too_many_stages(self):
self._exec_properties['module_file'] = self._module_file
self._transform_executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self._verify_transform_outputs(store_cache=False)
self.assertFalse(
tf.io.gfile.exists(self._updated_analyzer_cache_artifact.uri))
if __name__ == '__main__':
tf.test.main()
| []
| []
| [
"TEST_UNDECLARED_OUTPUTS_DIR"
]
| [] | ["TEST_UNDECLARED_OUTPUTS_DIR"] | python | 1 | 0 | |
functional/FunctionalTest.java | /*
* MinIO Java SDK for Amazon S3 Compatible Cloud Storage,
* (C) 2015-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static java.nio.file.StandardOpenOption.*;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import io.minio.*;
import io.minio.errors.*;
import io.minio.messages.*;
import java.io.*;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;
import java.security.*;
import java.time.*;
import java.util.*;
import java.util.concurrent.TimeUnit;
import javax.crypto.KeyGenerator;
import javax.crypto.spec.SecretKeySpec;
import okhttp3.HttpUrl;
import okhttp3.MultipartBody;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;
import okio.BufferedSink;
import okio.Okio;
@SuppressFBWarnings(
value = "REC",
justification = "Allow catching super class Exception since it's tests")
public class FunctionalTest {
private static final String OS = System.getProperty("os.name").toLowerCase(Locale.US);
private static final String MINIO_BINARY;
private static final String PASS = "PASS";
private static final String FAILED = "FAIL";
private static final String IGNORED = "NA";
private static final int KB = 1024;
private static final int MB = 1024 * 1024;
private static final Random random = new Random(new SecureRandom().nextLong());
private static final String customContentType = "application/javascript";
private static final String nullContentType = null;
private static String bucketName = getRandomName();
private static boolean mintEnv = false;
private static Path dataFile1Kb;
private static Path dataFile6Mb;
private static String endpoint;
private static String accessKey;
private static String secretKey;
private static String region;
private static MinioClient client = null;
static {
String binaryName = "minio";
if (OS.contains("windows")) {
binaryName = "minio.exe";
}
MINIO_BINARY = binaryName;
}
/** Do no-op. */
public static void ignore(Object... args) {}
/** Create given sized file and returns its name. */
public static String createFile(int size) throws IOException {
String filename = getRandomName();
try (OutputStream os = Files.newOutputStream(Paths.get(filename), CREATE, APPEND)) {
int totalBytesWritten = 0;
int bytesToWrite = 0;
byte[] buf = new byte[1 * MB];
while (totalBytesWritten < size) {
random.nextBytes(buf);
bytesToWrite = size - totalBytesWritten;
if (bytesToWrite > buf.length) {
bytesToWrite = buf.length;
}
os.write(buf, 0, bytesToWrite);
totalBytesWritten += bytesToWrite;
}
}
return filename;
}
/** Create 1 KB temporary file. */
public static String createFile1Kb() throws IOException {
if (mintEnv) {
String filename = getRandomName();
Files.createSymbolicLink(Paths.get(filename).toAbsolutePath(), dataFile1Kb);
return filename;
}
return createFile(1 * KB);
}
/** Create 6 MB temporary file. */
public static String createFile6Mb() throws IOException {
if (mintEnv) {
String filename = getRandomName();
Files.createSymbolicLink(Paths.get(filename).toAbsolutePath(), dataFile6Mb);
return filename;
}
return createFile(6 * MB);
}
/** Generate random name. */
public static String getRandomName() {
return "minio-java-test-" + new BigInteger(32, random).toString(32);
}
/** Returns byte array contains all data in given InputStream. */
public static byte[] readAllBytes(InputStream is) throws IOException {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
int nRead;
byte[] data = new byte[16384];
while ((nRead = is.read(data, 0, data.length)) != -1) {
buffer.write(data, 0, nRead);
}
return buffer.toByteArray();
}
/** Prints a success log entry in JSON format. */
public static void mintSuccessLog(String function, String args, long startTime) {
if (mintEnv) {
System.out.println(
new MintLogger(
function, args, System.currentTimeMillis() - startTime, PASS, null, null, null));
}
}
/** Prints a failure log entry in JSON format. */
public static void mintFailedLog(
String function, String args, long startTime, String message, String error) {
if (mintEnv) {
System.out.println(
new MintLogger(
function,
args,
System.currentTimeMillis() - startTime,
FAILED,
null,
message,
error));
}
}
/** Prints a ignore log entry in JSON format. */
public static void mintIgnoredLog(String function, String args, long startTime) {
if (mintEnv) {
System.out.println(
new MintLogger(
function, args, System.currentTimeMillis() - startTime, IGNORED, null, null, null));
}
    }
  }
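  // Illustrative sketch, not part of the original source: each mint log helper
  // above prints a single-line JSON object built from the MintLogger arguments
  // shown here (the exact field set is defined by MintLogger, which is not part
  // of this file). A PASS entry would look roughly like:
  //
  // {"function": "makeBucket(String bucketName)", "args": "", "duration": 42,
  //  "status": "PASS", "alert": "", "message": "", "error": ""}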
/** Read object content of the given url. */
public static byte[] readObject(String urlString) throws Exception {
Request.Builder requestBuilder = new Request.Builder();
Request request = requestBuilder.url(HttpUrl.parse(urlString)).method("GET", null).build();
OkHttpClient transport =
new OkHttpClient()
.newBuilder()
.connectTimeout(20, TimeUnit.SECONDS)
.writeTimeout(20, TimeUnit.SECONDS)
.readTimeout(20, TimeUnit.SECONDS)
.build();
Response response = transport.newCall(request).execute();
try {
if (response.isSuccessful()) {
return response.body().bytes();
}
String errorXml = new String(response.body().bytes(), StandardCharsets.UTF_8);
throw new Exception(
"failed to create object. Response: " + response + ", Response body: " + errorXml);
} finally {
response.close();
}
}
/** Write data to given object url. */
public static void writeObject(String urlString, byte[] dataBytes) throws Exception {
Request.Builder requestBuilder = new Request.Builder();
    // Set header 'x-amz-acl' to 'bucket-owner-full-control' so that objects created
    // anonymously can be downloaded by the bucket owner in AWS S3.
Request request =
requestBuilder
.url(HttpUrl.parse(urlString))
.method("PUT", RequestBody.create(null, dataBytes))
.addHeader("x-amz-acl", "bucket-owner-full-control")
.build();
OkHttpClient transport =
new OkHttpClient()
.newBuilder()
.connectTimeout(20, TimeUnit.SECONDS)
.writeTimeout(20, TimeUnit.SECONDS)
.readTimeout(20, TimeUnit.SECONDS)
.build();
Response response = transport.newCall(request).execute();
try {
if (!response.isSuccessful()) {
String errorXml = new String(response.body().bytes(), StandardCharsets.UTF_8);
throw new Exception(
"failed to create object. Response: " + response + ", Response body: " + errorXml);
}
} finally {
response.close();
}
}
/** Test: makeBucket(String bucketName). */
public static void makeBucket_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: makeBucket(String bucketName)");
}
long startTime = System.currentTimeMillis();
try {
String name = getRandomName();
client.makeBucket(name);
client.removeBucket(name);
mintSuccessLog("makeBucket(String bucketName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"makeBucket(String bucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: makeBucket(String bucketName, String region). */
public static void makeBucketwithRegion_test() throws Exception {
if (!mintEnv) {
System.out.println("Test: makeBucket(String bucketName, String region)");
}
long startTime = System.currentTimeMillis();
try {
String name = getRandomName();
client.makeBucket(name, "eu-west-1");
client.removeBucket(name);
mintSuccessLog(
"makeBucket(String bucketName, String region)", "region: eu-west-1", startTime);
} catch (Exception e) {
mintFailedLog(
"makeBucket(String bucketName, String region)",
"region: eu-west-1",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: makeBucket(String bucketName, String region) where bucketName has periods in its name.
*/
public static void makeBucketWithPeriod_test() throws Exception {
if (!mintEnv) {
System.out.println("Test: makeBucket(String bucketName, String region)");
}
long startTime = System.currentTimeMillis();
String name = getRandomName() + ".withperiod";
try {
client.makeBucket(name, "eu-central-1");
client.removeBucket(name);
mintSuccessLog(
"makeBucket(String bucketName, String region)",
"name: " + name + ", region: eu-central-1",
startTime);
} catch (Exception e) {
mintFailedLog(
"makeBucket(String bucketName, String region)",
"name: " + name + ", region: eu-central-1",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: listBuckets(). */
public static void listBuckets_test() throws Exception {
if (!mintEnv) {
System.out.println("Test: listBuckets()");
}
long startTime = System.currentTimeMillis();
try {
long nowSeconds = ZonedDateTime.now().toEpochSecond();
String bucketName = getRandomName();
boolean found = false;
client.makeBucket(bucketName);
for (Bucket bucket : client.listBuckets()) {
if (bucket.name().equals(bucketName)) {
if (found) {
throw new Exception(
"[FAILED] duplicate entry " + bucketName + " found in list buckets");
}
found = true;
          // allow up to 15 minutes of clock skew
if ((bucket.creationDate().toEpochSecond() - nowSeconds) > (15 * 60)) {
throw new Exception(
"[FAILED] bucket creation time too apart in "
+ (bucket.creationDate().toEpochSecond() - nowSeconds)
+ " seconds");
}
}
}
client.removeBucket(bucketName);
if (!found) {
throw new Exception("[FAILED] created bucket not found in list buckets");
}
mintSuccessLog("listBuckets()", null, startTime);
} catch (Exception e) {
mintFailedLog(
"listBuckets()",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: bucketExists(String bucketName). */
public static void bucketExists_test() throws Exception {
if (!mintEnv) {
System.out.println("Test: bucketExists(String bucketName)");
}
long startTime = System.currentTimeMillis();
try {
String name = getRandomName();
client.makeBucket(name);
if (!client.bucketExists(name)) {
throw new Exception("[FAILED] bucket does not exist");
}
client.removeBucket(name);
mintSuccessLog("bucketExists(String bucketName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"bucketExists(String bucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: removeBucket(String bucketName). */
public static void removeBucket_test() throws Exception {
if (!mintEnv) {
System.out.println("Test: removeBucket(String bucketName)");
}
long startTime = System.currentTimeMillis();
try {
String name = getRandomName();
client.makeBucket(name);
client.removeBucket(name);
mintSuccessLog("removeBucket(String bucketName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"removeBucket(String bucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
  /** Create test setup. */
public static void setup() throws Exception {
client.makeBucket(bucketName);
}
/** Tear down test setup. */
public static void teardown() throws Exception {
client.removeBucket(bucketName);
}
/**
* Test: putObject(String bucketName, String objectName, String filename, PutObjectOptions
* options)
*/
public static void putObject_test1() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, String filename, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String filename = createFile1Kb();
client.putObject(bucketName, filename, filename, null);
Files.delete(Paths.get(filename));
client.removeObject(bucketName, filename);
mintSuccessLog(methodName, "filename: 1KB", startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"filename: 1KB",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: multipart: putObject(String bucketName, String objectName, String filename,
* PutObjectOptions options)
*/
public static void putObject_test2() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, String filename, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: multipart: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String filename = createFile6Mb();
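      // An object size of 6 MB with a 5 MB part size forces a multipart upload (two parts).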
client.putObject(bucketName, filename, filename, new PutObjectOptions(6 * MB, 5 * MB));
Files.delete(Paths.get(filename));
client.removeObject(bucketName, filename);
mintSuccessLog(methodName, "filename: 6MB", startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"filename: 6MB",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: with content-type: putObject(String bucketName, String objectName, String filename,
* PutObjectOptions options)
*/
public static void putObject_test3() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, String filename, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: with content-type: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String filename = createFile1Kb();
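      // A part size of -1 lets the client choose the part size automatically for the 1 KB upload.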
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setContentType(customContentType);
client.putObject(bucketName, filename, filename, options);
Files.delete(Paths.get(filename));
client.removeObject(bucketName, filename);
mintSuccessLog(methodName, "contentType: " + customContentType, startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"contentType: " + customContentType,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions
* options)
*/
public static void putObject_test4() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setContentType(customContentType);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "size: 1 KB, objectName: " + customContentType, startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"size: 1 KB, objectName: " + customContentType,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: object name with multiple path segments: putObject(String bucketName, String objectName,
* InputStream stream, PutObjectOptions options)
*/
public static void putObject_test5() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: object name with path segments: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String objectName = "path/to/" + getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setContentType(customContentType);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "size: 1 KB, contentType: " + customContentType, startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"size: 1 KB, contentType: " + customContentType,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: unknown size stream: putObject(String bucketName, String objectName, InputStream stream,
* PutObjectOptions options)
*/
public static void putObject_test6() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: unknown size stream: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(3 * KB)) {
PutObjectOptions options = new PutObjectOptions(is.available(), -1);
options.setContentType(customContentType);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "size: -1, contentType: " + customContentType, startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"size: -1, contentType: " + customContentType,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: multipart unknown size stream: putObject(String bucketName, String objectName,
* InputStream stream, PutObjectOptions options)
*/
public static void putObject_test7() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: multipart unknown size stream: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(11 * MB)) {
PutObjectOptions options = new PutObjectOptions(is.available(), -1);
options.setContentType(customContentType);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "size: -1, contentType: " + customContentType, startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"size: -1, contentType: " + customContentType,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: with user metadata: putObject(String bucketName, String objectName, InputStream stream,
* PutObjectOptions options).
*/
public static void putObject_test8() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: with user metadata: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
Map<String, String> headerMap = new HashMap<>();
headerMap.put("X-Amz-Meta-mykey", "myvalue");
try (final InputStream is = new ContentInputStream(1 * KB)) {
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setHeaders(headerMap);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "X-Amz-Meta-mykey: myvalue", startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"X-Amz-Meta-mykey: myvalue",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: with storage class REDUCED_REDUNDANCY: putObject(String bucketName, String objectName,
* InputStream stream, PutObjectOptions options).
*/
public static void putObject_test9() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: with storage class REDUCED_REDUNDANCY: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
Map<String, String> headerMap = new HashMap<>();
headerMap.put("X-Amz-Storage-Class", "REDUCED_REDUNDANCY");
try (final InputStream is = new ContentInputStream(1 * KB)) {
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setHeaders(headerMap);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "X-Amz-Storage-Class: REDUCED_REDUNDANCY", startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"X-Amz-Storage-Class: REDUCED_REDUNDANCY",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: with storage class STANDARD: putObject(String bucketName, String objectName, InputStream
* stream, PutObjectOptions options).
*/
public static void putObject_test10() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: with storage class STANDARD: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
Map<String, String> headerMap = new HashMap<>();
headerMap.put("X-Amz-Storage-Class", "STANDARD");
try (final InputStream is = new ContentInputStream(1 * KB)) {
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setHeaders(headerMap);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "X-Amz-Storage-Class: STANDARD", startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"X-Amz-Storage-Class: STANDARD",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: with storage class INVALID: putObject(String bucketName, String objectName, InputStream
* stream, PutObjectOptions options).
*/
public static void putObject_test11() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: with storage class INVALID: " + methodName);
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
Map<String, String> headerMap = new HashMap<>();
headerMap.put("X-Amz-Storage-Class", "INVALID");
try (final InputStream is = new ContentInputStream(1 * KB)) {
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setHeaders(headerMap);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
} catch (ErrorResponseException e) {
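      // Only INVALID_STORAGE_CLASS is the expected error here; anything else is a genuine failure.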
if (e.errorResponse().errorCode() != ErrorCode.INVALID_STORAGE_CLASS) {
mintFailedLog(
methodName,
"X-Amz-Storage-Class: INVALID",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
} catch (Exception e) {
mintFailedLog(
methodName,
"X-Amz-Storage-Class: INVALID",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
mintSuccessLog(methodName, "X-Amz-Storage-Class: INVALID", startTime);
}
/**
* Test: with SSE_C: putObject(String bucketName, String objectName, InputStream stream,
* PutObjectOptions options).
*/
public static void putObject_test12() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: with SSE_C: " + methodName);
}
long startTime = System.currentTimeMillis();
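    // Generate a new 256 bit AES key - This key must be remembered by the client.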
KeyGenerator keyGen = KeyGenerator.getInstance("AES");
keyGen.init(256);
ServerSideEncryption sse = ServerSideEncryption.withCustomerKey(keyGen.generateKey());
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setSse(sse);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "Server-side encryption: SSE_C", startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"Server-side encryption: SSE_C",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: multipart with SSE_C: putObject(String bucketName, String objectName, InputStream stream,
* PutObjectOptions options).
*/
public static void putObject_test13() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: multipart with SSE_C: " + methodName);
}
long startTime = System.currentTimeMillis();
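    // Generate a new 256 bit AES key - This key must be remembered by the client.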
KeyGenerator keyGen = KeyGenerator.getInstance("AES");
keyGen.init(256);
ServerSideEncryption sse = ServerSideEncryption.withCustomerKey(keyGen.generateKey());
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(11 * MB)) {
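        // Size -1 marks the stream length as unknown; the 5 MB part size drives the multipart upload.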
PutObjectOptions options = new PutObjectOptions(-1, 5 * MB);
options.setSse(sse);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "Size: 11 MB, Server-side encryption: SSE_C", startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"Size: 11 MB, Server-side encryption: SSE_C",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: with SSE_S3: putObject(String bucketName, String objectName, InputStream stream,
* PutObjectOptions options).
*/
public static void putObject_test14() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: with SSE_S3: " + methodName);
}
long startTime = System.currentTimeMillis();
ServerSideEncryption sse = ServerSideEncryption.atRest();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setSse(sse);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "Server-side encryption: SSE_S3", startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"Server-side encryption: SSE_S3",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: with SSE_KMS: putObject(String bucketName, String objectName, InputStream stream,
* PutObjectOptions options).
*/
public static void putObject_test15() throws Exception {
String methodName =
"putObject(String bucketName, String objectName, InputStream stream, PutObjectOptions options)";
if (!mintEnv) {
System.out.println("Test: with SSE_KMS: " + methodName);
}
long startTime = System.currentTimeMillis();
if (System.getenv("MINT_KEY_ID").equals("")) {
mintIgnoredLog(methodName, "Server-side encryption: SSE_KMS", startTime);
}
Map<String, String> myContext = new HashMap<>();
myContext.put("key1", "value1");
ServerSideEncryption sse = ServerSideEncryption.withManagedKeys("keyId", myContext);
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setSse(sse);
client.putObject(bucketName, objectName, is, options);
}
client.removeObject(bucketName, objectName);
mintSuccessLog(methodName, "Server-side encryption: SSE_KMS", startTime);
} catch (Exception e) {
mintFailedLog(
methodName,
"Server-side encryption: SSE_KMS",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: statObject(String bucketName, String objectName). */
public static void statObject_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: statObject(String bucketName, String objectName)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
Map<String, String> headerMap = new HashMap<>();
headerMap.put("Content-Type", customContentType);
headerMap.put("my-custom-data", "foo");
try (final InputStream is = new ContentInputStream(1)) {
PutObjectOptions options = new PutObjectOptions(1, -1);
options.setHeaders(headerMap);
options.setContentType(customContentType);
client.putObject(bucketName, objectName, is, options);
}
ObjectStat objectStat = client.statObject(bucketName, objectName);
if (!(objectName.equals(objectStat.name())
&& (objectStat.length() == 1)
&& bucketName.equals(objectStat.bucketName())
&& objectStat.contentType().equals(customContentType))) {
throw new Exception("[FAILED] object stat differs");
}
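      // Custom headers are stored as user metadata and surface with the "x-amz-meta-" prefix.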
Map<String, List<String>> httpHeaders = objectStat.httpHeaders();
if (!httpHeaders.containsKey("x-amz-meta-my-custom-data")) {
throw new Exception("[FAILED] metadata not found in object stat");
}
List<String> values = httpHeaders.get("x-amz-meta-my-custom-data");
if (values.size() != 1) {
throw new Exception("[FAILED] too many metadata value. expected: 1, got: " + values.size());
}
if (!values.get(0).equals("foo")) {
throw new Exception("[FAILED] wrong metadata value. expected: foo, got: " + values.get(0));
}
client.removeObject(bucketName, objectName);
mintSuccessLog("statObject(String bucketName, String objectName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"statObject(String bucketName, String objectName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: statObject(String bucketName, String objectName, ServerSideEncryption sse). To test
* statObject using SSE_C.
*/
public static void statObject_test2() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: statObject(String bucketName, String objectName, ServerSideEncryption sse)"
+ " using SSE_C.");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
// Generate a new 256 bit AES key - This key must be remembered by the client.
KeyGenerator keyGen = KeyGenerator.getInstance("AES");
keyGen.init(256);
ServerSideEncryption sse = ServerSideEncryption.withCustomerKey(keyGen.generateKey());
try (final InputStream is = new ContentInputStream(1)) {
PutObjectOptions options = new PutObjectOptions(1, -1);
options.setSse(sse);
client.putObject(bucketName, objectName, is, options);
}
ObjectStat objectStat = client.statObject(bucketName, objectName, sse);
if (!(objectName.equals(objectStat.name())
&& (objectStat.length() == 1)
&& bucketName.equals(objectStat.bucketName()))) {
throw new Exception("[FAILED] object stat differs");
}
Map<String, List<String>> httpHeaders = objectStat.httpHeaders();
if (!httpHeaders.containsKey("X-Amz-Server-Side-Encryption-Customer-Algorithm")) {
throw new Exception("[FAILED] metadata not found in object stat");
}
List<String> values = httpHeaders.get("X-Amz-Server-Side-Encryption-Customer-Algorithm");
if (values.size() != 1) {
throw new Exception("[FAILED] too many metadata value. expected: 1, got: " + values.size());
}
if (!values.get(0).equals("AES256")) {
throw new Exception(
"[FAILED] wrong metadata value. expected: AES256, got: " + values.get(0));
}
client.removeObject(bucketName, objectName);
mintSuccessLog(
"statObject(String bucketName, String objectName, ServerSideEncryption sse)"
+ " using SSE_C.",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"statObject(String bucketName, String objectName, ServerSideEncryption sse)"
+ " using SSE_C.",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: statObject(String bucketName, "randomName/"). */
public static void statObject_test3() throws Exception {
if (!mintEnv) {
System.out.println("Test: statObject(String bucketName, \"randomName/\")");
}
long startTime = System.currentTimeMillis();
try {
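      // stat on a non-existent "directory" style key is expected to fail with NO_SUCH_KEY.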
client.statObject(bucketName, getRandomName() + "/");
} catch (ErrorResponseException e) {
if (e.errorResponse().errorCode() != ErrorCode.NO_SUCH_KEY) {
mintFailedLog(
"statObject(String bucketName, \"randomName/\")",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
} catch (Exception e) {
mintFailedLog(
"statObject(String bucketName, \"randomName/\")",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
} finally {
mintSuccessLog("statObject(String bucketName, \"randomName/\"`)", null, startTime);
}
}
/** Test: getObject(String bucketName, String objectName). */
public static void getObject_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: getObject(String bucketName, String objectName)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
client.getObject(bucketName, objectName).close();
client.removeObject(bucketName, objectName);
mintSuccessLog("getObject(String bucketName, String objectName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"getObject(String bucketName, String objectName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: getObject(String bucketName, String objectName, long offset). */
public static void getObject_test2() throws Exception {
if (!mintEnv) {
System.out.println("Test: getObject(String bucketName, String objectName, long offset)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
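      // Read the object starting at byte offset 1000.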
client.getObject(bucketName, objectName, 1000L).close();
client.removeObject(bucketName, objectName);
mintSuccessLog(
"getObject(String bucketName, String objectName, long offset)",
"offset: 1000",
startTime);
} catch (Exception e) {
mintFailedLog(
"getObject(String bucketName, String objectName, long offset)",
"offset: 1000",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: getObject(String bucketName, String objectName, long offset, Long length). */
public static void getObject_test3() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: getObject(String bucketName, String objectName, long offset, Long length)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(6 * MB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(6 * MB, -1));
}
client.getObject(bucketName, objectName, 1000L, 64 * 1024L).close();
client.removeObject(bucketName, objectName);
mintSuccessLog(
"getObject(String bucketName, String objectName, long offset, Long length)",
"offset: 1000, length: 64 KB",
startTime);
} catch (Exception e) {
mintFailedLog(
"getObject(String bucketName, String objectName, long offset, Long length)",
"offset: 1000, length: 64 KB",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: getObject(String bucketName, String objectName, String filename). */
public static void getObject_test4() throws Exception {
if (!mintEnv) {
System.out.println("Test: getObject(String bucketName, String objectName, String filename)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
client.getObject(bucketName, objectName, objectName + ".downloaded");
Files.delete(Paths.get(objectName + ".downloaded"));
client.removeObject(bucketName, objectName);
mintSuccessLog(
"getObject(String bucketName, String objectName, String filename)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"getObject(String bucketName, String objectName, String filename)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: getObject(String bucketName, String objectName, String filename). where objectName has
* multiple path segments.
*/
public static void getObject_test5() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: objectName with multiple path segments: "
+ "getObject(String bucketName, String objectName, String filename)");
}
long startTime = System.currentTimeMillis();
String baseObjectName = getRandomName();
String objectName = "path/to/" + baseObjectName;
try {
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
client.getObject(bucketName, objectName, baseObjectName + ".downloaded");
Files.delete(Paths.get(baseObjectName + ".downloaded"));
client.removeObject(bucketName, objectName);
mintSuccessLog(
"getObject(String bucketName, String objectName, String filename)",
"objectName: " + objectName,
startTime);
} catch (Exception e) {
mintFailedLog(
"getObject(String bucketName, String objectName, String filename)",
"objectName: " + objectName,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: getObject(String bucketName, String objectName) zero size object. */
public static void getObject_test6() throws Exception {
if (!mintEnv) {
System.out.println("Test: getObject(String bucketName, String objectName) zero size object");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(0)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(0, -1));
}
client.getObject(bucketName, objectName).close();
client.removeObject(bucketName, objectName);
mintSuccessLog("getObject(String bucketName, String objectName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"getObject(String bucketName, String objectName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: getObject(String bucketName, String objectName, ServerSideEncryption sse). To test
* getObject when object is put using SSE_C.
*/
public static void getObject_test7() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: getObject(String bucketName, String objectName, ServerSideEncryption sse) using SSE_C");
}
long startTime = System.currentTimeMillis();
// Generate a new 256 bit AES key - This key must be remembered by the client.
KeyGenerator keyGen = KeyGenerator.getInstance("AES");
keyGen.init(256);
ServerSideEncryption sse = ServerSideEncryption.withCustomerKey(keyGen.generateKey());
try {
String objectName = getRandomName();
      byte[] inBytes;
      try (final InputStream is = new ContentInputStream(1 * KB)) {
        inBytes = readAllBytes(is);
      }
      try (final InputStream is = new ContentInputStream(1 * KB)) {
        PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
        options.setSse(sse);
        client.putObject(bucketName, objectName, is, options);
      }
      byte[] outBytes;
      try (final InputStream stream = client.getObject(bucketName, objectName, sse)) {
        outBytes = readAllBytes(stream);
      }
      // Contents received from getObject must match the initially uploaded contents.
      if (!Arrays.equals(inBytes, outBytes)) {
        throw new Exception("contents received from getObject differ from uploaded contents");
      }
client.removeObject(bucketName, objectName);
mintSuccessLog(
"getObject(String bucketName, String objectName, ServerSideEncryption sse)"
+ " using SSE_C.",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"getObject(String bucketName, String objectName, ServerSideEncryption sse)"
+ " using SSE_C.",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: getObject(String bucketName, String objectName, long offset, Long length) with offset=0.
*/
public static void getObject_test8() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: getObject(String bucketName, String objectName, long offset, Long length) with offset=0");
}
final long startTime = System.currentTimeMillis();
final int fullLength = 1024;
final int partialLength = 256;
final long offset = 0L;
final String objectName = getRandomName();
try {
try (final InputStream is = new ContentInputStream(fullLength)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(fullLength, -1));
}
try (final InputStream partialObjectStream =
client.getObject(bucketName, objectName, offset, Long.valueOf(partialLength))) {
byte[] result = new byte[fullLength];
final int read = partialObjectStream.read(result);
result = Arrays.copyOf(result, read);
if (result.length != partialLength) {
throw new Exception(
String.format(
"Expecting only the first %d bytes from partial getObject request; received %d bytes instead.",
partialLength, read));
}
}
client.removeObject(bucketName, objectName);
mintSuccessLog(
"getObject(String bucketName, String objectName, long offset, Long length) with offset=0",
String.format("offset: %d, length: %d bytes", offset, partialLength),
startTime);
} catch (final Exception e) {
mintFailedLog(
"getObject(String bucketName, String objectName, long offset, Long length) with offset=0",
String.format("offset: %d, length: %d bytes", offset, partialLength),
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: getObject(String bucketName, String objectName, ServerSideEncryption sse, String
* fileName).
*/
public static void getObject_test9() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: getObject(String bucketName, String objectName, ServerSideEncryption sse, String fileName)");
}
long startTime = System.currentTimeMillis();
// Generate a new 256 bit AES key - This key must be remembered by the client.
KeyGenerator keyGen = KeyGenerator.getInstance("AES");
keyGen.init(256);
ServerSideEncryption sse = ServerSideEncryption.withCustomerKey(keyGen.generateKey());
try {
String objectName = getRandomName();
String filename = createFile1Kb();
PutObjectOptions options = new PutObjectOptions(1 * KB, -1);
options.setSse(sse);
client.putObject(bucketName, objectName, filename, options);
client.getObject(bucketName, objectName, sse, objectName + ".downloaded");
Files.delete(Paths.get(objectName + ".downloaded"));
client.removeObject(bucketName, objectName);
mintSuccessLog(
"getObject(String bucketName, String objectName, ServerSideEncryption sse, "
+ "String filename). To test SSE_C",
"size: 1 KB",
startTime);
} catch (Exception e) {
mintFailedLog(
"getObject(String bucketName, String objectName, ServerSideEncryption sse, "
+ "String filename). To test SSE_C",
"size: 1 KB",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: listObjects(final String bucketName). */
public static void listObject_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: listObjects(final String bucketName)");
}
long startTime = System.currentTimeMillis();
try {
String[] objectNames = new String[3];
int i = 0;
for (i = 0; i < 3; i++) {
objectNames[i] = getRandomName();
try (final InputStream is = new ContentInputStream(1)) {
client.putObject(bucketName, objectNames[i], is, new PutObjectOptions(1, -1));
}
}
i = 0;
for (Result<?> r : client.listObjects(bucketName)) {
ignore(i++, r.get());
if (i == 3) {
break;
}
}
for (Result<?> r : client.removeObjects(bucketName, Arrays.asList(objectNames))) {
ignore(r.get());
}
mintSuccessLog("listObjects(final String bucketName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"listObjects(final String bucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: listObjects(bucketName, final String prefix). */
public static void listObject_test2() throws Exception {
if (!mintEnv) {
System.out.println("Test: listObjects(final String bucketName, final String prefix)");
}
long startTime = System.currentTimeMillis();
try {
String[] objectNames = new String[3];
int i = 0;
for (i = 0; i < 3; i++) {
objectNames[i] = getRandomName();
try (final InputStream is = new ContentInputStream(1)) {
client.putObject(bucketName, objectNames[i], is, new PutObjectOptions(1, -1));
}
}
i = 0;
for (Result<?> r : client.listObjects(bucketName, "minio")) {
ignore(i++, r.get());
if (i == 3) {
break;
}
}
for (Result<?> r : client.removeObjects(bucketName, Arrays.asList(objectNames))) {
ignore(r.get());
}
mintSuccessLog(
"listObjects(final String bucketName, final String prefix)", "prefix :minio", startTime);
} catch (Exception e) {
mintFailedLog(
"listObjects(final String bucketName, final String prefix)",
"prefix :minio",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: listObjects(bucketName, final String prefix, final boolean recursive). */
public static void listObject_test3() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: listObjects(final String bucketName, final String prefix, final boolean recursive)");
}
long startTime = System.currentTimeMillis();
try {
String[] objectNames = new String[3];
int i = 0;
for (i = 0; i < 3; i++) {
objectNames[i] = getRandomName();
try (final InputStream is = new ContentInputStream(1)) {
client.putObject(bucketName, objectNames[i], is, new PutObjectOptions(1, -1));
}
}
i = 0;
for (Result<?> r : client.listObjects(bucketName, "minio", true)) {
ignore(i++, r.get());
if (i == 3) {
break;
}
}
for (Result<?> r : client.removeObjects(bucketName, Arrays.asList(objectNames))) {
ignore(r.get());
}
mintSuccessLog(
"listObjects(final String bucketName, final String prefix, final boolean recursive)",
"prefix :minio, recursive: true",
startTime);
} catch (Exception e) {
mintFailedLog(
"listObjects(final String bucketName, final String prefix, final boolean recursive)",
"prefix :minio, recursive: true",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
  /**
   * Test: empty bucket: listObjects(final String bucketName, final String prefix, final boolean
   * recursive).
   */
public static void listObject_test4() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: empty bucket: listObjects(final String bucketName, final String prefix,"
+ " final boolean recursive)");
}
long startTime = System.currentTimeMillis();
try {
int i = 0;
for (Result<?> r : client.listObjects(bucketName, "minioemptybucket", true)) {
ignore(i++, r.get());
if (i == 3) {
break;
}
}
mintSuccessLog(
"listObjects(final String bucketName, final String prefix, final boolean recursive)",
"prefix :minioemptybucket, recursive: true",
startTime);
} catch (Exception e) {
mintFailedLog(
"listObjects(final String bucketName, final String prefix, final boolean recursive)",
"prefix :minioemptybucket, recursive: true",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: recursive: listObjects(bucketName, final String prefix, final boolean recursive). */
public static void listObject_test5() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: recursive: listObjects(final String bucketName, final String prefix, "
+ "final boolean recursive)");
}
long startTime = System.currentTimeMillis();
try {
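      // 1050 objects exceed the 1000-key page size of a single listing, so iteration must paginate.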
int objCount = 1050;
String[] objectNames = new String[objCount];
int i = 0;
for (i = 0; i < objCount; i++) {
objectNames[i] = getRandomName();
try (final InputStream is = new ContentInputStream(1)) {
client.putObject(bucketName, objectNames[i], is, new PutObjectOptions(1, -1));
}
}
i = 0;
for (Result<?> r : client.listObjects(bucketName, "minio", true)) {
ignore(i++, r.get());
}
// Check the number of uploaded objects
if (i != objCount) {
throw new Exception("item count differs, expected: " + objCount + ", got: " + i);
}
for (Result<?> r : client.removeObjects(bucketName, Arrays.asList(objectNames))) {
ignore(r.get());
}
mintSuccessLog(
"listObjects(final String bucketName, final String prefix, final boolean recursive)",
"prefix :minio, recursive: true",
startTime);
} catch (Exception e) {
mintFailedLog(
"listObjects(final String bucketName, final String prefix, final boolean recursive)",
"prefix :minio, recursive: true",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: listObjects(bucketName, final String prefix, final boolean recursive, final boolean
* useVersion1).
*/
public static void listObject_test6() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: listObjects(final String bucketName, final String prefix, final boolean recursive, "
+ "final boolean useVersion1)");
}
long startTime = System.currentTimeMillis();
try {
String[] objectNames = new String[3];
int i = 0;
for (i = 0; i < 3; i++) {
objectNames[i] = getRandomName();
try (final InputStream is = new ContentInputStream(1)) {
client.putObject(bucketName, objectNames[i], is, new PutObjectOptions(1, -1));
}
}
i = 0;
for (Result<?> r : client.listObjects(bucketName, "minio", true, true)) {
ignore(i++, r.get());
if (i == 3) {
break;
}
}
for (Result<?> r : client.removeObjects(bucketName, Arrays.asList(objectNames))) {
ignore(r.get());
}
mintSuccessLog(
"listObjects(final String bucketName, final String prefix, "
+ "final boolean recursive, final boolean useVersion1)",
"prefix :minio, recursive: true, useVersion1: true",
startTime);
} catch (Exception e) {
mintFailedLog(
"listObjects(final String bucketName, final String prefix, "
+ "final boolean recursive, final boolean useVersion1)",
"prefix :minio, recursive: true, useVersion1: true",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: removeObject(String bucketName, String objectName). */
public static void removeObject_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: removeObject(String bucketName, String objectName)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1, -1));
}
client.removeObject(bucketName, objectName);
mintSuccessLog("removeObject(String bucketName, String objectName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"removeObject(String bucketName, String objectName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: removeObjects(final String bucketName, final Iterable<String> objectNames). */
public static void removeObject_test2() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: removeObjects(final String bucketName, final Iterable<String> objectNames)");
}
long startTime = System.currentTimeMillis();
try {
String[] objectNames = new String[4];
for (int i = 0; i < 3; i++) {
objectNames[i] = getRandomName();
try (final InputStream is = new ContentInputStream(1)) {
client.putObject(bucketName, objectNames[i], is, new PutObjectOptions(1, -1));
}
}
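      // Include a non-existent object name; the bulk remove should not report an error for it.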
objectNames[3] = "nonexistent-object";
for (Result<?> r : client.removeObjects(bucketName, Arrays.asList(objectNames))) {
ignore(r.get());
}
mintSuccessLog(
"removeObjects(final String bucketName, final Iterable<String> objectNames)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"removeObjects(final String bucketName, final Iterable<String> objectNames)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: listIncompleteUploads(String bucketName). */
public static void listIncompleteUploads_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: listIncompleteUploads(String bucketName)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
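      // Declare 9 MB but supply only 6 MB so the upload aborts, leaving an incomplete multipart upload.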
try (final InputStream is = new ContentInputStream(6 * MB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(9 * MB, -1));
} catch (ErrorResponseException e) {
if (e.errorResponse().errorCode() != ErrorCode.INCOMPLETE_BODY) {
throw e;
}
} catch (InsufficientDataException e) {
ignore();
}
int i = 0;
for (Result<Upload> r : client.listIncompleteUploads(bucketName)) {
ignore(i++, r.get());
if (i == 10) {
break;
}
}
client.removeIncompleteUpload(bucketName, objectName);
mintSuccessLog("listIncompleteUploads(String bucketName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"listIncompleteUploads(String bucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: listIncompleteUploads(String bucketName, String prefix). */
public static void listIncompleteUploads_test2() throws Exception {
if (!mintEnv) {
System.out.println("Test: listIncompleteUploads(String bucketName, String prefix)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(6 * MB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(9 * MB, -1));
} catch (ErrorResponseException e) {
if (e.errorResponse().errorCode() != ErrorCode.INCOMPLETE_BODY) {
throw e;
}
} catch (InsufficientDataException e) {
ignore();
}
int i = 0;
for (Result<Upload> r : client.listIncompleteUploads(bucketName, "minio")) {
ignore(i++, r.get());
if (i == 10) {
break;
}
}
client.removeIncompleteUpload(bucketName, objectName);
mintSuccessLog("listIncompleteUploads(String bucketName, String prefix)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"listIncompleteUploads(String bucketName, String prefix)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: listIncompleteUploads(final String bucketName, final String prefix, final boolean
* recursive).
*/
public static void listIncompleteUploads_test3() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: listIncompleteUploads(final String bucketName, final String prefix, "
+ "final boolean recursive)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(6 * MB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(9 * MB, -1));
} catch (ErrorResponseException e) {
if (e.errorResponse().errorCode() != ErrorCode.INCOMPLETE_BODY) {
throw e;
}
} catch (InsufficientDataException e) {
ignore();
}
int i = 0;
for (Result<Upload> r : client.listIncompleteUploads(bucketName, "minio", true)) {
ignore(i++, r.get());
if (i == 10) {
break;
}
}
client.removeIncompleteUpload(bucketName, objectName);
mintSuccessLog(
"listIncompleteUploads(final String bucketName, final String prefix, final boolean recursive)",
"prefix: minio, recursive: true",
startTime);
} catch (Exception e) {
mintFailedLog(
"listIncompleteUploads(final String bucketName, final String prefix, final boolean recursive)",
"prefix: minio, recursive: true",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: removeIncompleteUpload(String bucketName, String objectName). */
public static void removeIncompleteUploads_test() throws Exception {
if (!mintEnv) {
System.out.println("Test: removeIncompleteUpload(String bucketName, String objectName)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(6 * MB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(9 * MB, -1));
} catch (ErrorResponseException e) {
if (e.errorResponse().errorCode() != ErrorCode.INCOMPLETE_BODY) {
throw e;
}
} catch (InsufficientDataException e) {
ignore();
}
int i = 0;
for (Result<Upload> r : client.listIncompleteUploads(bucketName)) {
ignore(i++, r.get());
if (i == 10) {
break;
}
}
client.removeIncompleteUpload(bucketName, objectName);
mintSuccessLog(
"removeIncompleteUpload(String bucketName, String objectName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"removeIncompleteUpload(String bucketName, String objectName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
  /** Test: presignedGetObject(String bucketName, String objectName). */
public static void presignedGetObject_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: presignedGetObject(String bucketName, String objectName)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
byte[] inBytes;
try (final InputStream is = new ContentInputStream(1 * KB)) {
inBytes = readAllBytes(is);
}
String urlString = client.presignedGetObject(bucketName, objectName);
byte[] outBytes = readObject(urlString);
if (!Arrays.equals(inBytes, outBytes)) {
throw new Exception("object content differs");
}
client.removeObject(bucketName, objectName);
mintSuccessLog("presignedGetObject(String bucketName, String objectName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"presignedGetObject(String bucketName, String objectName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: presignedGetObject(String bucketName, String objectName, Integer expires). */
public static void presignedGetObject_test2() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: presignedGetObject(String bucketName, String objectName, Integer expires)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
byte[] inBytes;
try (final InputStream is = new ContentInputStream(1 * KB)) {
inBytes = readAllBytes(is);
}
String urlString = client.presignedGetObject(bucketName, objectName, 3600);
byte[] outBytes = readObject(urlString);
if (!Arrays.equals(inBytes, outBytes)) {
throw new Exception("object content differs");
}
client.removeObject(bucketName, objectName);
mintSuccessLog(
"presignedGetObject(String bucketName, String objectName, Integer expires)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"presignedGetObject(String bucketName, String objectName, Integer expires)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
   * Test: presignedGetObject(String bucketName, String objectName, Integer expires, Map
* reqParams).
*/
public static void presignedGetObject_test3() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: presignedGetObject(String bucketName, String objectName, Integer expires, "
+ "Map<String, String> reqParams)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
byte[] inBytes;
try (final InputStream is = new ContentInputStream(1 * KB)) {
inBytes = readAllBytes(is);
}
Map<String, String> reqParams = new HashMap<>();
reqParams.put("response-content-type", "application/json");
String urlString = client.presignedGetObject(bucketName, objectName, 3600, reqParams);
byte[] outBytes = readObject(urlString);
if (!Arrays.equals(inBytes, outBytes)) {
throw new Exception("object content differs");
}
client.removeObject(bucketName, objectName);
mintSuccessLog(
"presignedGetObject(String bucketName, String objectName, Integer expires, Map<String,"
+ " String> reqParams)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"presignedGetObject(String bucketName, String objectName, Integer expires, Map<String,"
+ " String> reqParams)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
  /** Test: presignedPutObject(String bucketName, String objectName). */
public static void presignedPutObject_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: presignedPutObject(String bucketName, String objectName)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
String urlString = client.presignedPutObject(bucketName, objectName);
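      // Upload with a plain HTTP PUT to the presigned URL; no SDK credentials are used for this call.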
byte[] data = "hello, world".getBytes(StandardCharsets.UTF_8);
writeObject(urlString, data);
client.removeObject(bucketName, objectName);
mintSuccessLog("presignedPutObject(String bucketName, String objectName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"presignedPutObject(String bucketName, String objectName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: presignedPutObject(String bucketName, String objectName, Integer expires). */
public static void presignedPutObject_test2() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: presignedPutObject(String bucketName, String objectName, Integer expires)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
String urlString = client.presignedPutObject(bucketName, objectName, 3600);
byte[] data = "hello, world".getBytes(StandardCharsets.UTF_8);
writeObject(urlString, data);
client.removeObject(bucketName, objectName);
mintSuccessLog(
"presignedPutObject(String bucketName, String objectName, Integer expires)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"presignedPutObject(String bucketName, String objectName, Integer expires)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: presignedPostPolicy(PostPolicy policy). */
public static void presignedPostPolicy_test() throws Exception {
if (!mintEnv) {
System.out.println("Test: presignedPostPolicy(PostPolicy policy)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
PostPolicy policy = new PostPolicy(bucketName, objectName, ZonedDateTime.now().plusDays(7));
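      // Restrict uploads accepted under this policy to sizes between 1 MB and 4 MB.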
policy.setContentRange(1 * MB, 4 * MB);
Map<String, String> formData = client.presignedPostPolicy(policy);
MultipartBody.Builder multipartBuilder = new MultipartBody.Builder();
multipartBuilder.setType(MultipartBody.FORM);
for (Map.Entry<String, String> entry : formData.entrySet()) {
multipartBuilder.addFormDataPart(entry.getKey(), entry.getValue());
}
try (final InputStream is = new ContentInputStream(1 * MB)) {
multipartBuilder.addFormDataPart(
"file", objectName, RequestBody.create(null, readAllBytes(is)));
}
Request.Builder requestBuilder = new Request.Builder();
String urlString = client.getObjectUrl(bucketName, "x");
// remove last two characters to get clean url string of bucket.
urlString = urlString.substring(0, urlString.length() - 2);
Request request = requestBuilder.url(urlString).post(multipartBuilder.build()).build();
OkHttpClient transport =
new OkHttpClient()
.newBuilder()
.connectTimeout(20, TimeUnit.SECONDS)
.writeTimeout(20, TimeUnit.SECONDS)
.readTimeout(20, TimeUnit.SECONDS)
.build();
Response response = transport.newCall(request).execute();
if (response == null) {
throw new Exception("no response from server");
}
try {
if (!response.isSuccessful()) {
String errorXml = new String(response.body().bytes(), StandardCharsets.UTF_8);
throw new Exception(
"failed to upload object. Response: " + response + ", Error: " + errorXml);
}
} finally {
response.close();
}
client.removeObject(bucketName, objectName);
mintSuccessLog("presignedPostPolicy(PostPolicy policy)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"presignedPostPolicy(PostPolicy policy)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
  /** Test: putObject(): upload objects in parallel using multiple threads. */
public static void threadedPutObject() throws Exception {
if (!mintEnv) {
System.out.println("Test: threadedPutObject");
}
long startTime = System.currentTimeMillis();
try {
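      // Launch 7 concurrent uploads, each writing its own 6 MB file.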
Thread[] threads = new Thread[7];
for (int i = 0; i < 7; i++) {
threads[i] = new Thread(new PutObjectRunnable(client, bucketName, createFile6Mb(), 6 * MB));
}
for (int i = 0; i < 7; i++) {
threads[i].start();
}
// Waiting for threads to complete.
for (int i = 0; i < 7; i++) {
threads[i].join();
}
// All threads are completed.
mintSuccessLog(
"putObject(String bucketName, String objectName, String filename)",
"filename: threaded6MB",
startTime);
} catch (Exception e) {
mintFailedLog(
"putObject(String bucketName, String objectName, String filename)",
"filename: threaded6MB",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: copyObject(String bucketName, String objectName, String destBucketName). */
public static void copyObject_test1() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, String destBucketName)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
String destBucketName = getRandomName();
client.makeBucket(destBucketName);
client.copyObject(destBucketName, objectName, null, null, bucketName, null, null, null);
client.getObject(destBucketName, objectName).close();
client.removeObject(bucketName, objectName);
client.removeObject(destBucketName, objectName);
client.removeBucket(destBucketName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, String destBucketName)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, String destBucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, String destBucketName, CopyConditions
* copyConditions) with ETag to match.
*/
public static void copyObject_test2() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, String destBucketName,"
+ "CopyConditions copyConditions) with Matching ETag (Negative Case)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
String destBucketName = getRandomName();
client.makeBucket(destBucketName);
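      // A non-matching ETag condition should make the server reject the copy with PRECONDITION_FAILED.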
CopyConditions invalidETag = new CopyConditions();
invalidETag.setMatchETag("TestETag");
try {
client.copyObject(
destBucketName, objectName, null, null, bucketName, null, null, invalidETag);
} catch (ErrorResponseException e) {
if (e.errorResponse().errorCode() != ErrorCode.PRECONDITION_FAILED) {
throw e;
}
}
client.removeObject(bucketName, objectName);
client.removeBucket(destBucketName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, String destBucketName,"
+ " CopyConditions copyConditions)",
"CopyConditions: invalidETag",
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions)",
"CopyConditions: invalidETag",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, String destBucketName, CopyConditions
* copyConditions) with ETag to match.
*/
public static void copyObject_test3() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, String destBucketName,"
+ "CopyConditions copyConditions) with Matching ETag (Positive Case)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
String destBucketName = getRandomName();
client.makeBucket(destBucketName);
ObjectStat stat = client.statObject(bucketName, objectName);
CopyConditions copyConditions = new CopyConditions();
copyConditions.setMatchETag(stat.etag());
// File should be copied as ETag set in copyConditions matches object's ETag.
client.copyObject(
destBucketName, objectName, null, null, bucketName, null, null, copyConditions);
client.getObject(destBucketName, objectName).close();
client.removeObject(bucketName, objectName);
client.removeObject(destBucketName, objectName);
client.removeBucket(destBucketName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, String destBucketName,"
+ " CopyConditions copyConditions)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, String destBucketName,"
+ " CopyConditions copyConditions)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, String destBucketName, CopyConditions
* copyConditions) with ETag to not match.
*/
public static void copyObject_test4() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, String destBucketName,"
+ "CopyConditions copyConditions) with not matching ETag"
+ " (Positive Case)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
String destBucketName = getRandomName();
client.makeBucket(destBucketName);
CopyConditions copyConditions = new CopyConditions();
copyConditions.setMatchETagNone("TestETag");
// File should be copied as ETag set in copyConditions doesn't match object's
// ETag.
client.copyObject(
destBucketName, objectName, null, null, bucketName, null, null, copyConditions);
client.getObject(destBucketName, objectName).close();
client.removeObject(bucketName, objectName);
client.removeObject(destBucketName, objectName);
client.removeBucket(destBucketName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, String destBucketName,"
+ " CopyConditions copyConditions)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, String destBucketName,"
+ "CopyConditions copyConditions)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, String destBucketName, CopyConditions
* copyConditions) with ETag to not match.
*/
public static void copyObject_test5() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, String destBucketName,"
+ "CopyConditions copyConditions) with not matching ETag"
+ " (Negative Case)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
String destBucketName = getRandomName();
client.makeBucket(destBucketName);
ObjectStat stat = client.statObject(bucketName, objectName);
CopyConditions matchingETagNone = new CopyConditions();
matchingETagNone.setMatchETagNone(stat.etag());
try {
client.copyObject(
destBucketName, objectName, null, null, bucketName, null, null, matchingETagNone);
} catch (ErrorResponseException e) {
// File should not be copied as ETag set in copyConditions matches object's
// ETag.
if (e.errorResponse().errorCode() != ErrorCode.PRECONDITION_FAILED) {
throw e;
}
}
client.removeObject(bucketName, objectName);
client.removeBucket(destBucketName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, String destBucketName, CopyConditions
* copyConditions) with object modified after condition.
*/
public static void copyObject_test6() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, String destBucketName,"
+ "CopyConditions copyConditions) with modified after "
+ "condition (Positive Case)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
String destBucketName = getRandomName();
client.makeBucket(destBucketName);
CopyConditions modifiedDateCondition = new CopyConditions();
modifiedDateCondition.setModified(ZonedDateTime.of(2015, 05, 3, 3, 10, 10, 0, Time.UTC));
// File should be copied as object was modified after the set date.
client.copyObject(
destBucketName, objectName, null, null, bucketName, null, null, modifiedDateCondition);
client.getObject(destBucketName, objectName).close();
client.removeObject(bucketName, objectName);
client.removeObject(destBucketName, objectName);
client.removeBucket(destBucketName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions)",
"CopyCondition: modifiedDateCondition",
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions)",
"CopyCondition: modifiedDateCondition",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, String destBucketName, CopyConditions
* copyConditions) with object modified after condition.
*/
public static void copyObject_test7() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, String destBucketName,"
+ "CopyConditions copyConditions) with modified after"
+ " condition (Negative Case)");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
String destBucketName = getRandomName();
client.makeBucket(destBucketName);
CopyConditions invalidUnmodifiedCondition = new CopyConditions();
invalidUnmodifiedCondition.setUnmodified(
ZonedDateTime.of(2015, 05, 3, 3, 10, 10, 0, Time.UTC));
try {
client.copyObject(
destBucketName,
objectName,
null,
null,
bucketName,
null,
null,
invalidUnmodifiedCondition);
} catch (ErrorResponseException e) {
// File should not be copied as object was modified after date set in
// copyConditions.
if (e.errorResponse().errorCode() != ErrorCode.PRECONDITION_FAILED) {
throw e;
}
}
client.removeObject(bucketName, objectName);
// Destination bucket is expected to be empty, otherwise it will trigger an
// exception.
client.removeBucket(destBucketName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions)",
"CopyCondition: invalidUnmodifiedCondition",
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions)",
"CopyCondition: invalidUnmodifiedCondition",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, String destBucketName, CopyConditions
* copyConditions, Map metadata) replace object metadata.
*/
public static void copyObject_test8() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, String destBucketName,"
+ "CopyConditions copyConditions, Map<String, String> metadata)"
+ " replace object metadata");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
String destBucketName = getRandomName();
client.makeBucket(destBucketName);
CopyConditions copyConditions = new CopyConditions();
copyConditions.setReplaceMetadataDirective();
Map<String, String> metadata = new HashMap<>();
metadata.put("Content-Type", customContentType);
client.copyObject(
destBucketName, objectName, metadata, null, bucketName, objectName, null, copyConditions);
ObjectStat objectStat = client.statObject(destBucketName, objectName);
if (!customContentType.equals(objectStat.contentType())) {
throw new Exception(
"content type differs. expected: "
+ customContentType
+ ", got: "
+ objectStat.contentType());
}
client.removeObject(bucketName, objectName);
client.removeObject(destBucketName, objectName);
client.removeBucket(destBucketName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions, Map<String, String> metadata)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions, Map<String, String> metadata)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, String destBucketName, CopyConditions
* copyConditions, Map metadata) remove object metadata.
*/
public static void copyObject_test9() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, String destBucketName,"
+ "CopyConditions copyConditions, Map<String, String> metadata)"
+ " remove object metadata");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
Map<String, String> headerMap = new HashMap<>();
headerMap.put("Test", "testValue");
try (final InputStream is = new ContentInputStream(1)) {
PutObjectOptions options = new PutObjectOptions(1, -1);
options.setHeaders(headerMap);
client.putObject(bucketName, objectName, is, options);
}
// Attempt to remove the user-defined metadata from the object
CopyConditions copyConditions = new CopyConditions();
copyConditions.setReplaceMetadataDirective();
client.copyObject(
bucketName,
objectName,
new HashMap<String, String>(),
null,
bucketName,
objectName,
null,
copyConditions);
ObjectStat objectStat = client.statObject(bucketName, objectName);
if (objectStat.httpHeaders().containsKey("X-Amz-Meta-Test")) {
throw new Exception("expected user-defined metadata has been removed");
}
client.removeObject(bucketName, objectName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions, Map<String, String> metadata)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, String destBucketName, "
+ "CopyConditions copyConditions, Map<String, String> metadata)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, String
* destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget) To test using
* SSE_C.
*/
public static void copyObject_test10() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, "
+ "String destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget)"
+ " using SSE_C. ");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
      // Use a fixed 256-bit AES key; the client must retain this key to read the object back.
byte[] key = "01234567890123456789012345678901".getBytes(StandardCharsets.UTF_8);
SecretKeySpec secretKeySpec = new SecretKeySpec(key, "AES");
ServerSideEncryption ssePut = ServerSideEncryption.withCustomerKey(secretKeySpec);
ServerSideEncryption sseSource = ServerSideEncryption.withCustomerKey(secretKeySpec);
byte[] keyTarget = "98765432100123456789012345678901".getBytes(StandardCharsets.UTF_8);
SecretKeySpec secretKeySpecTarget = new SecretKeySpec(keyTarget, "AES");
ServerSideEncryption sseTarget = ServerSideEncryption.withCustomerKey(secretKeySpecTarget);
try (final InputStream is = new ContentInputStream(1)) {
PutObjectOptions options = new PutObjectOptions(1, -1);
options.setSse(ssePut);
client.putObject(bucketName, objectName, is, options);
}
      // Copy the object onto itself, re-encrypting from the source customer key to the target key.
CopyConditions copyConditions = new CopyConditions();
copyConditions.setReplaceMetadataDirective();
client.copyObject(
bucketName,
objectName,
null,
sseTarget,
bucketName,
objectName,
sseSource,
copyConditions);
client.statObject(bucketName, objectName, sseTarget); // Check for object existence.
client.removeObject(bucketName, objectName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, "
+ "String destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget)"
+ " using SSE_C.",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, "
+ "String destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget)"
+ " using SSE_C.",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, String
* destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget) To test using
* SSE_S3.
*/
public static void copyObject_test11() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, "
+ "String destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget)"
+ " using SSE_S3. ");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
ServerSideEncryption sse = ServerSideEncryption.atRest();
try (final InputStream is = new ContentInputStream(1)) {
PutObjectOptions options = new PutObjectOptions(1, -1);
options.setSse(sse);
client.putObject(bucketName, objectName, is, options);
}
      // Copy the object onto itself using SSE-S3 encryption.
CopyConditions copyConditions = new CopyConditions();
copyConditions.setReplaceMetadataDirective();
client.copyObject(
bucketName, objectName, null, sse, bucketName, objectName, null, copyConditions);
      client.statObject(bucketName, objectName); // Check for object existence after the copy.
client.removeObject(bucketName, objectName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, "
+ "String destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget)"
+ " using SSE_S3.",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, "
+ "String destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget)"
+ " using SSE_S3.",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, String
* destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget) To test using
* SSE_KMS.
*/
public static void copyObject_test12() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, "
+ "String destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget)"
+ " using SSE_KMS. ");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
Map<String, String> myContext = new HashMap<>();
myContext.put("key1", "value1");
      String keyId = System.getenv("MINT_KEY_ID");
      if (keyId == null || keyId.equals("")) {
        mintIgnoredLog(
            "copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, "
                + "String destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget)"
                + " using SSE_KMS.",
            null,
            startTime);
        return;
      }
      ServerSideEncryption sse = ServerSideEncryption.withManagedKeys(keyId, myContext);
try (final InputStream is = new ContentInputStream(1)) {
PutObjectOptions options = new PutObjectOptions(1, -1);
options.setSse(sse);
client.putObject(bucketName, objectName, is, options);
}
      // Copy the object onto itself using SSE-KMS encryption.
CopyConditions copyConditions = new CopyConditions();
copyConditions.setReplaceMetadataDirective();
client.copyObject(
bucketName, objectName, null, sse, bucketName, objectName, null, copyConditions);
      client.statObject(bucketName, objectName); // Check for object existence after the copy.
client.removeObject(bucketName, objectName);
mintSuccessLog(
"copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, "
+ "String destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget)"
+ " using SSE_KMS.",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"copyObject(String bucketName, String objectName, ServerSideEncryption sseSource, "
+ "String destBucketName, CopyConditions copyConditions, ServerSideEncryption sseTarget)"
+ " using SSE_KMS.",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: composeObject(String bucketName, String objectName, List<ComposeSource>
* composeSources,Map <String, String> headerMap, ServerSideEncryption sseTarget).
*/
public static void composeObject_test1() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget).");
}
long startTime = System.currentTimeMillis();
try {
String destinationObjectName = getRandomName();
String filename1 = createFile6Mb();
String filename2 = createFile6Mb();
PutObjectOptions options = new PutObjectOptions(6 * MB, -1);
client.putObject(bucketName, filename1, filename1, options);
client.putObject(bucketName, filename2, filename2, options);
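      // ComposeSource arguments, as exercised in these tests: bucket, object, offset, length,
      // and three further optional parameters (the last being the source-side server-side
      // encryption); unused parameters are passed as null.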
ComposeSource s1 = new ComposeSource(bucketName, filename1, null, null, null, null, null);
ComposeSource s2 = new ComposeSource(bucketName, filename2, null, null, null, null, null);
List<ComposeSource> listSourceObjects = new ArrayList<ComposeSource>();
listSourceObjects.add(s1);
listSourceObjects.add(s2);
client.composeObject(bucketName, destinationObjectName, listSourceObjects, null, null);
Files.delete(Paths.get(filename1));
Files.delete(Paths.get(filename2));
client.removeObject(bucketName, filename1);
client.removeObject(bucketName, filename2);
client.removeObject(bucketName, destinationObjectName);
mintSuccessLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)",
"size: 6 MB & 6 MB ",
startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)",
null,
startTime);
} else {
mintFailedLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)",
"size: 6 MB & 6 MB",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/**
* Test: composeObject(String bucketName, String objectName, List<ComposeSource>
* composeSources,Map <String, String> headerMap, ServerSideEncryption sseTarget).
*/
public static void composeObject_test2() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget) with offset and length.");
}
long startTime = System.currentTimeMillis();
try {
String destinationObjectName = getRandomName();
String filename1 = createFile6Mb();
String filename2 = createFile6Mb();
PutObjectOptions options = new PutObjectOptions(6 * MB, -1);
client.putObject(bucketName, filename1, filename1, options);
client.putObject(bucketName, filename2, filename2, options);
ComposeSource s1 = new ComposeSource(bucketName, filename1, 10L, 6291436L, null, null, null);
ComposeSource s2 = new ComposeSource(bucketName, filename2, null, null, null, null, null);
List<ComposeSource> listSourceObjects = new ArrayList<ComposeSource>();
listSourceObjects.add(s1);
listSourceObjects.add(s2);
client.composeObject(bucketName, destinationObjectName, listSourceObjects, null, null);
Files.delete(Paths.get(filename1));
Files.delete(Paths.get(filename2));
client.removeObject(bucketName, filename1);
client.removeObject(bucketName, filename2);
client.removeObject(bucketName, destinationObjectName);
mintSuccessLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)",
"with offset and length.",
startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)"
+ "with offset and length.",
null,
startTime);
} else {
mintFailedLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)",
"with offset and length.",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/**
* Test: composeObject(String bucketName, String objectName, List<ComposeSource>
* composeSources,Map <String, String> headerMap, ServerSideEncryption sseTarget).
*/
public static void composeObject_test3() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget) with one source");
}
long startTime = System.currentTimeMillis();
try {
String destinationObjectName = getRandomName();
String filename1 = createFile6Mb();
client.putObject(bucketName, filename1, filename1, new PutObjectOptions(6 * MB, -1));
ComposeSource s1 = new ComposeSource(bucketName, filename1, 10L, 6291436L, null, null, null);
List<ComposeSource> listSourceObjects = new ArrayList<ComposeSource>();
listSourceObjects.add(s1);
client.composeObject(bucketName, destinationObjectName, listSourceObjects, null, null);
Files.delete(Paths.get(filename1));
client.removeObject(bucketName, filename1);
client.removeObject(bucketName, destinationObjectName);
mintSuccessLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)",
"with one source.",
startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)"
+ "with one source.",
null,
startTime);
} else {
mintFailedLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)",
"with one source.",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/**
* Test: composeObject(String bucketName, String objectName, List<ComposeSource>
* composeSources,Map <String, String> headerMap, ServerSideEncryption sseTarget).
*/
public static void composeObject_test4() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > userMetaData, ServerSideEncryption sseTarget) with SSE_C and SSE_C Target");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
      // Use a fixed 256-bit AES key; the client must retain this key to read the object back.
byte[] key = "01234567890123456789012345678901".getBytes(StandardCharsets.UTF_8);
SecretKeySpec secretKeySpec = new SecretKeySpec(key, "AES");
ServerSideEncryption ssePut = ServerSideEncryption.withCustomerKey(secretKeySpec);
byte[] keyTarget = "01234567890123456789012345678901".getBytes(StandardCharsets.UTF_8);
SecretKeySpec secretKeySpecTarget = new SecretKeySpec(keyTarget, "AES");
ServerSideEncryption sseTarget = ServerSideEncryption.withCustomerKey(secretKeySpecTarget);
String filename1 = createFile6Mb();
String filename2 = createFile6Mb();
PutObjectOptions options = new PutObjectOptions(6 * MB, -1);
options.setSse(ssePut);
client.putObject(bucketName, filename1, filename1, options);
client.putObject(bucketName, filename2, filename2, options);
ComposeSource s1 = new ComposeSource(bucketName, filename1, null, null, null, null, ssePut);
ComposeSource s2 = new ComposeSource(bucketName, filename2, null, null, null, null, ssePut);
List<ComposeSource> listSourceObjects = new ArrayList<ComposeSource>();
listSourceObjects.add(s1);
listSourceObjects.add(s2);
client.composeObject(bucketName, objectName, listSourceObjects, null, sseTarget);
Files.delete(Paths.get(filename1));
Files.delete(Paths.get(filename2));
client.removeObject(bucketName, filename1);
client.removeObject(bucketName, filename2);
client.removeObject(bucketName, objectName);
mintSuccessLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)",
"with SSE_C and SSE_C Target",
startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget) with SSE_C and"
+ "SSE_C Target",
null,
startTime);
} else {
mintFailedLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget) with SSE_C and ",
"SSE_C Target",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/**
* Test: composeObject(String bucketName, String objectName, List<ComposeSource>
* composeSources,Map <String, String> headerMap, ServerSideEncryption sseTarget).
*/
public static void composeObject_test5() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > userMetaData, ServerSideEncryption sseTarget) with SSE_C on one source object");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
      // Use a fixed 256-bit AES key; the client must retain this key to read the object back.
byte[] key = "01234567890123456789012345678901".getBytes(StandardCharsets.UTF_8);
SecretKeySpec secretKeySpec = new SecretKeySpec(key, "AES");
ServerSideEncryption ssePut = ServerSideEncryption.withCustomerKey(secretKeySpec);
String filename1 = createFile6Mb();
String filename2 = createFile6Mb();
PutObjectOptions options = new PutObjectOptions(6 * MB, -1);
options.setSse(ssePut);
client.putObject(bucketName, filename1, filename1, options);
client.putObject(bucketName, filename2, filename2, new PutObjectOptions(6 * MB, -1));
ComposeSource s1 = new ComposeSource(bucketName, filename1, null, null, null, null, ssePut);
ComposeSource s2 = new ComposeSource(bucketName, filename2, null, null, null, null, null);
List<ComposeSource> listSourceObjects = new ArrayList<ComposeSource>();
listSourceObjects.add(s1);
listSourceObjects.add(s2);
client.composeObject(bucketName, objectName, listSourceObjects, null, null);
Files.delete(Paths.get(filename1));
Files.delete(Paths.get(filename2));
client.removeObject(bucketName, filename1);
client.removeObject(bucketName, filename2);
client.removeObject(bucketName, objectName);
mintSuccessLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)",
"with SSE_C on one source object ",
startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget) with SSE_C on and"
+ "one source object",
null,
startTime);
} else {
mintFailedLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget) with SSE_C on ",
"one source object",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/**
* Test: composeObject(String bucketName, String objectName, List<ComposeSource>
* composeSources,Map <String, String> headerMap, ServerSideEncryption sseTarget).
*/
public static void composeObject_test6() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > userMetaData, ServerSideEncryption sseTarget) with SSE_C Target only.");
}
long startTime = System.currentTimeMillis();
try {
String objectName = getRandomName();
byte[] keyTarget = "01234567890123456789012345678901".getBytes(StandardCharsets.UTF_8);
SecretKeySpec secretKeySpecTarget = new SecretKeySpec(keyTarget, "AES");
ServerSideEncryption sseTarget = ServerSideEncryption.withCustomerKey(secretKeySpecTarget);
String filename1 = createFile6Mb();
String filename2 = createFile6Mb();
PutObjectOptions options = new PutObjectOptions(6 * MB, -1);
client.putObject(bucketName, filename1, filename1, options);
client.putObject(bucketName, filename2, filename2, options);
ComposeSource s1 = new ComposeSource(bucketName, filename1, null, null, null, null, null);
ComposeSource s2 = new ComposeSource(bucketName, filename2, null, null, null, null, null);
List<ComposeSource> listSourceObjects = new ArrayList<ComposeSource>();
listSourceObjects.add(s1);
listSourceObjects.add(s2);
client.composeObject(bucketName, objectName, listSourceObjects, null, sseTarget);
Files.delete(Paths.get(filename1));
Files.delete(Paths.get(filename2));
client.removeObject(bucketName, filename1);
client.removeObject(bucketName, filename2);
client.removeObject(bucketName, objectName);
mintSuccessLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget)",
"SSE_C Target only.",
startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget) with SSE_C only",
null,
startTime);
} else {
mintFailedLog(
"composeObject(String bucketName, String objectName,List<ComposeSource> composeSources, "
+ "Map <String,String > headerMap, ServerSideEncryption sseTarget) SSE_C Target ",
" only.",
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/** Test: enableObjectLegalHold(String bucketName, String objectName, String versionId) */
public static void enableObjectLegalHold_test() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: enableObjectLegalHold(String bucketName, String objectName, String versionId)");
}
long startTime = System.currentTimeMillis();
String bucketName = getRandomName();
String objectName = getRandomName();
try {
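      // The third makeBucket argument presumably enables object locking, which legal hold needs.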
client.makeBucket(bucketName, null, true);
try {
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
client.enableObjectLegalHold(bucketName, objectName, null);
if (!client.isObjectLegalHoldEnabled(bucketName, objectName, null)) {
throw new Exception("[FAILED] isObjectLegalHoldEnabled(): expected: true, got: false");
}
client.disableObjectLegalHold(bucketName, objectName, null);
mintSuccessLog(
"enableObjectLegalHold(String bucketName, String objectName, String versionId)",
null,
startTime);
} finally {
client.removeObject(bucketName, objectName);
client.removeBucket(bucketName);
}
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog(
"enableObjectLegalHold(String bucketName, String objectName, String versionId)",
null,
startTime);
} else {
mintFailedLog(
"enableObjectLegalHold(String bucketName, String objectName, String versionId)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/** Test: disableObjectLegalHold(String bucketName, String objectName, String versionId) */
public static void disableObjectLegalHold_test() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: disableObjectLegalHold(String bucketName, String objectName, String versionId)");
}
long startTime = System.currentTimeMillis();
String bucketName = getRandomName();
String objectName = getRandomName();
try {
client.makeBucket(bucketName, null, true);
try {
try (final InputStream is = new ContentInputStream(1 * KB)) {
client.putObject(bucketName, objectName, is, new PutObjectOptions(1 * KB, -1));
}
client.enableObjectLegalHold(bucketName, objectName, null);
client.disableObjectLegalHold(bucketName, objectName, null);
if (client.isObjectLegalHoldEnabled(bucketName, objectName, null)) {
throw new Exception("[FAILED] isObjectLegalHoldEnabled(): expected: false, got: true");
}
} finally {
client.removeObject(bucketName, objectName);
client.removeBucket(bucketName);
}
mintSuccessLog(
"disableObjectLegalHold(String bucketName, String objectName, String versionId)",
null,
startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog(
"disableObjectLegalHold(String bucketName, String objectName, String versionId)",
null,
startTime);
} else {
mintFailedLog(
"disableObjectLegalHold(String bucketName, String objectName, String versionId)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/** Test: setDefaultRetention(String bucketName). */
public static void setDefaultRetention_test() throws Exception {
if (!mintEnv) {
System.out.println("Test: setDefaultRetention(String bucketName)");
}
long startTime = System.currentTimeMillis();
String bucketName = getRandomName();
try {
client.makeBucket(bucketName, null, true);
try {
ObjectLockConfiguration config =
new ObjectLockConfiguration(RetentionMode.COMPLIANCE, new RetentionDurationDays(10));
client.setDefaultRetention(bucketName, config);
} finally {
client.removeBucket(bucketName);
}
mintSuccessLog("setDefaultRetention (String bucketName)", null, startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog("setDefaultRetention (String bucketName)", null, startTime);
} else {
mintFailedLog(
"setDefaultRetention (String bucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/** Test: getDefaultRetention(String bucketName). */
public static void getDefaultRetention_test() throws Exception {
if (!mintEnv) {
System.out.println("Test: getDefaultRetention(String bucketName)");
}
long startTime = System.currentTimeMillis();
String bucketName = getRandomName();
try {
client.makeBucket(bucketName, null, true);
try {
ObjectLockConfiguration expectedConfig =
new ObjectLockConfiguration(RetentionMode.COMPLIANCE, new RetentionDurationDays(10));
client.setDefaultRetention(bucketName, expectedConfig);
ObjectLockConfiguration config = client.getDefaultRetention(bucketName);
if ((!(config.duration().unit() == expectedConfig.duration().unit()
&& config.duration().duration() == expectedConfig.duration().duration()))
|| (config.mode() != expectedConfig.mode())) {
throw new Exception(
"[FAILED] Expected: expected duration : "
+ expectedConfig.duration()
+ ", got: "
+ config.duration()
+ " expected mode :"
+ expectedConfig.mode()
+ ", got: "
+ config.mode());
}
expectedConfig =
new ObjectLockConfiguration(RetentionMode.GOVERNANCE, new RetentionDurationYears(1));
client.setDefaultRetention(bucketName, expectedConfig);
config = client.getDefaultRetention(bucketName);
if ((!(config.duration().unit() == expectedConfig.duration().unit()
&& config.duration().duration() == expectedConfig.duration().duration()))
|| (config.mode() != expectedConfig.mode())) {
throw new Exception(
"[FAILED] Expected: expected duration : "
+ expectedConfig.duration()
+ ", got: "
+ config.duration()
+ " expected mode :"
+ expectedConfig.mode()
+ ", got: "
+ config.mode());
}
} finally {
client.removeBucket(bucketName);
}
mintSuccessLog("getDefaultRetention (String bucketName)", null, startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog("getDefaultRetention (String bucketName)", null, startTime);
} else {
mintFailedLog(
"getDefaultRetention (String bucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/** Test: getBucketPolicy(String bucketName). */
public static void getBucketPolicy_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: getBucketPolicy(String bucketName)");
}
long startTime = System.currentTimeMillis();
try {
String policy =
"{\"Version\":\"2012-10-17\",\"Statement\":[{\"Action\":[\"s3:GetObject\"],\"Effect\":\"Allow\","
+ "\"Principal\":{\"AWS\":[\"*\"]},\"Resource\":[\"arn:aws:s3:::"
+ bucketName
+ "/myobject*\"],\"Sid\":\"\"}]}";
client.setBucketPolicy(bucketName, policy);
client.getBucketPolicy(bucketName);
mintSuccessLog("getBucketPolicy(String bucketName)", null, startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog("getBucketPolicy(String bucketName)", null, startTime);
} else {
mintFailedLog(
"getBucketPolicy(String bucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/** Test: setBucketPolicy(String bucketName, String policy). */
public static void setBucketPolicy_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: setBucketPolicy(String bucketName, String policy)");
}
long startTime = System.currentTimeMillis();
try {
String policy =
"{\"Statement\":[{\"Action\":\"s3:GetObject\",\"Effect\":\"Allow\",\"Principal\":"
+ "\"*\",\"Resource\":\"arn:aws:s3:::"
+ bucketName
+ "/myobject*\"}],\"Version\": \"2012-10-17\"}";
client.setBucketPolicy(bucketName, policy);
mintSuccessLog("setBucketPolicy(String bucketName, String policy)", null, startTime);
} catch (Exception e) {
ErrorResponse errorResponse = null;
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
errorResponse = exp.errorResponse();
}
// Ignore NotImplemented error
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog(
"setBucketPolicy(String bucketName, String objectPrefix, " + "PolicyType policyType)",
null,
startTime);
} else {
mintFailedLog(
"setBucketPolicy(String bucketName, String objectPrefix, " + "PolicyType policyType)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
}
/** Test: setBucketLifeCycle(String bucketName, String lifeCycle). */
public static void setBucketLifeCycle_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: setBucketLifeCycle(String bucketName, String lifeCycle)");
}
long startTime = System.currentTimeMillis();
try {
String lifeCycle =
"<LifecycleConfiguration><Rule><ID>expire-bucket</ID><Prefix></Prefix>"
+ "<Status>Enabled</Status><Expiration><Days>365</Days></Expiration>"
+ "</Rule></LifecycleConfiguration>";
client.setBucketLifeCycle(bucketName, lifeCycle);
mintSuccessLog("setBucketLifeCycle(String bucketName, String lifeCycle)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"setBucketLifeCycle(String bucketName, String lifeCycle) ",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: deleteBucketLifeCycle(String bucketName). */
public static void deleteBucketLifeCycle_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: deleteBucketLifeCycle(String bucketNam)");
}
long startTime = System.currentTimeMillis();
try {
client.deleteBucketLifeCycle(bucketName);
mintSuccessLog("deleteBucketLifeCycle(String bucketName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"deleteBucketLifeCycle(String bucketName) ",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: getBucketLifeCycle(String bucketName). */
public static void getBucketLifeCycle_test1() throws Exception {
if (!mintEnv) {
System.out.println("Test: getBucketLifeCycle(String bucketName)");
}
long startTime = System.currentTimeMillis();
try {
client.getBucketLifeCycle(bucketName);
mintSuccessLog("getBucketLifeCycle(String bucketName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"getBucketLifeCycle(String bucketName) ",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/**
* Test: setBucketNotification(String bucketName, NotificationConfiguration
* notificationConfiguration).
*/
public static void setBucketNotification_test1() throws Exception {
// This test requires 'MINIO_JAVA_TEST_TOPIC' and 'MINIO_JAVA_TEST_REGION'
// environment variables.
String topic = System.getenv("MINIO_JAVA_TEST_TOPIC");
String region = System.getenv("MINIO_JAVA_TEST_REGION");
if (topic == null || topic.equals("") || region == null || region.equals("")) {
// do not run functional test as required environment variables are missing.
return;
}
if (!mintEnv) {
System.out.println(
"Test: setBucketNotification(String bucketName, "
+ "NotificationConfiguration notificationConfiguration)");
}
long startTime = System.currentTimeMillis();
try {
String destBucketName = getRandomName();
client.makeBucket(destBucketName, region);
NotificationConfiguration notificationConfiguration = new NotificationConfiguration();
// Add a new topic configuration.
List<TopicConfiguration> topicConfigurationList =
notificationConfiguration.topicConfigurationList();
TopicConfiguration topicConfiguration = new TopicConfiguration();
topicConfiguration.setTopic(topic);
List<EventType> eventList = new LinkedList<>();
eventList.add(EventType.OBJECT_CREATED_PUT);
eventList.add(EventType.OBJECT_CREATED_COPY);
topicConfiguration.setEvents(eventList);
topicConfiguration.setPrefixRule("images");
topicConfiguration.setSuffixRule("pg");
topicConfigurationList.add(topicConfiguration);
notificationConfiguration.setTopicConfigurationList(topicConfigurationList);
client.setBucketNotification(destBucketName, notificationConfiguration);
client.removeBucket(destBucketName);
mintSuccessLog(
"setBucketNotification(String bucketName, NotificationConfiguration notificationConfiguration)",
null,
startTime);
} catch (Exception e) {
mintFailedLog(
"setBucketNotification(String bucketName, NotificationConfiguration notificationConfiguration)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: getBucketNotification(String bucketName). */
public static void getBucketNotification_test1() throws Exception {
// This test requires 'MINIO_JAVA_TEST_TOPIC' and 'MINIO_JAVA_TEST_REGION'
// environment variables.
String topic = System.getenv("MINIO_JAVA_TEST_TOPIC");
String region = System.getenv("MINIO_JAVA_TEST_REGION");
if (topic == null || topic.equals("") || region == null || region.equals("")) {
// do not run functional test as required environment variables are missing.
return;
}
if (!mintEnv) {
System.out.println("Test: getBucketNotification(String bucketName)");
}
long startTime = System.currentTimeMillis();
try {
String destBucketName = getRandomName();
client.makeBucket(destBucketName, region);
NotificationConfiguration notificationConfiguration = new NotificationConfiguration();
// Add a new topic configuration.
List<TopicConfiguration> topicConfigurationList =
notificationConfiguration.topicConfigurationList();
TopicConfiguration topicConfiguration = new TopicConfiguration();
topicConfiguration.setTopic(topic);
List<EventType> eventList = new LinkedList<>();
eventList.add(EventType.OBJECT_CREATED_PUT);
topicConfiguration.setEvents(eventList);
topicConfigurationList.add(topicConfiguration);
notificationConfiguration.setTopicConfigurationList(topicConfigurationList);
client.setBucketNotification(destBucketName, notificationConfiguration);
String expectedResult = notificationConfiguration.toString();
notificationConfiguration = client.getBucketNotification(destBucketName);
topicConfigurationList = notificationConfiguration.topicConfigurationList();
topicConfiguration = topicConfigurationList.get(0);
topicConfiguration.setId(null);
String result = notificationConfiguration.toString();
if (!result.equals(expectedResult)) {
System.out.println("FAILED. expected: " + expectedResult + ", got: " + result);
}
client.removeBucket(destBucketName);
mintSuccessLog("getBucketNotification(String bucketName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"getBucketNotification(String bucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: removeAllBucketNotification(String bucketName). */
public static void removeAllBucketNotification_test1() throws Exception {
// This test requires 'MINIO_JAVA_TEST_TOPIC' and 'MINIO_JAVA_TEST_REGION'
// environment variables.
String topic = System.getenv("MINIO_JAVA_TEST_TOPIC");
String region = System.getenv("MINIO_JAVA_TEST_REGION");
if (topic == null || topic.equals("") || region == null || region.equals("")) {
// do not run functional test as required environment variables are missing.
return;
}
if (!mintEnv) {
System.out.println("Test: removeAllBucketNotification(String bucketName)");
}
long startTime = System.currentTimeMillis();
try {
String destBucketName = getRandomName();
client.makeBucket(destBucketName, region);
NotificationConfiguration notificationConfiguration = new NotificationConfiguration();
// Add a new topic configuration.
List<TopicConfiguration> topicConfigurationList =
notificationConfiguration.topicConfigurationList();
TopicConfiguration topicConfiguration = new TopicConfiguration();
topicConfiguration.setTopic(topic);
List<EventType> eventList = new LinkedList<>();
eventList.add(EventType.OBJECT_CREATED_PUT);
eventList.add(EventType.OBJECT_CREATED_COPY);
topicConfiguration.setEvents(eventList);
topicConfiguration.setPrefixRule("images");
topicConfiguration.setSuffixRule("pg");
topicConfigurationList.add(topicConfiguration);
notificationConfiguration.setTopicConfigurationList(topicConfigurationList);
client.setBucketNotification(destBucketName, notificationConfiguration);
notificationConfiguration = new NotificationConfiguration();
String expectedResult = notificationConfiguration.toString();
client.removeAllBucketNotification(destBucketName);
notificationConfiguration = client.getBucketNotification(destBucketName);
String result = notificationConfiguration.toString();
if (!result.equals(expectedResult)) {
throw new Exception("[FAILED] Expected: " + expectedResult + ", Got: " + result);
}
client.removeBucket(destBucketName);
mintSuccessLog("removeAllBucketNotification(String bucketName)", null, startTime);
} catch (Exception e) {
mintFailedLog(
"removeAllBucketNotification(String bucketName)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
}
}
/** Test: listenBucketNotification(String bucketName). */
public static void listenBucketNotification_test1() throws Exception {
if (!mintEnv) {
System.out.println(
"Test: listenBucketNotification(String bucketName, String prefix, "
+ "String suffix, String[] events)");
}
long startTime = System.currentTimeMillis();
String file = createFile1Kb();
String bucketName = getRandomName();
CloseableIterator<Result<NotificationRecords>> ci = null;
try {
client.makeBucket(bucketName, region);
String[] events = {"s3:ObjectCreated:*", "s3:ObjectAccessed:*"};
ci = client.listenBucketNotification(bucketName, "prefix", "suffix", events);
client.putObject(bucketName, "prefix-random-suffix", file, new PutObjectOptions(1 * KB, -1));
while (ci.hasNext()) {
NotificationRecords records = ci.next().get();
if (records.events().size() == 0) {
continue;
}
boolean found = false;
for (Event event : records.events()) {
if (event.objectName().equals("prefix-random-suffix")) {
found = true;
break;
}
}
if (found) {
break;
}
}
mintSuccessLog(
"listenBucketNotification(String bucketName, String prefix, "
+ "String suffix, String[] events)",
null,
startTime);
} catch (Exception e) {
if (e instanceof ErrorResponseException) {
ErrorResponseException exp = (ErrorResponseException) e;
ErrorResponse errorResponse = exp.errorResponse();
if (errorResponse != null && errorResponse.errorCode() == ErrorCode.NOT_IMPLEMENTED) {
mintIgnoredLog(
"listenBucketNotification(String bucketName, String prefix, "
+ "String suffix, String[] events)",
null,
startTime);
return;
}
}
mintFailedLog(
"listenBucketNotification(String bucketName, String prefix, "
+ "String suffix, String[] events)",
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
} finally {
if (ci != null) {
ci.close();
}
Files.delete(Paths.get(file));
client.removeObject(bucketName, "prefix-random-suffix");
client.removeBucket(bucketName);
}
}
/**
* Test: selectObjectContent(String bucketName, String objectName, String sqlExpression,
* InputSerialization is, OutputSerialization os, boolean requestProgress, Long scanStartRange,
* Long scanEndRange, ServerSideEncryption sse).
*/
public static void selectObjectContent_test1() throws Exception {
String testName =
"selectObjectContent(String bucketName, String objectName, String sqlExpression,"
+ " InputSerialization is, OutputSerialization os, boolean requestProgress,"
+ " Long scanStartRange, Long scanEndRange, ServerSideEncryption sse)";
if (!mintEnv) {
System.out.println("Test: " + testName);
}
long startTime = System.currentTimeMillis();
String objectName = getRandomName();
SelectResponseStream responseStream = null;
try {
String expectedResult =
"1997,Ford,E350,\"ac, abs, moon\",3000.00\n"
+ "1999,Chevy,\"Venture \"\"Extended Edition\"\"\",,4900.00\n"
+ "1999,Chevy,\"Venture \"\"Extended Edition, Very Large\"\"\",,5000.00\n"
+ "1996,Jeep,Grand Cherokee,\"MUST SELL!\n"
+ "air, moon roof, loaded\",4799.00\n";
byte[] data =
("Year,Make,Model,Description,Price\n" + expectedResult).getBytes(StandardCharsets.UTF_8);
ByteArrayInputStream bais = new ByteArrayInputStream(data);
client.putObject(bucketName, objectName, bais, new PutObjectOptions(data.length, -1));
String sqlExpression = "select * from S3Object";
InputSerialization is =
new InputSerialization(null, false, null, null, FileHeaderInfo.USE, null, null, null);
OutputSerialization os =
new OutputSerialization(null, null, null, QuoteFields.ASNEEDED, null);
responseStream =
client.selectObjectContent(
bucketName, objectName, sqlExpression, is, os, true, null, null, null);
String result = new String(readAllBytes(responseStream), StandardCharsets.UTF_8);
if (!result.equals(expectedResult)) {
throw new Exception("result mismatch; expected: " + expectedResult + ", got: " + result);
}
Stats stats = responseStream.stats();
if (stats == null) {
throw new Exception("stats is null");
}
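      // The uploaded object is 256 bytes (34-byte CSV header plus 222 bytes of data rows);
      // only the 222 bytes of data rows are returned by the query.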
      if (stats.bytesScanned() != 256) {
        throw new Exception(
            "stats.bytesScanned mismatch; expected: 256, got: " + stats.bytesScanned());
      }
      if (stats.bytesProcessed() != 256) {
        throw new Exception(
            "stats.bytesProcessed mismatch; expected: 256, got: " + stats.bytesProcessed());
      }
if (stats.bytesReturned() != 222) {
throw new Exception(
"stats.bytesReturned mismatch; expected: 222, got: " + stats.bytesReturned());
}
mintSuccessLog(testName, null, startTime);
} catch (Exception e) {
mintFailedLog(
testName,
null,
startTime,
null,
e.toString() + " >>> " + Arrays.toString(e.getStackTrace()));
throw e;
} finally {
if (responseStream != null) {
responseStream.close();
}
client.removeObject(bucketName, objectName);
}
}
  /** runTests: runs as many of the test combinations as possible. */
public static void runTests() throws Exception {
makeBucket_test1();
if (endpoint.toLowerCase(Locale.US).contains("s3")) {
makeBucketwithRegion_test();
makeBucketWithPeriod_test();
}
listBuckets_test();
bucketExists_test();
removeBucket_test();
setup();
putObject_test1();
putObject_test2();
putObject_test3();
putObject_test4();
putObject_test5();
putObject_test6();
putObject_test7();
putObject_test8();
putObject_test9();
putObject_test10();
putObject_test11();
statObject_test1();
getObject_test1();
getObject_test2();
getObject_test3();
getObject_test4();
getObject_test5();
getObject_test6();
getObject_test8();
listObject_test1();
listObject_test2();
listObject_test3();
listObject_test4();
listObject_test5();
listObject_test6();
removeObject_test1();
removeObject_test2();
listIncompleteUploads_test1();
listIncompleteUploads_test2();
listIncompleteUploads_test3();
removeIncompleteUploads_test();
presignedGetObject_test1();
presignedGetObject_test2();
presignedGetObject_test3();
presignedPutObject_test1();
presignedPutObject_test2();
presignedPostPolicy_test();
copyObject_test1();
copyObject_test2();
copyObject_test3();
copyObject_test4();
copyObject_test5();
copyObject_test6();
copyObject_test7();
copyObject_test8();
copyObject_test9();
composeObject_test1();
composeObject_test2();
composeObject_test3();
enableObjectLegalHold_test();
disableObjectLegalHold_test();
setDefaultRetention_test();
getDefaultRetention_test();
selectObjectContent_test1();
    // SSE_C tests only work over a TLS connection
Locale locale = Locale.ENGLISH;
boolean tlsEnabled = endpoint.toLowerCase(locale).contains("https://");
if (tlsEnabled) {
statObject_test2();
getObject_test7();
getObject_test9();
putObject_test12();
putObject_test13();
copyObject_test10();
composeObject_test4();
composeObject_test5();
composeObject_test6();
}
statObject_test3();
// SSE_S3 and SSE_KMS only work with endpoint="s3.amazonaws.com"
String requestUrl = endpoint;
if (requestUrl.equals("s3.amazonaws.com")) {
putObject_test14();
putObject_test15();
copyObject_test11();
copyObject_test12();
setBucketLifeCycle_test1();
getBucketLifeCycle_test1();
deleteBucketLifeCycle_test1();
}
getBucketPolicy_test1();
setBucketPolicy_test1();
listenBucketNotification_test1();
threadedPutObject();
teardown();
    // Notification tests require the 'MINIO_JAVA_TEST_TOPIC' and
    // 'MINIO_JAVA_TEST_REGION' environment variables
    // to be set appropriately.
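    // For example, a hypothetical way to provide them before running the tests
    // (the values below are placeholders, not real resources):
    //   export MINIO_JAVA_TEST_TOPIC=<your notification topic ARN>
    //   export MINIO_JAVA_TEST_REGION=<your region, e.g. us-east-1>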
setBucketNotification_test1();
getBucketNotification_test1();
removeAllBucketNotification_test1();
}
  /** runQuickTests: runs only the quicker subset of tests. */
public static void runQuickTests() throws Exception {
makeBucket_test1();
listBuckets_test();
bucketExists_test();
removeBucket_test();
setup();
putObject_test1();
statObject_test1();
getObject_test1();
listObject_test1();
removeObject_test1();
listIncompleteUploads_test1();
removeIncompleteUploads_test();
presignedGetObject_test1();
presignedPutObject_test1();
presignedPostPolicy_test();
copyObject_test1();
getBucketPolicy_test1();
setBucketPolicy_test1();
selectObjectContent_test1();
listenBucketNotification_test1();
teardown();
}
public static boolean downloadMinio() throws IOException {
String url = "https://dl.min.io/server/minio/release/";
if (OS.contains("linux")) {
url += "linux-amd64/minio";
} else if (OS.contains("windows")) {
url += "windows-amd64/minio.exe";
} else if (OS.contains("mac")) {
url += "darwin-amd64/minio";
} else {
System.out.println("unknown operating system " + OS);
return false;
}
File file = new File(MINIO_BINARY);
if (file.exists()) {
return true;
}
System.out.println("downloading " + MINIO_BINARY + " binary");
Request.Builder requestBuilder = new Request.Builder();
Request request = requestBuilder.url(HttpUrl.parse(url)).method("GET", null).build();
OkHttpClient transport =
new OkHttpClient()
.newBuilder()
.connectTimeout(20, TimeUnit.SECONDS)
.writeTimeout(20, TimeUnit.SECONDS)
.readTimeout(20, TimeUnit.SECONDS)
.build();
Response response = transport.newCall(request).execute();
try {
if (!response.isSuccessful()) {
System.out.println("failed to download binary " + MINIO_BINARY);
return false;
}
BufferedSink bufferedSink = Okio.buffer(Okio.sink(new File(MINIO_BINARY)));
bufferedSink.writeAll(response.body().source());
bufferedSink.flush();
bufferedSink.close();
} finally {
response.close();
}
if (!OS.contains("windows")) {
file.setExecutable(true);
}
return true;
}
public static Process runMinio() throws Exception {
File binaryPath = new File(new File(System.getProperty("user.dir")), MINIO_BINARY);
ProcessBuilder pb = new ProcessBuilder(binaryPath.getPath(), "server", "d1");
Map<String, String> env = pb.environment();
env.put("MINIO_ACCESS_KEY", "minio");
env.put("MINIO_SECRET_KEY", "minio123");
pb.redirectErrorStream(true);
pb.redirectOutput(ProcessBuilder.Redirect.to(new File(MINIO_BINARY + ".log")));
System.out.println("starting minio server");
Process p = pb.start();
Thread.sleep(10 * 1000); // wait for 10 seconds to do real start.
return p;
}
/** main(). */
public static void main(String[] args) throws Exception {
String mintMode = null;
if (mintEnv) {
mintMode = System.getenv("MINT_MODE");
}
Process minioProcess = null;
if (args.length != 4) {
endpoint = "http://localhost:9000";
accessKey = "minio";
secretKey = "minio123";
region = "us-east-1";
if (!downloadMinio()) {
System.out.println("usage: FunctionalTest <ENDPOINT> <ACCESSKEY> <SECRETKEY> <REGION>");
System.exit(-1);
}
minioProcess = runMinio();
try {
int exitValue = minioProcess.exitValue();
System.out.println("minio server process exited with " + exitValue);
System.out.println("usage: FunctionalTest <ENDPOINT> <ACCESSKEY> <SECRETKEY> <REGION>");
System.exit(-1);
} catch (IllegalThreadStateException e) {
ignore();
}
} else {
String dataDir = System.getenv("MINT_DATA_DIR");
if (dataDir != null && !dataDir.equals("")) {
mintEnv = true;
dataFile1Kb = Paths.get(dataDir, "datafile-1-kB");
dataFile6Mb = Paths.get(dataDir, "datafile-6-MB");
}
endpoint = args[0];
accessKey = args[1];
secretKey = args[2];
region = args[3];
}
int exitValue = 0;
try {
client = new MinioClient(endpoint, accessKey, secretKey);
// Enable trace for debugging.
// client.traceOn(System.out);
// For mint environment, run tests based on mint mode
if (mintEnv) {
if (mintMode != null && mintMode.equals("full")) {
FunctionalTest.runTests();
} else {
FunctionalTest.runQuickTests();
}
} else {
FunctionalTest.runTests();
// Get new bucket name to avoid minio azure gateway failure.
bucketName = getRandomName();
// Quick tests with passed region.
client = new MinioClient(endpoint, accessKey, secretKey, region);
FunctionalTest.runQuickTests();
}
} catch (Exception e) {
e.printStackTrace();
exitValue = -1;
} finally {
if (minioProcess != null) {
minioProcess.destroy();
}
}
System.exit(exitValue);
}
}
| [
"\"MINT_KEY_ID\"",
"\"MINT_KEY_ID\"",
"\"MINIO_JAVA_TEST_TOPIC\"",
"\"MINIO_JAVA_TEST_REGION\"",
"\"MINIO_JAVA_TEST_TOPIC\"",
"\"MINIO_JAVA_TEST_REGION\"",
"\"MINIO_JAVA_TEST_TOPIC\"",
"\"MINIO_JAVA_TEST_REGION\"",
"\"MINT_MODE\"",
"\"MINT_DATA_DIR\""
]
| []
| [
"MINT_KEY_ID",
"MINIO_JAVA_TEST_TOPIC",
"MINT_MODE",
"MINIO_JAVA_TEST_REGION",
"MINT_DATA_DIR"
]
| [] | ["MINT_KEY_ID", "MINIO_JAVA_TEST_TOPIC", "MINT_MODE", "MINIO_JAVA_TEST_REGION", "MINT_DATA_DIR"] | java | 5 | 0 | |
appengine/go11x/helloworld/helloworld.go | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// [START gae_go111_app]
// Sample helloworld is an App Engine app.
package main
// [START import]
import (
"fmt"
"log"
"net/http"
"os"
)
// [END import]
// [START main_func]
func main() {
http.HandleFunc("/", indexHandler)
// [START setting_port]
port := os.Getenv("PORT")
if port == "" {
port = "8080"
log.Printf("Defaulting to port %s", port)
}
log.Printf("Listening on port %s", port)
if err := http.ListenAndServe(":"+port, nil); err != nil {
log.Fatal(err)
}
// [END setting_port]
}
// [END main_func]
// [START indexHandler]
// indexHandler responds to requests with our greeting.
func indexHandler(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
fmt.Fprint(w, "Hello, World! 06/21/2020 17:29")
}
// [END indexHandler]
// [END gae_go111_app]
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
cmd/action-lgtm-reaction/main.go | // Copyright 2020 micnncim
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"regexp"
"strings"
"github.com/micnncim/action-lgtm-reaction/pkg/actions"
"github.com/micnncim/action-lgtm-reaction/pkg/github"
"github.com/micnncim/action-lgtm-reaction/pkg/lgtm"
"github.com/micnncim/action-lgtm-reaction/pkg/lgtm/giphy"
"github.com/micnncim/action-lgtm-reaction/pkg/lgtm/lgtmapp"
)
var (
githubToken = os.Getenv("GITHUB_TOKEN")
giphyAPIKey = os.Getenv("GIPHY_API_KEY")
)
type GitHubEvent struct {
Comment struct {
ID int `json:"id"`
Body string `json:"body"`
} `json:"comment"`
Issue struct {
Number int `json:"number"`
} `json:"issue"`
PullRequest struct {
Number int `json:"number"`
} `json:"pull_request"`
Review struct {
ID int `json:"id"`
Body string `json:"body"`
} `json:"review"`
}
var (
input actions.Input
)
func init() {
input = actions.GetInput()
}
func main() {
if err := run(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func run() error {
e, err := getGitHubEvent()
if err != nil {
return err
}
needCreateComment, needUpdateComment, needUpdateReview, err := checkActionNeeded(e)
if err != nil {
return err
}
if !needCreateComment && !needUpdateComment && !needUpdateReview {
fmt.Fprintf(os.Stderr, "::debug::No need to do any action\n")
return nil
}
owner, repo, err := getGitHubRepo()
if err != nil {
return err
}
lc, err := createLGTMClient(input.Source)
if err != nil {
return err
}
lgtmComment, err := lc.GetRandom()
if err != nil {
return err
}
ctx := context.Background()
gc, err := github.NewClient(githubToken, input.Enterprise)
if err != nil {
return err
}
switch {
case needUpdateComment:
return gc.UpdateIssueComment(ctx, owner, repo, e.Comment.ID, lgtmComment)
case needCreateComment:
var number int
switch {
case e.Issue.Number != 0:
number = e.Issue.Number
case e.PullRequest.Number != 0:
number = e.PullRequest.Number
default:
return errors.New("issue number or pull request number don't exist")
}
return gc.CreateIssueComment(ctx, owner, repo, number, lgtmComment)
case needUpdateReview:
return gc.UpdateReview(ctx, owner, repo, e.PullRequest.Number, e.Review.ID, lgtmComment)
}
return nil
}
func createLGTMClient(source string) (c lgtm.Client, err error) {
switch source {
case lgtm.SourceGiphy.String():
c, err = giphy.NewClient(giphyAPIKey)
return
case lgtm.SourceLGTMApp.String():
c, err = lgtmapp.NewClient()
return
default:
err = fmt.Errorf("not support source: %s", source)
return
}
}
func checkActionNeeded(e *GitHubEvent) (needCreateComment, needUpdateComment, needUpdateReview bool, err error) {
var (
trigger = input.Trigger
override = input.Override
caseInsensitive = input.CaseInsensitive
)
var (
matchComment bool
matchReview bool
)
matchComment, err = matchTrigger(trigger, e.Comment.Body, caseInsensitive)
if err != nil {
return
}
matchReview, err = matchTrigger(trigger, e.Review.Body, caseInsensitive)
if err != nil {
return
}
needCreateComment = (matchComment || matchReview) && !override
needUpdateComment = matchComment && override
needUpdateReview = matchReview && override
return
}
func getGitHubEvent() (*GitHubEvent, error) {
p := os.Getenv("GITHUB_EVENT_PATH")
f, err := os.Open(p)
if err != nil {
return nil, err
}
defer f.Close()
e := &GitHubEvent{}
if err := json.NewDecoder(f).Decode(e); err != nil {
return nil, err
}
return e, nil
}
func getGitHubRepo() (owner, repo string, err error) {
r := os.Getenv("GITHUB_REPOSITORY")
s := strings.Split(r, "/")
if len(s) != 2 {
		err = fmt.Errorf("invalid github repository: %v", r)
return
}
owner, repo = s[0], s[1]
return
}
// trigger is expected as JSON array like '["a", "b"]'.
func parseTrigger(trigger string) ([]string, error) {
var a []string
if err := json.Unmarshal([]byte(trigger), &a); err != nil {
return nil, err
}
return a, nil
}
func matchTrigger(trigger, target string, caseInsensitive bool) (bool, error) {
regexps, err := parseTrigger(trigger)
if err != nil {
return false, err
}
for _, s := range regexps {
if caseInsensitive && !strings.HasPrefix(s, "(?i)") {
s = fmt.Sprintf("(?i)%s", s)
}
fmt.Fprintln(os.Stdout, fmt.Sprintf("::debug::Matching %s regexp: %s", target, s))
r := regexp.MustCompile(s)
if r.MatchString(target) {
return true, nil
}
}
return false, nil
}
| [
"\"GITHUB_TOKEN\"",
"\"GIPHY_API_KEY\"",
"\"GITHUB_EVENT_PATH\"",
"\"GITHUB_REPOSITORY\""
]
| []
| [
"GITHUB_EVENT_PATH",
"GITHUB_REPOSITORY",
"GIPHY_API_KEY",
"GITHUB_TOKEN"
]
| [] | ["GITHUB_EVENT_PATH", "GITHUB_REPOSITORY", "GIPHY_API_KEY", "GITHUB_TOKEN"] | go | 4 | 0 | |
main.py | import logging
import argparse
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from dataset import config, Dataset, collate_fn
from utils import *
from train import train, test
from model import *
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
cudnn.benchmark = False
def init_seeds(seed=0):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if seed == 0:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def parse_args():
parser = argparse.ArgumentParser(description='PCA-Net parameters')
parser.add_argument('--dataset', metavar='DIR', default='bird', help='bird car aircraft')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--model-name', default='resnet50', type=str, help='model name')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--decay-step', default=2, type=int, metavar='N',
help='learning rate decay step')
parser.add_argument('--gamma', default=0.9, type=float, metavar='M',
help='gamma')
parser.add_argument('-b', '--batch-size', default=16, type=int,
metavar='N', help='mini-batch size (default: 16)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint')
parser.add_argument('--checkpoint-path', default='./checkpoint_bird', type=str, metavar='checkpoint_path',
help='path to save checkpoint')
args = parser.parse_args()
return args
args = parse_args()
print(args)
init_seeds(seed=0)
best_acc1 = 0.
try:
    os.stat(args.checkpoint_path)
except OSError:
    os.makedirs(args.checkpoint_path)
logging.info("OPENING " + args.checkpoint_path + '/results_train.csv')
logging.info("OPENING " + args.checkpoint_path + '/results_test.csv')
results_train_file = open(args.checkpoint_path + '/results_train.csv', 'w')
results_train_file.write('epoch, train_acc,train_loss\n')
results_train_file.flush()
results_test_file = open(args.checkpoint_path + '/results_test.csv', 'w')
results_test_file.write('epoch, test_acc,test_loss\n')
results_test_file.flush()
# dataset
train_root, test_root, train_pd, test_pd, cls_num = config(data=args.dataset)
data_transforms = {
'train': transforms.Compose([
transforms.Resize((512, 512)),
transforms.RandomCrop((448, 448)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]),
'test': transforms.Compose([
transforms.Resize((512, 512)),
transforms.CenterCrop((448, 448)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]),
}
train_dataset = Dataset(train_root, train_pd, train=True, transform=data_transforms['train'], num_positive=1)
test_dataset = Dataset(test_root, test_pd, train=False, transform=data_transforms['test'])
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4)
model = resnet50(pretrained=True, use_bp=True)
in_features = model.classifier.in_features
model.classifier = torch.nn.Linear(in_features=in_features, out_features=cls_num)
model = model.cuda()
model = torch.nn.DataParallel(model)
# feature center
feature_len = 512
center_dict = {'center': torch.zeros(cls_num, feature_len * 32)}
center = center_dict['center'].cuda()
criterion = torch.nn.CrossEntropyLoss()
criterion = criterion.cuda()
optimizer = torch.optim.SGD(
model.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-5)
cudnn.benchmark = True
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.decay_step, gamma=args.gamma)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
for epoch in range(args.start_epoch, args.epochs):
scheduler.step()
for param_group in optimizer.param_groups:
lr_val = float(param_group['lr'])
print("Start epoch %d, lr=%f" % (epoch, lr_val))
train_acc, train_loss = train(train_loader, model, criterion, optimizer, center)
logging.info('Iteration %d, train_acc = %.4f,train_loss = %.4f' % (epoch, train_acc, train_loss))
results_train_file.write('%d, %.4f,%.4f\n' % (epoch, train_acc, train_loss))
results_train_file.flush()
val_acc, val_loss = test(test_loader, model, criterion, center)
is_best = val_acc > best_acc1
best_acc1 = max(val_acc, best_acc1)
logging.info('Iteration %d, test_acc = %.4f,test_loss = %.4f' % (epoch, val_acc, val_loss))
results_test_file.write('%d, %.4f,%.4f\n' % (epoch, val_acc, val_loss))
results_test_file.flush()
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
'center': center
}, is_best, args.checkpoint_path)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
cmd/traffic/cmd/manager/internal/state/state.go | package state
import (
"context"
"fmt"
"os"
"sync"
"time"
"github.com/google/uuid"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/datawire/dlib/dlog"
rpc "github.com/telepresenceio/telepresence/rpc/v2/manager"
"github.com/telepresenceio/telepresence/v2/cmd/traffic/cmd/manager/internal/watchable"
"github.com/telepresenceio/telepresence/v2/cmd/traffic/cmd/manager/managerutil"
"github.com/telepresenceio/telepresence/v2/pkg/connpool"
"github.com/telepresenceio/telepresence/v2/pkg/iputil"
"github.com/telepresenceio/telepresence/v2/pkg/log"
)
type SessionState interface {
Cancel()
Done() <-chan struct{}
LastMarked() time.Time
SetLastMarked(lastMarked time.Time)
}
type sessionState struct {
done <-chan struct{}
cancel context.CancelFunc
lastMarked time.Time
}
func (ss *sessionState) Cancel() {
ss.cancel()
}
func (ss *sessionState) Done() <-chan struct{} {
return ss.done
}
func (ss *sessionState) LastMarked() time.Time {
return ss.lastMarked
}
func (ss *sessionState) SetLastMarked(lastMarked time.Time) {
ss.lastMarked = lastMarked
}
type agentTunnel struct {
name string
namespace string
tunnel connpool.Tunnel
}
type clientSessionState struct {
sessionState
name string
pool *connpool.Pool
tunnel connpool.Tunnel
agentTunnelsMu sync.Mutex
agentTunnels map[string]*agentTunnel
}
func (cs *clientSessionState) addAgentTunnel(agentSessionID, name, namespace string, tunnel connpool.Tunnel) {
cs.agentTunnelsMu.Lock()
cs.agentTunnels[agentSessionID] = &agentTunnel{
name: name,
namespace: namespace,
tunnel: tunnel,
}
cs.agentTunnelsMu.Unlock()
}
func (cs *clientSessionState) deleteAgentTunnel(agentSessionID string) {
cs.agentTunnelsMu.Lock()
delete(cs.agentTunnels, agentSessionID)
cs.agentTunnelsMu.Unlock()
}
// getRandomAgentTunnel will return the tunnel of an intercepted agent provided all intercepted
// agents live in the same namespace. The method will return nil if the client currently has no
// intercepts or if it has several intercepts that span more than one namespace.
func (cs *clientSessionState) getRandomAgentTunnel() (tunnel *agentTunnel) {
cs.agentTunnelsMu.Lock()
defer cs.agentTunnelsMu.Unlock()
prevNs := ""
for _, agentTunnel := range cs.agentTunnels {
tunnel = agentTunnel
if prevNs == "" {
prevNs = agentTunnel.namespace
		} else if prevNs != agentTunnel.namespace {
return nil
}
}
	// Return the first tunnel found. If there are several, Go's map iteration order
	// effectively randomizes which one is returned.
return tunnel
}
// getInterceptedAgents returns the session ID of each agent currently intercepted
// by this client
func (cs *clientSessionState) getInterceptedAgents() []string {
cs.agentTunnelsMu.Lock()
agentSessionIDs := make([]string, len(cs.agentTunnels))
i := 0
for agentSession := range cs.agentTunnels {
agentSessionIDs[i] = agentSession
i++
}
cs.agentTunnelsMu.Unlock()
return agentSessionIDs
}
type agentSessionState struct {
sessionState
agent *rpc.AgentInfo
lookups chan *rpc.LookupHostRequest
lookupResponses map[string]chan *rpc.LookupHostResponse
}
func (ss *agentSessionState) Cancel() {
close(ss.lookups)
for _, lr := range ss.lookupResponses {
close(lr)
}
ss.sessionState.Cancel()
}
// State is the total state of the Traffic Manager. A zero State is invalid; you must call
// NewState.
type State struct {
ctx context.Context
mu sync.Mutex
// Things protected by 'mu': While the watchable.WhateverMaps have their own locking to
// protect against memory corruption and ensure serialization for watches, we need to do our
// own locking here to ensure consistency between the various maps:
//
// 1. `agents` needs to stay in-sync with `sessions`
// 2. `clients` needs to stay in-sync with `sessions`
// 3. `port` needs to be updated in-sync with `intercepts`
// 4. `agentsByName` needs stay in-sync with `agents`
// 5. `intercepts` needs to be pruned in-sync with `clients` (based on
// `intercept.ClientSession.SessionId`)
// 6. `intercepts` needs to be pruned in-sync with `agents` (based on
// `agent.Name == intercept.Spec.Agent`)
// 7. `interceptAPIKeys` need to be created and updated in-sync with `intercepts` (but not deleted
// in-sync with `intercepts`; that happens separately, in `RemoveInterceptAPIKey())
intercepts watchable.InterceptMap
agents watchable.AgentMap // info for agent sessions
clients watchable.ClientMap // info for client sessions
sessions map[string]SessionState // info for all sessions
interceptAPIKeys map[string]string // InterceptIDs mapped to the APIKey used to create them
listeners map[string]connpool.Handler // listeners for all intercepts
agentsByName map[string]map[string]*rpc.AgentInfo // indexed copy of `agents`
timedLogLevel log.TimedLevel
logLevelCond sync.Cond
}
func NewState(ctx context.Context) *State {
return &State{
ctx: ctx,
sessions: make(map[string]SessionState),
interceptAPIKeys: make(map[string]string),
agentsByName: make(map[string]map[string]*rpc.AgentInfo),
listeners: make(map[string]connpool.Handler),
timedLogLevel: log.NewTimedLevel(os.Getenv("LOG_LEVEL"), log.SetLevel),
logLevelCond: sync.Cond{L: &sync.Mutex{}},
}
}
// Internal ////////////////////////////////////////////////////////////////////////////////////////
// unlockedCheckAgentsForIntercept (1) assumes that s.mu is already locked, and (2) checks the
// status of all agents that would be relevant to the given intercept spec, and returns whether the
// state of those agents would require transitioning to an error state. If everything looks good,
// it returns the zero error code (InterceptDispositionType_UNSPECIFIED).
func (s *State) unlockedCheckAgentsForIntercept(intercept *rpc.InterceptInfo) (errCode rpc.InterceptDispositionType, errMsg string) {
// Don't overwrite an existing error state
switch intercept.Disposition {
// non-error states ////////////////////////////////////////////////////
	case rpc.InterceptDispositionType_UNSPECIFIED:
		// Continue through; we can transition to an error state from here.
	case rpc.InterceptDispositionType_ACTIVE:
		// Continue through; we can transition to an error state from here.
	case rpc.InterceptDispositionType_WAITING:
		// Continue through; we can transition to an error state from here.
// error states ////////////////////////////////////////////////////////
case rpc.InterceptDispositionType_NO_CLIENT:
// Don't overwrite this error state.
return intercept.Disposition, intercept.Message
case rpc.InterceptDispositionType_NO_AGENT:
// Continue through; this is an error state that this function "owns".
case rpc.InterceptDispositionType_NO_MECHANISM:
// Continue through; this is an error state that this function "owns".
case rpc.InterceptDispositionType_NO_PORTS:
// Don't overwrite this error state.
return intercept.Disposition, intercept.Message
case rpc.InterceptDispositionType_AGENT_ERROR:
// Continue through; the error states of this function take precedence.
case rpc.InterceptDispositionType_BAD_ARGS:
// Don't overwrite this error state.
return intercept.Disposition, intercept.Message
}
// main ////////////////////////////////////////////////////////////////
agentSet := s.agentsByName[intercept.Spec.Agent]
if len(agentSet) == 0 {
errCode = rpc.InterceptDispositionType_NO_AGENT
errMsg = fmt.Sprintf("No agent found for %q", intercept.Spec.Agent)
return
}
agentList := make([]*rpc.AgentInfo, 0, len(agentSet))
for _, agent := range agentSet {
agentList = append(agentList, agent)
}
if !managerutil.AgentsAreCompatible(agentList) {
errCode = rpc.InterceptDispositionType_NO_AGENT
errMsg = fmt.Sprintf("Agents for %q are not consistent", intercept.Spec.Agent)
return
}
if !agentHasMechanism(agentList[0], intercept.Spec.Mechanism) {
errCode = rpc.InterceptDispositionType_NO_MECHANISM
errMsg = fmt.Sprintf("Agents for %q do not have mechanism %q", intercept.Spec.Agent, intercept.Spec.Mechanism)
return
}
return rpc.InterceptDispositionType_UNSPECIFIED, ""
}
// Sessions: common ////////////////////////////////////////////////////////////////////////////////
// MarkSession marks a session as being present at the indicated time. Returns true if everything goes OK,
// returns false if the given session ID does not exist.
func (s *State) MarkSession(req *rpc.RemainRequest, now time.Time) (ok bool) {
s.mu.Lock()
defer s.mu.Unlock()
sessionID := req.Session.SessionId
if sess, ok := s.sessions[sessionID]; ok {
sess.SetLastMarked(now)
if req.ApiKey != "" {
if client, ok := s.clients.Load(sessionID); ok {
client.ApiKey = req.ApiKey
s.clients.Store(sessionID, client)
}
}
return true
}
return false
}
// RemoveSession removes a session from the set of present session IDs.
func (s *State) RemoveSession(sessionID string) {
s.mu.Lock()
defer s.mu.Unlock()
s.unlockedRemoveSession(sessionID)
}
func (s *State) unlockedRemoveSession(sessionID string) {
if sess, ok := s.sessions[sessionID]; ok {
// kill the session
sess.Cancel()
		// remove it from the agentsByName index (if necessary)
agent, isAgent := s.agents.Load(sessionID)
if isAgent {
delete(s.agentsByName[agent.Name], sessionID)
if len(s.agentsByName[agent.Name]) == 0 {
delete(s.agentsByName, agent.Name)
}
}
// remove the session
s.agents.Delete(sessionID)
s.clients.Delete(sessionID)
delete(s.sessions, sessionID)
// GC any intercepts that relied on this session; prune any intercepts that
// 1. Don't have a client session (intercept.ClientSession.SessionId)
// 2. Don't have any agents (agent.Name == intercept.Spec.Agent)
// Alternatively, if the intercept is still live but has been switched over to a different agent, send it back to WAITING state
for interceptID, intercept := range s.intercepts.LoadAll() {
if intercept.ClientSession.SessionId == sessionID {
// Client went away:
// Delete it.
s.intercepts.Delete(interceptID)
} else if errCode, errMsg := s.unlockedCheckAgentsForIntercept(intercept); errCode != 0 {
// Refcount went to zero:
// Tell the client, so that the client can tell us to delete it.
intercept.Disposition = errCode
intercept.Message = errMsg
s.intercepts.Store(interceptID, intercept)
} else if isAgent && agent.PodIp == intercept.PodIp {
// The agent whose podIP was stored by the intercept is dead, but it's not the last agent
// Send it back to waiting so that one of the other agents can pick it up and set their own podIP
intercept.Disposition = rpc.InterceptDispositionType_WAITING
s.intercepts.Store(interceptID, intercept)
}
}
}
}
// ExpireSessions prunes any sessions that haven't had a MarkSession heartbeat since the given
// 'moment'.
func (s *State) ExpireSessions(moment time.Time) {
s.mu.Lock()
defer s.mu.Unlock()
for id, sess := range s.sessions {
if sess.LastMarked().Before(moment) {
s.unlockedRemoveSession(id)
}
}
}
// SessionDone returns a channel that is closed when the session with the given ID terminates. If
// there is no such currently-live session, then an already-closed channel is returned.
func (s *State) SessionDone(id string) <-chan struct{} {
s.mu.Lock()
defer s.mu.Unlock()
sess, ok := s.sessions[id]
if !ok {
ret := make(chan struct{})
close(ret)
return ret
}
return sess.Done()
}
// Sessions: Clients ///////////////////////////////////////////////////////////////////////////////
func (s *State) AddClient(client *rpc.ClientInfo, now time.Time) string {
// Use non-sequential things (i.e., UUIDs, not just a counter) as the session ID, because
// the session ID also exists in external systems (the client, SystemA), so it's confusing
// (to both humans and computers) if the manager restarts and those existing session IDs
// suddenly refer to different sessions.
sessionID := uuid.New().String()
return s.addClient(sessionID, client, now)
}
// addClient is like AddClient, but takes a sessionID, for testing purposes
func (s *State) addClient(sessionID string, client *rpc.ClientInfo, now time.Time) string {
s.mu.Lock()
defer s.mu.Unlock()
if oldClient, hasConflict := s.clients.LoadOrStore(sessionID, client); hasConflict {
panic(fmt.Errorf("duplicate id %q, existing %+v, new %+v", sessionID, oldClient, client))
}
ctx, cancel := context.WithCancel(s.ctx)
s.sessions[sessionID] = &clientSessionState{
sessionState: sessionState{
done: ctx.Done(),
cancel: cancel,
lastMarked: now,
},
name: client.Name,
pool: connpool.NewPool(),
agentTunnels: make(map[string]*agentTunnel),
}
return sessionID
}
func (s *State) GetClient(sessionID string) *rpc.ClientInfo {
ret, _ := s.clients.Load(sessionID)
return ret
}
func (s *State) GetAllClients() map[string]*rpc.ClientInfo {
return s.clients.LoadAll()
}
func (s *State) WatchClients(
ctx context.Context,
filter func(sessionID string, client *rpc.ClientInfo) bool,
) <-chan watchable.ClientMapSnapshot {
if filter == nil {
return s.clients.Subscribe(ctx)
} else {
return s.clients.SubscribeSubset(ctx, filter)
}
}
// Sessions: Agents ////////////////////////////////////////////////////////////////////////////////
func (s *State) AddAgent(agent *rpc.AgentInfo, now time.Time) string {
s.mu.Lock()
defer s.mu.Unlock()
sessionID := uuid.New().String()
if oldAgent, hasConflict := s.agents.LoadOrStore(sessionID, agent); hasConflict {
panic(fmt.Errorf("duplicate id %q, existing %+v, new %+v", sessionID, oldAgent, agent))
}
if s.agentsByName[agent.Name] == nil {
s.agentsByName[agent.Name] = make(map[string]*rpc.AgentInfo)
}
s.agentsByName[agent.Name][sessionID] = agent
ctx, cancel := context.WithCancel(s.ctx)
s.sessions[sessionID] = &agentSessionState{
sessionState: sessionState{
done: ctx.Done(),
cancel: cancel,
lastMarked: now,
},
lookups: make(chan *rpc.LookupHostRequest),
lookupResponses: make(map[string]chan *rpc.LookupHostResponse),
agent: agent,
}
for interceptID, intercept := range s.intercepts.LoadAll() {
// Check whether each intercept needs to either (1) be moved in to a NO_AGENT state
// because this agent made things inconsistent, or (2) be moved out of a NO_AGENT
// state because it just gained an agent.
if errCode, errMsg := s.unlockedCheckAgentsForIntercept(intercept); errCode != 0 {
intercept.Disposition = errCode
intercept.Message = errMsg
s.intercepts.Store(interceptID, intercept)
} else if intercept.Disposition == rpc.InterceptDispositionType_NO_AGENT {
intercept.Disposition = rpc.InterceptDispositionType_WAITING
intercept.Message = ""
s.intercepts.Store(interceptID, intercept)
}
}
return sessionID
}
func (s *State) GetAgent(sessionID string) *rpc.AgentInfo {
ret, _ := s.agents.Load(sessionID)
return ret
}
func (s *State) GetAllAgents() map[string]*rpc.AgentInfo {
return s.agents.LoadAll()
}
func (s *State) GetAgentsByName(name, namespace string) map[string]*rpc.AgentInfo {
s.mu.Lock()
defer s.mu.Unlock()
ret := make(map[string]*rpc.AgentInfo, len(s.agentsByName[name]))
for k, v := range s.agentsByName[name] {
if v.Namespace == namespace {
ret[k] = proto.Clone(v).(*rpc.AgentInfo)
}
}
return ret
}
func (s *State) WatchAgents(
ctx context.Context,
filter func(sessionID string, agent *rpc.AgentInfo) bool,
) <-chan watchable.AgentMapSnapshot {
if filter == nil {
return s.agents.Subscribe(ctx)
} else {
return s.agents.SubscribeSubset(ctx, filter)
}
}
// Intercepts //////////////////////////////////////////////////////////////////////////////////////
func (s *State) AddIntercept(sessionID, apiKey string, spec *rpc.InterceptSpec) (*rpc.InterceptInfo, error) {
s.mu.Lock()
defer s.mu.Unlock()
interceptID := fmt.Sprintf("%s:%s", sessionID, spec.Name)
s.interceptAPIKeys[interceptID] = apiKey
cept := &rpc.InterceptInfo{
Spec: spec,
Disposition: rpc.InterceptDispositionType_WAITING,
Message: "Waiting for Agent approval",
Id: interceptID,
ClientSession: &rpc.SessionInfo{
SessionId: sessionID,
},
ApiKey: apiKey,
}
// Wrap each potential-state-change in a
//
// if cept.Disposition == rpc.InterceptDispositionType_WAITING { … }
//
	// so that we don't need to worry about different state-changes stomping on each other.
if cept.Disposition == rpc.InterceptDispositionType_WAITING {
if errCode, errMsg := s.unlockedCheckAgentsForIntercept(cept); errCode != 0 {
cept.Disposition = errCode
cept.Message = errMsg
}
}
if _, hasConflict := s.intercepts.LoadOrStore(cept.Id, cept); hasConflict {
return nil, status.Errorf(codes.AlreadyExists, "Intercept named %q already exists", spec.Name)
}
return cept, nil
}
// getAgentsInterceptedByClient returns the session IDs for each agent that is currently
// intercepted by the client with the given client session ID.
func (s *State) getAgentsInterceptedByClient(clientSessionID string) ([]string, error) {
s.mu.Lock()
ss := s.sessions[clientSessionID]
s.mu.Unlock()
if cs, ok := ss.(*clientSessionState); ok {
return cs.getInterceptedAgents(), nil
}
return nil, status.Errorf(codes.NotFound, "Client session %q not found", clientSessionID)
}
// UpdateIntercept applies a given mutator function to the stored intercept with interceptID;
// storing and returning the result. If the given intercept does not exist, then the mutator
// function is not run, and nil is returned.
//
// This does not lock; but instead uses CAS and may therefore call the mutator function multiple
// times. So: it is safe to perform blocking operations in your mutator function, but you must take
// care that it is safe to call your mutator function multiple times.
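//
// A minimal, hypothetical usage sketch (the transition shown is only an illustration,
// not something this package prescribes):
//
//	state.UpdateIntercept(interceptID, func(ii *rpc.InterceptInfo) {
//		ii.Disposition = rpc.InterceptDispositionType_ACTIVE
//		ii.Message = ""
//	})
//
// Since the mutator only touches the cloned value, re-running it on a CAS retry is safe.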
func (s *State) UpdateIntercept(interceptID string, apply func(*rpc.InterceptInfo)) *rpc.InterceptInfo {
for {
cur, ok := s.intercepts.Load(interceptID)
if !ok || cur == nil {
// Doesn't exist (possibly was deleted while this loop was running).
return nil
}
new := proto.Clone(cur).(*rpc.InterceptInfo)
apply(new)
swapped := s.intercepts.CompareAndSwap(new.Id, cur, new)
if swapped {
// Success!
return new
}
}
}
func (s *State) RemoveIntercept(interceptID string) bool {
_, didDelete := s.intercepts.LoadAndDelete(interceptID)
if didDelete {
s.mu.Lock()
l, ok := s.listeners[interceptID]
if ok {
delete(s.listeners, interceptID)
}
s.mu.Unlock()
if ok {
l.Close(s.ctx)
}
}
return didDelete
}
// GetInterceptAPIKey returns the first non-empty apiKey associated with an intercept ID.
// We use this function as a last resort if we need to garbage collect intercepts when
// there are no active sessions.
func (s *State) GetInterceptAPIKey() string {
s.mu.Lock()
defer s.mu.Unlock()
for _, key := range s.interceptAPIKeys {
if key != "" {
return key
}
}
return ""
}
// RemoveInterceptAPIKey removes the associated APIKey for an Intercept ID
// Only call on an intercept that has been deleted.
func (s *State) RemoveInterceptAPIKey(interceptID string) bool {
// If the APIKey isn't present, then we return false since we didn't remove
// anything since no APIKey was associated with that intercept.
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.interceptAPIKeys[interceptID]; !ok {
		return false
	}
	delete(s.interceptAPIKeys, interceptID)
return true
}
func (s *State) GetIntercept(interceptID string) *rpc.InterceptInfo {
intercept, _ := s.intercepts.Load(interceptID)
return intercept
}
func (s *State) WatchIntercepts(
ctx context.Context,
filter func(sessionID string, intercept *rpc.InterceptInfo) bool,
) <-chan watchable.InterceptMapSnapshot {
if filter == nil {
return s.intercepts.Subscribe(ctx)
} else {
return s.intercepts.SubscribeSubset(ctx, filter)
}
}
func (s *State) ClientTunnel(ctx context.Context, tunnel connpool.Tunnel) error {
sessionID := managerutil.GetSessionID(ctx)
s.mu.Lock()
ss := s.sessions[sessionID]
s.mu.Unlock()
cs, ok := ss.(*clientSessionState)
if !ok {
return status.Errorf(codes.NotFound, "Client session %q not found", sessionID)
}
dlog.Debug(ctx, "Established TCP tunnel")
pool := cs.pool // must have one pool per client
cs.tunnel = tunnel
defer pool.CloseAll(ctx)
msgCh, errCh := tunnel.ReadLoop(ctx)
for {
select {
case <-ctx.Done():
return nil
case err := <-errCh:
return err
case msg := <-msgCh:
if msg == nil {
return nil
}
id := msg.ID()
var handler connpool.Handler
if ctrl, ok := msg.(connpool.Control); ok {
switch ctrl.Code() {
// Don't establish a conn-forward or dialer just to say goodbye
case connpool.Disconnect, connpool.DisconnectOK:
if handler = pool.Get(id); handler == nil {
continue
}
}
}
if handler == nil {
// Retrieve the connection that is tracked for the given id. Create a new one if necessary
var err error
handler, _, err = pool.GetOrCreate(ctx, id, func(ctx context.Context, release func()) (connpool.Handler, error) {
if agentTunnel := cs.getRandomAgentTunnel(); agentTunnel != nil {
// Dispatch directly to agent and let the dial happen there
dlog.Debugf(ctx, "|| FRWD %s forwarding client connection to agent %s.%s", id, agentTunnel.name, agentTunnel.namespace)
return newConnForward(release, agentTunnel.tunnel), nil
}
return connpool.NewDialer(id, cs.tunnel, release), nil
})
if err != nil {
return fmt.Errorf("failed to get connection handler: %w", err)
}
}
handler.HandleMessage(ctx, msg)
}
}
}
type connForward struct {
release func()
toStream connpool.Tunnel
}
func newConnForward(release func(), toStream connpool.Tunnel) *connForward {
return &connForward{release: release, toStream: toStream}
}
func (cf *connForward) Close(_ context.Context) {
cf.release()
}
func (cf *connForward) HandleMessage(ctx context.Context, msg connpool.Message) {
dlog.Debugf(ctx, ">> FRWD %s to agent", msg.ID())
if err := cf.toStream.Send(ctx, msg); err != nil {
dlog.Errorf(ctx, "!! FRWD %s to agent, send failed: %v", msg.ID(), err)
}
if ctrl, ok := msg.(connpool.Control); ok {
switch ctrl.Code() {
case connpool.Disconnect, connpool.DisconnectOK:
// There will be no more messages coming our way
cf.Close(ctx)
dlog.Debugf(ctx, "-- FRWD %s to agent closed", msg.ID())
}
}
}
func (cf *connForward) Start(_ context.Context) {
}
func (s *State) AgentTunnel(ctx context.Context, clientSessionInfo *rpc.SessionInfo, tunnel connpool.Tunnel) error {
agentSessionID := managerutil.GetSessionID(ctx)
as, cs, err := func() (*agentSessionState, *clientSessionState, error) {
s.mu.Lock()
defer s.mu.Unlock()
ss := s.sessions[agentSessionID]
as, ok := ss.(*agentSessionState)
if !ok {
return nil, nil, status.Errorf(codes.NotFound, "agent session %q not found", agentSessionID)
}
clientSessionID := clientSessionInfo.GetSessionId()
ss = s.sessions[clientSessionID]
cs, ok := ss.(*clientSessionState)
if !ok {
return nil, nil, status.Errorf(codes.NotFound, "client session %q not found", clientSessionID)
}
return as, cs, nil
}()
if err != nil {
return err
}
dlog.Debugf(ctx, "Established TCP tunnel from agent %s (%s) to client %s", as.agent.Name, as.agent.PodIp, cs.name)
	// During an intercept, all requests made to this pool are forwarded to the intercepted
	// agent(s)
cs.addAgentTunnel(agentSessionID, as.agent.Name, as.agent.Namespace, tunnel)
defer cs.deleteAgentTunnel(agentSessionID)
pool := cs.pool
msgCh, errCh := tunnel.ReadLoop(ctx)
for {
select {
case <-ctx.Done():
return nil
case err = <-errCh:
return err
case msg := <-msgCh:
if msg == nil {
return nil
}
if ctrl, ok := msg.(connpool.Control); ok {
switch ctrl.Code() {
// Don't establish a conn-forward just to say goodbye
case connpool.Disconnect, connpool.DisconnectOK:
continue
}
}
_, _, err = pool.GetOrCreate(ctx, msg.ID(), func(ctx context.Context, release func()) (connpool.Handler, error) {
return newConnForward(release, tunnel), nil
})
if err != nil {
dlog.Error(ctx, err)
return status.Error(codes.Internal, err.Error())
}
dlog.Debugf(ctx, ">> FRWD %s to client", msg.ID())
if err = cs.tunnel.Send(ctx, msg); err != nil {
dlog.Errorf(ctx, "Send to client failed: %v", err)
return err
}
}
}
}
// AgentsLookup sends the given request to all agents currently intercepted by the client identified with
// the clientSessionID. It then waits for the results to arrive, collects them, and returns them as a
// unique and sorted slice.
func (s *State) AgentsLookup(ctx context.Context, clientSessionID string, request *rpc.LookupHostRequest) (iputil.IPs, error) {
iceptAgentIDs, err := s.getAgentsInterceptedByClient(clientSessionID)
if err != nil {
return nil, err
}
ips := iputil.IPs{}
iceptCount := len(iceptAgentIDs)
if iceptCount == 0 {
return ips, nil
}
rsMu := sync.Mutex{} // prevent concurrent updates of the ips slice
agentTimeout, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
responseCount := 0
defer cancel()
wg := sync.WaitGroup{}
wg.Add(iceptCount)
for _, agentSessionID := range iceptAgentIDs {
go func(agentSessionID string) {
defer func() {
s.endHostLookup(agentSessionID, request)
wg.Done()
}()
rsCh := s.startHostLookup(agentSessionID, request)
if rsCh == nil {
return
}
select {
case <-agentTimeout.Done():
return
case rs := <-rsCh:
if rs == nil {
// Channel closed
return
}
rsMu.Lock()
responseCount++
rc := responseCount
for _, ip := range rs.Ips {
ips = append(ips, ip)
}
rsMu.Unlock()
if rc == iceptCount {
// all agents have responded
return
}
}
}(agentSessionID)
}
wg.Wait() // wait for timeout or that all agents have responded
return ips.UniqueSorted(), nil
}
// PostLookupResponse receives lookup responses from an agent and places them in the channel
// that corresponds to the lookup request
func (s *State) PostLookupResponse(response *rpc.LookupHostAgentResponse) {
responseID := response.Request.Session.SessionId + ":" + response.Request.Host
var rch chan<- *rpc.LookupHostResponse
s.mu.Lock()
if as, ok := s.sessions[response.Session.SessionId].(*agentSessionState); ok {
rch = as.lookupResponses[responseID]
}
s.mu.Unlock()
if rch != nil {
rch <- response.Response
}
}
func (s *State) startHostLookup(agentSessionID string, request *rpc.LookupHostRequest) <-chan *rpc.LookupHostResponse {
responseID := request.Session.SessionId + ":" + request.Host
var (
rch chan *rpc.LookupHostResponse
as *agentSessionState
ok bool
)
s.mu.Lock()
if as, ok = s.sessions[agentSessionID].(*agentSessionState); ok {
if rch, ok = as.lookupResponses[responseID]; !ok {
rch = make(chan *rpc.LookupHostResponse)
as.lookupResponses[responseID] = rch
}
}
s.mu.Unlock()
if as != nil {
// the as.lookups channel may be closed at this point, so guard for panic
func() {
defer func() {
if r := recover(); r != nil {
close(rch)
}
}()
as.lookups <- request
}()
}
return rch
}
func (s *State) endHostLookup(agentSessionID string, request *rpc.LookupHostRequest) {
responseID := request.Session.SessionId + ":" + request.Host
s.mu.Lock()
if as, ok := s.sessions[agentSessionID].(*agentSessionState); ok {
if rch, ok := as.lookupResponses[responseID]; ok {
delete(as.lookupResponses, responseID)
close(rch)
}
}
s.mu.Unlock()
}
func (s *State) WatchLookupHost(agentSessionID string) <-chan *rpc.LookupHostRequest {
s.mu.Lock()
ss, ok := s.sessions[agentSessionID]
s.mu.Unlock()
if !ok {
return nil
}
return ss.(*agentSessionState).lookups
}
// SetTempLogLevel sets the temporary log-level for the traffic-manager and all agents and,
// if a duration is given, it also starts a timer that will reset the log-level once it
// fires.
func (s *State) SetTempLogLevel(ctx context.Context, logLevelRequest *rpc.LogLevelRequest) {
duration := time.Duration(0)
if gd := logLevelRequest.Duration; gd != nil {
duration = gd.AsDuration()
}
s.timedLogLevel.Set(ctx, logLevelRequest.LogLevel, duration)
s.logLevelCond.Broadcast()
}
// InitialTempLogLevel returns the temporary log-level if it exists, along with the remaining
// duration for it, which might be zero, in which case the log-level is valid until a new
// level is requested.
func (s *State) InitialTempLogLevel() *rpc.LogLevelRequest {
level, duration := s.timedLogLevel.Get()
if level == "" {
return nil
}
return &rpc.LogLevelRequest{
LogLevel: level,
Duration: durationpb.New(duration),
}
}
// WaitForTempLogLevel waits for a new temporary log-level request. It returns the values
// of the last request that was made.
func (s *State) WaitForTempLogLevel() *rpc.LogLevelRequest {
s.logLevelCond.L.Lock()
defer s.logLevelCond.L.Unlock()
s.logLevelCond.Wait()
return s.InitialTempLogLevel()
}
| [
"\"LOG_LEVEL\""
]
| []
| [
"LOG_LEVEL"
]
| [] | ["LOG_LEVEL"] | go | 1 | 0 | |
build/landmines.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs every build as the first hook (See DEPS). If it detects that
the build should be clobbered, it will delete the contents of the build
directory.
A landmine is tripped when a builder checks out a different revision, and the
diff between the new landmines and the old ones is non-null. At this point, the
build is clobbered.
"""
import difflib
import errno
import gyp_environment
import logging
import optparse
import os
import sys
import subprocess
import time
import clobber
import landmine_utils
def get_build_dir(build_tool, src_dir, is_iphone=False):
"""
Returns output directory absolute path dependent on build and targets.
Examples:
r'c:\b\build\slave\win\build\src\out'
'/mnt/data/b/build/slave/linux/build/src/out'
'/b/build/slave/ios_rel_device/build/src/xcodebuild'
Keep this function in sync with tools/build/scripts/slave/compile.py
"""
ret = None
if build_tool == 'xcode':
ret = os.path.join(src_dir, 'xcodebuild')
elif build_tool in ['make', 'ninja', 'ninja-ios']: # TODO: Remove ninja-ios.
if 'CHROMIUM_OUT_DIR' in os.environ:
output_dir = os.environ.get('CHROMIUM_OUT_DIR').strip()
if not output_dir:
        raise Exception('CHROMIUM_OUT_DIR environment variable is set but blank!')
else:
output_dir = landmine_utils.gyp_generator_flags().get('output_dir', 'out')
ret = os.path.join(src_dir, output_dir)
else:
raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
return os.path.abspath(ret)
def clobber_if_necessary(new_landmines, src_dir):
"""Does the work of setting, planting, and triggering landmines."""
out_dir = get_build_dir(landmine_utils.builder(), src_dir)
landmines_path = os.path.normpath(os.path.join(src_dir, '.landmines'))
try:
os.makedirs(out_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
if os.path.exists(landmines_path):
with open(landmines_path, 'r') as f:
old_landmines = f.readlines()
if old_landmines != new_landmines:
old_date = time.ctime(os.stat(landmines_path).st_ctime)
diff = difflib.unified_diff(old_landmines, new_landmines,
fromfile='old_landmines', tofile='new_landmines',
fromfiledate=old_date, tofiledate=time.ctime(), n=0)
sys.stdout.write('Clobbering due to:\n')
sys.stdout.writelines(diff)
sys.stdout.flush()
clobber.clobber(out_dir)
# Save current set of landmines for next time.
with open(landmines_path, 'w') as f:
f.writelines(new_landmines)
def process_options():
"""Returns an options object containing the configuration for this script."""
parser = optparse.OptionParser()
parser.add_option(
'-s', '--landmine-scripts', action='append',
help='Path to the script which emits landmines to stdout. The target '
'is passed to this script via option -t. Note that an extra '
'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
parser.add_option('-d', '--src-dir',
help='Path of the source root dir. Overrides the default location of the '
'source root dir when calculating the build directory.')
parser.add_option('-v', '--verbose', action='store_true',
default=('LANDMINES_VERBOSE' in os.environ),
help=('Emit some extra debugging information (default off). This option '
'is also enabled by the presence of a LANDMINES_VERBOSE environment '
'variable.'))
options, args = parser.parse_args()
if args:
parser.error('Unknown arguments %s' % args)
logging.basicConfig(
level=logging.DEBUG if options.verbose else logging.ERROR)
if options.src_dir:
if not os.path.isdir(options.src_dir):
parser.error('Cannot find source root dir at %s' % options.src_dir)
logging.debug('Overriding source root dir. Using: %s', options.src_dir)
else:
options.src_dir = \
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if not options.landmine_scripts:
options.landmine_scripts = [os.path.join(options.src_dir, 'build',
'get_landmines.py')]
extra_script = os.environ.get('EXTRA_LANDMINES_SCRIPT')
if extra_script:
options.landmine_scripts += [extra_script]
return options
def main():
options = process_options()
if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
return 0
gyp_environment.SetEnvironment()
landmines = []
for s in options.landmine_scripts:
proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE)
output, _ = proc.communicate()
landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
clobber_if_necessary(landmines, options.src_dir)
return 0
if __name__ == '__main__':
sys.exit(main())
| []
| []
| [
"EXTRA_LANDMINES_SCRIPT",
"CHROMIUM_OUT_DIR"
]
| [] | ["EXTRA_LANDMINES_SCRIPT", "CHROMIUM_OUT_DIR"] | python | 2 | 0 | |
engine/cmd_start.go | package main
import (
"context"
"encoding/json"
"fmt"
"os"
"os/signal"
"sort"
"strings"
"sync"
"syscall"
"time"
"github.com/ovh/cds/engine/api"
"github.com/ovh/cds/engine/cdn"
"github.com/ovh/cds/engine/elasticsearch"
"github.com/ovh/cds/engine/hatchery/kubernetes"
"github.com/ovh/cds/engine/hatchery/local"
"github.com/ovh/cds/engine/hatchery/marathon"
"github.com/ovh/cds/engine/hatchery/openstack"
"github.com/ovh/cds/engine/hatchery/swarm"
"github.com/ovh/cds/engine/hatchery/vsphere"
"github.com/ovh/cds/engine/hooks"
"github.com/ovh/cds/engine/migrateservice"
"github.com/ovh/cds/engine/repositories"
"github.com/ovh/cds/engine/service"
"github.com/ovh/cds/engine/ui"
"github.com/ovh/cds/engine/vcs"
"github.com/ovh/cds/sdk"
"github.com/ovh/cds/sdk/log"
"github.com/ovh/cds/sdk/telemetry"
"github.com/spf13/cobra"
)
func init() {
startCmd.Flags().StringVar(&flagStartConfigFile, "config", "", "config file")
startCmd.Flags().StringVar(&flagStartRemoteConfig, "remote-config", "", "(optional) consul configuration store")
startCmd.Flags().StringVar(&flagStartRemoteConfigKey, "remote-config-key", "cds/config.api.toml", "(optional) consul configuration store key")
startCmd.Flags().StringVar(&flagStartVaultAddr, "vault-addr", "", "(optional) Vault address to fetch secrets from vault (example: https://vault.mydomain.net:8200)")
startCmd.Flags().StringVar(&flagStartVaultToken, "vault-token", "", "(optional) Vault token to fetch secrets from vault")
}
var (
flagStartConfigFile string
flagStartRemoteConfig string
flagStartRemoteConfigKey string
flagStartVaultAddr string
flagStartVaultToken string
)
type serviceConf struct {
arg string
service service.Service
cfg interface{}
}
var startCmd = &cobra.Command{
Use: "start",
Short: "Start CDS",
Long: `
Start CDS Engine Services
#### API
This is the core component of CDS.
#### Hatcheries
They are the components responsible for spawning workers. Supported integrations/orchestrators are:
* Local machine
* Kubernetes
* Marathon
* Openstack
* Docker Swarm
* Vsphere
#### Hooks
This component operates CDS workflow hooks
#### Repositories
This component operates CDS workflow repositories
#### VCS
This component operates CDS VCS connectivity
#### CDN
This component operates CDS CDN to handle storage
Start all of this with a single command:
$ engine start [api] [cdn] [hatchery:local] [hatchery:kubernetes] [hatchery:marathon] [hatchery:openstack] [hatchery:swarm] [hatchery:vsphere] [elasticsearch] [hooks] [vcs] [repositories] [migrate] [ui]
All the services use the same configuration file format.
You have to specify where the toml configuration file is. It can be a local file, or it can be provided by consul or vault.
You can also set or override configuration values with environment variables.
See $ engine config command for more details.
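For example, to start the API together with the hooks and vcs services from a local
configuration file (the file path below is only an illustration):

$ engine start api hooks vcs --config /etc/cds/config.toml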
`,
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
args = strings.Split(os.Getenv("CDS_SERVICE"), " ")
}
if len(args) == 0 {
cmd.Help() // nolint
return
}
// Initialize config
conf := configImport(args, flagStartConfigFile, flagStartRemoteConfig, flagStartRemoteConfigKey, flagStartVaultAddr, flagStartVaultToken, false)
ctx, cancel := context.WithCancel(context.Background())
// initialize context
defer cancel()
var (
serviceConfs []serviceConf
names []string
types []string
)
for _, a := range args {
fmt.Printf("Starting service %s\n", a)
switch a {
case sdk.TypeAPI:
if conf.API == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: api.New(), cfg: *conf.API})
names = append(names, conf.API.Name)
types = append(types, sdk.TypeAPI)
case sdk.TypeUI:
if conf.UI == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: ui.New(), cfg: *conf.UI})
names = append(names, conf.UI.Name)
types = append(types, sdk.TypeUI)
case "migrate":
if conf.DatabaseMigrate == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: migrateservice.New(), cfg: *conf.DatabaseMigrate})
names = append(names, conf.DatabaseMigrate.Name)
types = append(types, sdk.TypeDBMigrate)
case sdk.TypeHatchery + ":local":
if conf.Hatchery.Local == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: local.New(), cfg: *conf.Hatchery.Local})
names = append(names, conf.Hatchery.Local.Name)
types = append(types, sdk.TypeHatchery)
case sdk.TypeHatchery + ":kubernetes":
if conf.Hatchery.Kubernetes == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: kubernetes.New(), cfg: *conf.Hatchery.Kubernetes})
names = append(names, conf.Hatchery.Kubernetes.Name)
types = append(types, sdk.TypeHatchery)
case sdk.TypeHatchery + ":marathon":
if conf.Hatchery.Marathon == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: marathon.New(), cfg: *conf.Hatchery.Marathon})
names = append(names, conf.Hatchery.Marathon.Name)
types = append(types, sdk.TypeHatchery)
case sdk.TypeHatchery + ":openstack":
if conf.Hatchery.Openstack == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: openstack.New(), cfg: *conf.Hatchery.Openstack})
names = append(names, conf.Hatchery.Openstack.Name)
				types = append(types, sdk.TypeHatchery)
case sdk.TypeHatchery + ":swarm":
if conf.Hatchery.Swarm == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: swarm.New(), cfg: *conf.Hatchery.Swarm})
names = append(names, conf.Hatchery.Swarm.Name)
types = append(types, sdk.TypeHatchery)
case sdk.TypeHatchery + ":vsphere":
if conf.Hatchery.VSphere == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: vsphere.New(), cfg: *conf.Hatchery.VSphere})
names = append(names, conf.Hatchery.VSphere.Name)
types = append(types, sdk.TypeHatchery)
case sdk.TypeHooks:
if conf.Hooks == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: hooks.New(), cfg: *conf.Hooks})
names = append(names, conf.Hooks.Name)
types = append(types, sdk.TypeHooks)
case sdk.TypeCDN:
if conf.CDN == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: cdn.New(), cfg: *conf.CDN})
names = append(names, conf.CDN.Name)
types = append(types, sdk.TypeCDN)
case sdk.TypeVCS:
if conf.VCS == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: vcs.New(), cfg: *conf.VCS})
names = append(names, conf.VCS.Name)
types = append(types, sdk.TypeVCS)
case sdk.TypeRepositories:
if conf.Repositories == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: repositories.New(), cfg: *conf.Repositories})
names = append(names, conf.Repositories.Name)
types = append(types, sdk.TypeRepositories)
case sdk.TypeElasticsearch:
if conf.ElasticSearch == nil {
sdk.Exit("Unable to start: missing service %s configuration", a)
}
serviceConfs = append(serviceConfs, serviceConf{arg: a, service: elasticsearch.New(), cfg: *conf.ElasticSearch})
names = append(names, conf.ElasticSearch.Name)
types = append(types, sdk.TypeElasticsearch)
default:
fmt.Printf("Error: service '%s' unknown\n", a)
os.Exit(1)
}
}
// gracefully shutdown all
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func(ctx context.Context) {
<-c
unregisterServices(ctx, serviceConfs)
signal.Stop(c)
cancel()
}(ctx)
//Initialize logs
logConf := log.Conf{
Level: conf.Log.Level,
GraylogProtocol: conf.Log.Graylog.Protocol,
GraylogHost: conf.Log.Graylog.Host,
GraylogPort: fmt.Sprintf("%d", conf.Log.Graylog.Port),
GraylogExtraKey: conf.Log.Graylog.ExtraKey,
GraylogExtraValue: conf.Log.Graylog.ExtraValue,
GraylogFieldCDSVersion: sdk.VERSION,
GraylogFieldCDSOS: sdk.GOOS,
GraylogFieldCDSArch: sdk.GOARCH,
GraylogFieldCDSServiceName: strings.Join(names, "_"),
GraylogFieldCDSServiceType: strings.Join(types, "_"),
}
log.Initialize(ctx, &logConf)
// Sort the services to start so the API is started first (lexicographic sort by arg puts "api" ahead of the other service names)
sort.Slice(serviceConfs, func(i, j int) bool {
return serviceConfs[i].arg < serviceConfs[j].arg
})
var wg sync.WaitGroup
//Configure the services
for i := range serviceConfs {
s := serviceConfs[i]
if err := s.service.ApplyConfiguration(s.cfg); err != nil {
sdk.Exit("Unable to init service %s: %v", s.arg, err)
}
log.Info(ctx, "%s> %s configuration applied", s.arg, s.service.Name())
if srv, ok := s.service.(service.BeforeStart); ok {
if err := srv.BeforeStart(ctx); err != nil {
sdk.Exit("Unable to start service %s: %v", s.arg, err)
}
}
ctx, err := telemetry.Init(ctx, conf.Telemetry, s.service)
if err != nil {
sdk.Exit("Unable to start tracing exporter: %v", err)
}
wg.Add(1)
go func(srv serviceConf) {
start(ctx, srv.service, srv.cfg, srv.arg)
wg.Done()
}(s)
// Stupid trick: when the API is starting, wait a bit before starting the others
if s.arg == "API" || s.arg == "api" {
time.Sleep(2 * time.Second)
}
}
wg.Wait()
//Wait for the end
<-ctx.Done()
},
}
func unregisterServices(ctx context.Context, serviceConfs []serviceConf) {
// unregister all services
for i := range serviceConfs {
s := serviceConfs[i]
fmt.Printf("Unregister (%v)\n", s.service.Name())
if err := s.service.Unregister(ctx); err != nil {
log.Error(ctx, "%s> Unable to unregister: %v", s.service.Name(), err)
}
}
if ctx.Err() != nil {
fmt.Printf("Exiting (%v)\n", ctx.Err())
}
}
func start(c context.Context, s service.Service, cfg interface{}, serviceName string) {
if err := serve(c, s, serviceName, cfg); err != nil {
fmt.Printf("Service has been stopped: %s %+v", serviceName, err)
}
}
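// serve drives one service's lifecycle: Init, Start (initial signin), Register, then the
// Heartbeat and Serve goroutines, and blocks until the context is cancelled or one of them fails.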
func serve(c context.Context, s service.Service, serviceName string, cfg interface{}) error {
ctx, cancel := context.WithCancel(c)
defer cancel()
x, err := s.Init(cfg)
if err != nil {
return err
}
// first signin
if err := s.Start(ctx, x); err != nil {
log.Error(ctx, "%s> Unable to start service: %v", serviceName, err)
return err
}
var srvConfig sdk.ServiceConfig
b, _ := json.Marshal(cfg)
json.Unmarshal(b, &srvConfig) // nolint
// then register
if err := s.Register(c, srvConfig); err != nil {
log.Error(ctx, "%s> Unable to register: %v", serviceName, err)
return err
}
log.Info(ctx, "%s> Service registered", serviceName)
// finally start the heartbeat goroutine
go func() {
if err := s.Heartbeat(ctx, s.Status); err != nil {
log.Error(ctx, "%v", err)
cancel()
}
}()
go func() {
if err := s.Serve(ctx); err != nil {
log.Error(ctx, "%s> Serve: %+v", serviceName, err)
cancel()
}
}()
<-ctx.Done()
if ctx.Err() != nil {
log.Error(ctx, "%s> Service exiting with err: %+v", serviceName, ctx.Err())
} else {
log.Info(ctx, "%s> Service exiting", serviceName)
}
return ctx.Err()
}
| [
"\"CDS_SERVICE\""
]
| []
| [
"CDS_SERVICE"
]
| [] | ["CDS_SERVICE"] | go | 1 | 0 | |
django/ams2/wsgi.py | """
WSGI config for ams2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ["DJANGO_SETTINGS_MODULE"] = "ams2.settings"
# os.environ["UWSGI_ROUTE_HOST"] = "`^(?!localhost$) break:400"
application = get_wsgi_application()
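# Deployment sketch (assumption, not part of this repository): point any WSGI server at this
# module, e.g. `uwsgi --module ams2.wsgi:application` or `gunicorn ams2.wsgi:application`.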
| []
| []
| [
"UWSGI_ROUTE_HOST",
"DJANGO_SETTINGS_MODULE"
]
| [] | ["UWSGI_ROUTE_HOST", "DJANGO_SETTINGS_MODULE"] | python | 2 | 0 | |
vendor/github.com/hashicorp/aws-sdk-go-base/awsauth.go | package awsbase
import (
"errors"
"fmt"
"log"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/awserr"
awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-multierror"
homedir "github.com/mitchellh/go-homedir"
)
const (
// Default amount of time for EC2/ECS metadata client operations.
// Keep this value low to prevent long delays in non-EC2/ECS environments.
DefaultMetadataClientTimeout = 100 * time.Millisecond
)
// GetAccountIDAndPartition gets the account ID and associated partition.
func GetAccountIDAndPartition(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, string, error) {
var accountID, partition string
var err, errors error
if authProviderName == ec2rolecreds.ProviderName {
accountID, partition, err = GetAccountIDAndPartitionFromEC2Metadata()
} else {
accountID, partition, err = GetAccountIDAndPartitionFromIAMGetUser(iamconn)
}
if accountID != "" {
return accountID, partition, nil
}
errors = multierror.Append(errors, err)
accountID, partition, err = GetAccountIDAndPartitionFromSTSGetCallerIdentity(stsconn)
if accountID != "" {
return accountID, partition, nil
}
errors = multierror.Append(errors, err)
accountID, partition, err = GetAccountIDAndPartitionFromIAMListRoles(iamconn)
if accountID != "" {
return accountID, partition, nil
}
errors = multierror.Append(errors, err)
return accountID, partition, errors
}
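// Usage sketch for GetAccountIDAndPartition (hypothetical caller): pass pre-built IAM and STS
// clients plus the provider name reported by the resolved credentials, e.g.
//
//	accountID, partition, err := GetAccountIDAndPartition(iamConn, stsConn, credsValue.ProviderName)
//
// When every lookup fails, err aggregates the individual errors via multierror.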
// GetAccountIDAndPartitionFromEC2Metadata gets the account ID and associated
// partition from EC2 metadata.
func GetAccountIDAndPartitionFromEC2Metadata() (string, string, error) {
log.Println("[DEBUG] Trying to get account information via EC2 Metadata")
cfg := &aws.Config{}
setOptionalEndpoint(cfg)
sess, err := session.NewSession(cfg)
if err != nil {
return "", "", fmt.Errorf("error creating EC2 Metadata session: %w", err)
}
metadataClient := ec2metadata.New(sess)
info, err := metadataClient.IAMInfo()
if err != nil {
// We can end up here if there's an issue with the instance metadata service
// or if we're getting credentials from AdRoll's Hologram (in which case IAMInfo will
// error out).
err = fmt.Errorf("failed getting account information via EC2 Metadata IAM information: %w", err)
log.Printf("[DEBUG] %s", err)
return "", "", err
}
return parseAccountIDAndPartitionFromARN(info.InstanceProfileArn)
}
// GetAccountIDAndPartitionFromIAMGetUser gets the account ID and associated
// partition from IAM.
func GetAccountIDAndPartitionFromIAMGetUser(iamconn *iam.IAM) (string, string, error) {
log.Println("[DEBUG] Trying to get account information via iam:GetUser")
output, err := iamconn.GetUser(&iam.GetUserInput{})
if err != nil {
// AccessDenied and ValidationError can be raised
// if the credentials belong to a federated profile, so we ignore these
if awsErr, ok := err.(awserr.Error); ok {
switch awsErr.Code() {
case "AccessDenied", "InvalidClientTokenId", "ValidationError":
return "", "", nil
}
}
err = fmt.Errorf("failed getting account information via iam:GetUser: %w", err)
log.Printf("[DEBUG] %s", err)
return "", "", err
}
if output == nil || output.User == nil {
err = errors.New("empty iam:GetUser response")
log.Printf("[DEBUG] %s", err)
return "", "", err
}
return parseAccountIDAndPartitionFromARN(aws.StringValue(output.User.Arn))
}
// GetAccountIDAndPartitionFromIAMListRoles gets the account ID and associated
// partition from listing IAM roles.
func GetAccountIDAndPartitionFromIAMListRoles(iamconn *iam.IAM) (string, string, error) {
log.Println("[DEBUG] Trying to get account information via iam:ListRoles")
output, err := iamconn.ListRoles(&iam.ListRolesInput{
MaxItems: aws.Int64(int64(1)),
})
if err != nil {
err = fmt.Errorf("failed getting account information via iam:ListRoles: %w", err)
log.Printf("[DEBUG] %s", err)
return "", "", err
}
if output == nil || len(output.Roles) < 1 {
err = fmt.Errorf("empty iam:ListRoles response")
log.Printf("[DEBUG] %s", err)
return "", "", err
}
return parseAccountIDAndPartitionFromARN(aws.StringValue(output.Roles[0].Arn))
}
// GetAccountIDAndPartitionFromSTSGetCallerIdentity gets the account ID and associated
// partition from STS caller identity.
func GetAccountIDAndPartitionFromSTSGetCallerIdentity(stsconn *sts.STS) (string, string, error) {
log.Println("[DEBUG] Trying to get account information via sts:GetCallerIdentity")
output, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err != nil {
return "", "", fmt.Errorf("error calling sts:GetCallerIdentity: %w", err)
}
if output == nil || output.Arn == nil {
err = errors.New("empty sts:GetCallerIdentity response")
log.Printf("[DEBUG] %s", err)
return "", "", err
}
return parseAccountIDAndPartitionFromARN(aws.StringValue(output.Arn))
}
func parseAccountIDAndPartitionFromARN(inputARN string) (string, string, error) {
arn, err := arn.Parse(inputARN)
if err != nil {
return "", "", fmt.Errorf("error parsing ARN (%s): %s", inputARN, err)
}
return arn.AccountID, arn.Partition, nil
}
// GetCredentialsFromSession returns credentials derived from a session. A
// session uses the AWS SDK Go chain of providers so may use a provider (e.g.,
// ProcessProvider) that is not part of the Terraform provider chain.
func GetCredentialsFromSession(c *Config) (*awsCredentials.Credentials, error) {
log.Printf("[INFO] Attempting to use session-derived credentials")
// Avoid setting HTTPClient here as it will prevent the ec2metadata
// client from automatically lowering the timeout to 1 second.
options := &session.Options{
Config: aws.Config{
EndpointResolver: c.EndpointResolver(),
MaxRetries: aws.Int(0),
Region: aws.String(c.Region),
},
Profile: c.Profile,
SharedConfigState: session.SharedConfigEnable,
}
sess, err := session.NewSessionWithOptions(*options)
if err != nil {
if IsAWSErr(err, "NoCredentialProviders", "") {
return nil, c.NewNoValidCredentialSourcesError(err)
}
return nil, fmt.Errorf("Error creating AWS session: %w", err)
}
creds := sess.Config.Credentials
cp, err := sess.Config.Credentials.Get()
if err != nil {
return nil, c.NewNoValidCredentialSourcesError(err)
}
log.Printf("[INFO] Successfully derived credentials from session")
log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
return creds, nil
}
// GetCredentials gets credentials from the environment, shared credentials,
// the session (which may include a credential process), or ECS/EC2 metadata endpoints.
// GetCredentials also validates the credentials and the ability to assume a role
// or will return an error if unsuccessful.
func GetCredentials(c *Config) (*awsCredentials.Credentials, error) {
sharedCredentialsFilename, err := homedir.Expand(c.CredsFilename)
if err != nil {
return nil, fmt.Errorf("error expanding shared credentials filename: %w", err)
}
// build a chain provider, lazy-evaluated by aws-sdk
providers := []awsCredentials.Provider{
&awsCredentials.StaticProvider{Value: awsCredentials.Value{
AccessKeyID: c.AccessKey,
SecretAccessKey: c.SecretKey,
SessionToken: c.Token,
}},
&awsCredentials.EnvProvider{},
&awsCredentials.SharedCredentialsProvider{
Filename: sharedCredentialsFilename,
Profile: c.Profile,
},
}
// Validate the credentials before returning them
creds := awsCredentials.NewChainCredentials(providers)
cp, err := creds.Get()
if err != nil {
if IsAWSErr(err, "NoCredentialProviders", "") {
creds, err = GetCredentialsFromSession(c)
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %w", err)
}
} else {
log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
}
// This is the "normal" flow (i.e. not assuming a role)
if c.AssumeRoleARN == "" {
return creds, nil
}
// Otherwise we need to construct an STS client with the main credentials, and verify
// that we can assume the defined role.
log.Printf("[INFO] Attempting to AssumeRole %s (SessionName: %q, ExternalId: %q)",
c.AssumeRoleARN, c.AssumeRoleSessionName, c.AssumeRoleExternalID)
awsConfig := &aws.Config{
Credentials: creds,
EndpointResolver: c.EndpointResolver(),
Region: aws.String(c.Region),
MaxRetries: aws.Int(c.MaxRetries),
HTTPClient: cleanhttp.DefaultClient(),
}
assumeRoleSession, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("error creating assume role session: %w", err)
}
stsclient := sts.New(assumeRoleSession)
assumeRoleProvider := &stscreds.AssumeRoleProvider{
Client: stsclient,
RoleARN: c.AssumeRoleARN,
}
if c.AssumeRoleDurationSeconds > 0 {
assumeRoleProvider.Duration = time.Duration(c.AssumeRoleDurationSeconds) * time.Second
}
if c.AssumeRoleExternalID != "" {
assumeRoleProvider.ExternalID = aws.String(c.AssumeRoleExternalID)
}
if c.AssumeRolePolicy != "" {
assumeRoleProvider.Policy = aws.String(c.AssumeRolePolicy)
}
if len(c.AssumeRolePolicyARNs) > 0 {
var policyDescriptorTypes []*sts.PolicyDescriptorType
for _, policyARN := range c.AssumeRolePolicyARNs {
policyDescriptorType := &sts.PolicyDescriptorType{
Arn: aws.String(policyARN),
}
policyDescriptorTypes = append(policyDescriptorTypes, policyDescriptorType)
}
assumeRoleProvider.PolicyArns = policyDescriptorTypes
}
if c.AssumeRoleSessionName != "" {
assumeRoleProvider.RoleSessionName = c.AssumeRoleSessionName
}
if len(c.AssumeRoleTags) > 0 {
var tags []*sts.Tag
for k, v := range c.AssumeRoleTags {
tag := &sts.Tag{
Key: aws.String(k),
Value: aws.String(v),
}
tags = append(tags, tag)
}
assumeRoleProvider.Tags = tags
}
if len(c.AssumeRoleTransitiveTagKeys) > 0 {
assumeRoleProvider.TransitiveTagKeys = aws.StringSlice(c.AssumeRoleTransitiveTagKeys)
}
providers = []awsCredentials.Provider{assumeRoleProvider}
assumeRoleCreds := awsCredentials.NewChainCredentials(providers)
_, err = assumeRoleCreds.Get()
if err != nil {
return nil, c.NewCannotAssumeRoleError(err)
}
return assumeRoleCreds, nil
}
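// Usage sketch for GetCredentials above (assumption: cfg is a fully populated *Config):
//
//	creds, err := GetCredentials(cfg)
//	if err != nil {
//		return nil, err
//	}
//	v, _ := creds.Get() // v.ProviderName reports which provider supplied the keys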
func setOptionalEndpoint(cfg *aws.Config) string {
endpoint := os.Getenv("AWS_METADATA_URL")
if endpoint != "" {
log.Printf("[INFO] Setting custom metadata endpoint: %q", endpoint)
cfg.Endpoint = aws.String(endpoint)
return endpoint
}
return ""
}
| [
"\"AWS_METADATA_URL\""
]
| []
| [
"AWS_METADATA_URL"
]
| [] | ["AWS_METADATA_URL"] | go | 1 | 0 | |
lib/metrics.py | import datetime
import json
import os
import sys
import threading
import time
from abc import ABCMeta, abstractmethod
BUILDPACK_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, os.path.join(BUILDPACK_DIR, "lib"))
import buildpackutil # noqa: E402
import database_config # noqa: E402
import psycopg2 # noqa: E402
import requests # noqa: E402
from m2ee import logger, munin # noqa: E402
def int_or_default(value, default=0):
try:
return int(value)
except Exception as e:
logger.debug("Failed to coerce %s to int.", value, exc_info=True)
return default
class MetricsEmitter(metaclass=ABCMeta):
@abstractmethod
def emit(self, stats):
raise NotImplementedError
class LoggingEmitter(MetricsEmitter):
def emit(self, stats):
logger.info("MENDIX-METRICS: " + json.dumps(stats))
class MetricsServerEmitter(MetricsEmitter):
def __init__(self, metrics_url):
self.metrics_url = metrics_url
self.fallback_emitter = LoggingEmitter()
def emit(self, stats):
try:
response = requests.post(self.metrics_url, json=stats, timeout=10)
except Exception as e:
logger.debug(
"Failed to send metrics to trends server.", exc_info=True
)
# Fallback to old pipeline and stdout for now.
# Later, we will want to buffer and resend.
# This will be done in DEP-75.
self.fallback_emitter.emit(stats)
return
if response.status_code != 200:
logger.debug(
"Failed to send metrics to trends server. Falling back to old "
"loggregator based method. Got status code %s "
"for URL %s, with body %s.",
response.status_code,
self.metrics_url,
response.text,
)
self.fallback_emitter.emit(stats)
class MetricsEmitterThread(threading.Thread):
def __init__(self, interval, m2ee):
super(MetricsEmitterThread, self).__init__()
self.interval = interval
self.m2ee = m2ee
self.db = None
if buildpackutil.bypass_loggregator_logging():
logger.info("Metrics are logged direct to metrics server.")
self.emitter = MetricsServerEmitter(
metrics_url=buildpackutil.get_metrics_url()
)
else:
logger.info("Metrics are logged to stdout.")
self.emitter = LoggingEmitter()
def emit(self, stats):
stats["version"] = "1.0"
stats["timestamp"] = datetime.datetime.now().isoformat()
stats["instance_index"] = os.getenv("CF_INSTANCE_INDEX", 0)
self.emitter.emit(stats)
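# Shape of an emitted record (illustrative values; the nested keys depend on which
# _inject_* helpers succeeded):
#   {"version": "1.0", "timestamp": "<ISO-8601>", "instance_index": "0",
#    "health": {...}, "storage": {...}, "database": {...}, "mendix_runtime": {...}}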
def run(self):
logger.debug(
"Starting metrics emitter with interval %d" % self.interval
)
while True:
stats = {}
try:
if buildpackutil.i_am_primary_instance():
stats = self._inject_database_stats(stats)
stats = self._inject_storage_stats(stats)
stats = self._inject_health(stats)
try:
stats = self._inject_m2ee_stats(stats)
except Exception:
logger.debug("Unable to get metrics from runtime")
self.emit(stats)
except psycopg2.OperationalError as up:
logger.exception("METRICS: error while gathering metrics")
self.emit(
{
"health": {
"health": 0,
"diagnosis": "Database error: %s" % str(up),
}
}
)
except Exception as e:
logger.exception("METRICS: error while gathering metrics")
self.emit(
{
"health": {
"health": 4,
"diagnosis": "Unable to retrieve metrics",
}
}
)
time.sleep(self.interval)
def _inject_health(self, stats):
health = {}
translation = {"healthy": 10, "unknown": 7, "sick": 4, "critical": 0}
stats["health"] = health
try:
health_response = self.m2ee.client.check_health()
if health_response.has_error():
if (
health_response.get_result() == 3
and health_response.get_cause()
== "java.lang.IllegalArgument"
"Exception: Action should not be null"
):
# Because of an incomplete implementation, in Mendix 2.5.4 or
# 2.5.5 this means that the runtime is health-check
# capable, but no health check microflow is defined.
health["health"] = translation["unknown"]
health["diagnosis"] = "No health check microflow defined"
elif (
health_response.get_result()
== health_response.ERR_ACTION_NOT_FOUND
):
# Admin action 'check_health' does not exist.
health["health"] = translation["unknown"]
health["diagnosis"] = "No health check microflow defined"
else:
health["health"] = translation["critical"]
health["diagnosis"] = (
"Health check failed unexpectedly: %s"
% health_response.get_error()
)
else:
feedback = health_response.get_feedback()
health["health"] = translation[feedback["health"]]
health["diagnosis"] = (
feedback["diagnosis"] if "diagnosis" in feedback else ""
)
health["response"] = health_response._json
except Exception as e:
logger.warn("Metrics: Failed to get health status, " + str(e))
health["health"] = translation["critical"]
health["diagnosis"] = "Health check failed unexpectedly: %s" % e
return stats
def _inject_m2ee_stats(self, stats):
m2ee_stats, java_version = munin.get_stats_from_runtime(
self.m2ee.client, self.m2ee.config
)
if "sessions" in m2ee_stats:
m2ee_stats["sessions"]["user_sessions"] = {}
m2ee_stats = munin.augment_and_fix_stats(
m2ee_stats, self.m2ee.runner.get_pid(), java_version
)
critical_logs_count = len(self.m2ee.client.get_critical_log_messages())
m2ee_stats["critical_logs_count"] = critical_logs_count
stats["mendix_runtime"] = m2ee_stats
return stats
def _inject_storage_stats(self, stats):
storage_stats = {}
try:
storage_stats["get_number_of_files"] = self._get_number_of_files()
except Exception as e:
logger.warn(
"Metrics: Failed to retrieve number of files, " + str(e)
)
raise
stats["storage"] = storage_stats
return stats
def _inject_database_stats(self, stats):
database_stats = {}
index_size = self._get_database_index_size()
if index_size:
database_stats["indexes_size"] = index_size
storage = self._get_database_storage()
if storage:
database_stats["storage"] = storage
table_size = self._get_database_table_size()
if table_size:
database_stats["tables_size"] = table_size
mutations_stats = self._get_database_mutations()
if mutations_stats:
database_stats.update(mutations_stats)
stats["database"] = database_stats
return stats
def _get_database_storage(self):
if "DATABASE_DISKSTORAGE" in os.environ:
try:
return float(os.environ["DATABASE_DISKSTORAGE"])
except ValueError:
return None
def _get_database_mutations(self):
conn = self._get_db_conn()
db_config = database_config.get_database_config()
with conn.cursor() as cursor:
cursor.execute(
"SELECT xact_commit, "
" xact_rollback, "
" tup_inserted, "
" tup_updated, "
" tup_deleted "
"FROM pg_stat_database "
"WHERE datname = '%s';" % (db_config["DatabaseName"],)
)
rows = cursor.fetchall()
return {
"xact_commit": int_or_default(rows[0][0]),
"xact_rollback": int_or_default(rows[0][1]),
"tup_inserted": int_or_default(rows[0][2]),
"tup_updated": int_or_default(rows[0][3]),
"tup_deleted": int_or_default(rows[0][4]),
}
return None
def _get_database_table_size(self):
conn = self._get_db_conn()
db_config = database_config.get_database_config()
with conn.cursor() as cursor:
cursor.execute(
"SELECT pg_database_size('%s');" % (db_config["DatabaseName"],)
)
rows = cursor.fetchall()
return int_or_default(rows[0][0])
def _get_database_index_size(self):
conn = self._get_db_conn()
with conn.cursor() as cursor:
cursor.execute(
"""
SELECT SUM(pg_relation_size(quote_ident(indexrelname)::text)) AS index_size
FROM pg_tables t
LEFT OUTER JOIN pg_class c ON t.tablename=c.relname
LEFT OUTER JOIN
(SELECT c.relname AS ctablename,
ipg.relname AS indexname,
x.indnatts AS number_of_columns,
idx_scan,
idx_tup_read,
idx_tup_fetch,
indexrelname,
indisunique
FROM pg_index x
JOIN pg_class c ON c.oid = x.indrelid
JOIN pg_class ipg ON ipg.oid = x.indexrelid
JOIN pg_stat_all_indexes psai ON x.indexrelid = psai.indexrelid)
AS foo
ON t.tablename = foo.ctablename
WHERE t.schemaname='public';
"""
)
rows = cursor.fetchall()
return int_or_default(rows[0][0])
def _get_number_of_files(self):
conn = self._get_db_conn()
with conn.cursor() as cursor:
cursor.execute(
"SELECT COUNT(id) from system$filedocument WHERE hascontents=true;"
)
rows = cursor.fetchall()
if len(rows) == 0:
raise Exception("Unexpected result from database query")
return int_or_default(rows[0][0])
def _get_size_of_files(self):
conn = self._get_db_conn()
with conn.cursor() as cursor:
try:
cursor.execute(
"SELECT sum(size) from system$filedocument WHERE hascontents=true;"
)
rows = cursor.fetchall()
if len(rows) == 0:
return 0
return int_or_default(rows[0][0])
except Exception as e:
# We ignore errors here, as the information is not available for
# older mendix versions
logger.debug(
"METRICS: Error retrieving file sizes", exc_info=True
)
return 0
def _get_db_conn(self):
if self.db and self.db.closed != 0:
self.db.close()
self.db = None
if not self.db:
db_config = database_config.get_database_config()
if db_config["DatabaseType"] != "PostgreSQL":
raise Exception(
"Metrics only supports postgresql, not %s"
% db_config["DatabaseType"]
)
host_and_port = db_config["DatabaseHost"].split(":")
host = host_and_port[0]
if len(host_and_port) > 1:
port = int(host_and_port[1])
else:
port = 5432
self.db = psycopg2.connect(
"options='-c statement_timeout=60s'",
database=db_config["DatabaseName"],
user=db_config["DatabaseUserName"],
password=db_config["DatabasePassword"],
host=host,
port=port,
connect_timeout=3,
)
self.db.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
)
return self.db
| []
| []
| [
"CF_INSTANCE_INDEX",
"DATABASE_DISKSTORAGE"
]
| [] | ["CF_INSTANCE_INDEX", "DATABASE_DISKSTORAGE"] | python | 2 | 0 | |
io_scene_vrm/misc/glb_factory.py | """
Copyright (c) 2018 iCyP
Released under the MIT license
https://opensource.org/licenses/mit-license.php
"""
import collections
import contextlib
import datetime
import json
import os
import re
import secrets
import string
import struct
import traceback
from collections import OrderedDict
from math import floor
from sys import float_info
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import bmesh
import bpy
from mathutils import Matrix
from .. import vrm_types
from ..gl_constants import GlConstants
from ..vrm_types import nested_json_list_getter as json_list_get
from .glb_bin_collection import GlbBin, GlbBinCollection, ImageBin
from .version import version
from .vrm_helper import find_export_objects, shader_nodes_and_materials
class GlbObj:
class ValidationError(Exception):
pass
class KhrTextureTransform:
def __init__(self, offset: Tuple[float, float], scale: Tuple[float, float]):
self.offset = offset
self.scale = scale
def add_to(self, texture_info: Dict[str, Any]) -> None:
texture_info.update(
{
"extensions": {
"KHR_texture_transform": {
"scale": self.scale,
"offset": self.offset,
}
}
}
)
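# Usage sketch: KhrTextureTransform(offset=(0.0, 0.0), scale=(1.0, 1.0)).add_to(texture_info)
# adds a KHR_texture_transform extension block to an existing glTF textureInfo dict.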
def __init__(self, export_invisibles: bool, export_only_selections: bool) -> None:
if bpy.ops.vrm.model_validate(
"INVOKE_DEFAULT", show_successful_message=False
) != {"FINISHED"}:
raise self.ValidationError()
self.export_objects = find_export_objects(
export_invisibles, export_only_selections
)
self.vrm_version: Optional[str] = None
self.json_dic: Dict[str, Any] = OrderedDict()
self.bin = b""
self.glb_bin_collector = GlbBinCollection()
self.use_dummy_armature = False
self.export_id = "BlenderVrmAddonExport" + (
"".join(secrets.choice(string.digits) for _ in range(10))
)
self.mesh_name_to_index: Dict[str, int] = {}
armatures = [obj for obj in self.export_objects if obj.type == "ARMATURE"]
if armatures:
self.armature = armatures[0]
else:
dummy_armature_key = self.export_id + "DummyArmatureKey"
bpy.ops.icyp.make_basic_armature(
"EXEC_DEFAULT", custom_property_name=dummy_armature_key
)
for obj in bpy.context.selectable_objects:
if obj.type == "ARMATURE" and dummy_armature_key in obj:
self.export_objects.append(obj)
self.armature = obj
if not self.armature:
raise Exception("Failed to generate default armature")
self.use_dummy_armature = True
self.result: Optional[bytes] = None
def convert_bpy2glb(self, vrm_version: str) -> Optional[bytes]:
self.vrm_version = vrm_version
self.image_to_bin()
self.armature_to_node_and_scenes_dic()
self.material_to_dic()
self.mesh_to_bin_and_dic()
self.json_dic["scene"] = 0
self.gltf_meta_to_dic()
self.vrm_meta_to_dic()  # collider, meta, etc.
self.finalize()
return self.result
@staticmethod
def axis_blender_to_glb(vec3: Sequence[float]) -> List[float]:
return [vec3[i] * t for i, t in zip([0, 2, 1], [-1, 1, 1])]
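# Coordinate conversion note: Blender is Z-up, glTF is Y-up, so this maps (x, y, z) -> (-x, z, y),
# e.g. axis_blender_to_glb([1.0, 2.0, 3.0]) == [-1.0, 3.0, 2.0].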
def textblock2json(self, armature_key: str, default: Any) -> Any:
if armature_key not in self.armature:
return default
with contextlib.suppress(TypeError):
if self.armature[armature_key] not in bpy.data.texts:
return default
textblock = bpy.data.texts[self.armature[armature_key]]
textblock_str = "".join([line.body for line in textblock.lines])
with contextlib.suppress(json.JSONDecodeError):
return json.loads(
textblock_str,
object_pairs_hook=OrderedDict,
)
return default
def textblock2json_dict(
self, armature_key: str, default: Dict[Any, Any]
) -> Dict[Any, Any]:
result = self.textblock2json(armature_key, default)
return result if isinstance(result, dict) else default
def textblock2json_list(self, armature_key: str, default: List[Any]) -> List[Any]:
result = self.textblock2json(armature_key, default)
return result if isinstance(result, list) else default
def image_to_bin(self) -> None:
# collect used image
used_images = []
used_materials = []
for mesh in [obj for obj in self.export_objects if obj.type == "MESH"]:
for mat in mesh.data.materials:
if mat not in used_materials:
if "vrm_shader" in mat:
del mat["vrm_shader"]
used_materials.append(mat)
# image fetching
for node, mat in shader_nodes_and_materials(used_materials):
if node.node_tree["SHADER"] == "MToon_unversioned":
mat["vrm_shader"] = "MToon_unversioned"
for (
shader_vals
) in vrm_types.MaterialMtoon.texture_kind_exchange_dic.values():
# Support models that were loaded by earlier versions (1.3.5 or earlier), which had this typo
#
# Those models have node.inputs["NomalmapTexture"] instead of "NormalmapTexture". # noqa: SC100
# But 'shader_vals' which comes from MaterialMtoon.texture_kind_exchange_dic is "NormalmapTexture".
# If the script referenced node.inputs["NormalmapTexture"] in that situation, an error would occur.
# So change it to "NomalmapTexture", which is a typo but points to the same thing # noqa: SC100
# in those models.
if (
shader_vals == "NormalmapTexture"
and "NormalmapTexture" not in node.inputs
and "NomalmapTexture" in node.inputs
):
shader_vals = "NomalmapTexture"
if shader_vals == "ReceiveShadow_Texture":
if node.inputs[shader_vals + "_alpha"].links:
n = node.inputs[shader_vals + "_alpha"].links[0].from_node
if n.image not in used_images:
used_images.append(n.image)
elif node.inputs[shader_vals].links:
n = node.inputs[shader_vals].links[0].from_node
if n.image not in used_images:
used_images.append(n.image)
elif node.node_tree["SHADER"] == "GLTF":
mat["vrm_shader"] = "GLTF"
for k in vrm_types.Gltf.TEXTURE_INPUT_NAMES:
if node.inputs[k].links:
n = node.inputs[k].links[0].from_node
if n.image not in used_images:
used_images.append(n.image)
elif node.node_tree["SHADER"] == "TRANSPARENT_ZWRITE":
mat["vrm_shader"] = "TRANSPARENT_ZWRITE"
if node.inputs["Main_Texture"].links:
n = node.inputs["Main_Texture"].links[0].from_node
if n.image not in used_images:
used_images.append(n.image)
else:
# ?
pass
# thumbnail
if self.armature.get("texture") is not None:
image = bpy.data.images[self.armature["texture"]]
if image not in used_images:
used_images.append(image)
image_to_image_index = (
lambda used_image: bpy.data.images.index(used_image)
if used_image in bpy.data.images.items()
else len(bpy.data.images) + used_images.index(used_image)
)
for image in sorted(used_images, key=image_to_image_index):
if image.packed_file is not None:
image_bin = image.packed_file.data
else:
with open(image.filepath_from_user(), "rb") as f:
image_bin = f.read()
name = image.name
filetype = "image/" + image.file_format.lower()
ImageBin(image_bin, name, filetype, self.glb_bin_collector)
def armature_to_node_and_scenes_dic(self) -> None:
nodes = []
scene = []
skins = []
bone_id_dic = {
b.name: bone_id for bone_id, b in enumerate(self.armature.data.bones)
}
def bone_to_node(b_bone: bpy.types.Bone) -> Dict[str, Any]:
parent_head_local = (
b_bone.parent.head_local if b_bone.parent is not None else [0, 0, 0]
)
world_head_local = (
self.armature.matrix_world @ Matrix.Translation(b_bone.head_local)
).to_translation()
parent_world_head_local = (
self.armature.matrix_world @ Matrix.Translation(parent_head_local)
).to_translation()
node = OrderedDict(
{
"name": b_bone.name,
"translation": self.axis_blender_to_glb(
[
world_head_local[i] - parent_world_head_local[i]
for i in range(3)
]
),
# "rotation":[0,0,0,1],
# "scale":[1,1,1],
"children": [bone_id_dic[ch.name] for ch in b_bone.children],
}
)
if len(node["children"]) == 0:
del node["children"]
return node
human_bone_node_names = []
for human_bone in vrm_types.HumanBones.requires + vrm_types.HumanBones.defines:
if (
human_bone in self.armature.data
and self.armature.data[human_bone]
and self.armature.data[human_bone]
in [bone.name for bone in self.armature.data.bones]
):
human_bone_node_names.append(self.armature.data[human_bone])
for bone in self.armature.data.bones:
if bone.parent is not None:
continue
has_human_bone = False
if bone.name in human_bone_node_names:
has_human_bone = True
skin: Dict[str, Any] = {"joints": []}
root_bone_id = bone_id_dic[bone.name]
skin["joints"].append(root_bone_id)
skin["skeleton"] = root_bone_id
scene.append(root_bone_id)
nodes.append(bone_to_node(bone))
bone_children = list(bone.children)
while bone_children:
child = bone_children.pop()
if child.name in human_bone_node_names:
has_human_bone = True
nodes.append(bone_to_node(child))
skin["joints"].append(bone_id_dic[child.name])
bone_children += list(child.children)
nodes = sorted(nodes, key=lambda node: bone_id_dic[node["name"]])
if has_human_bone:
skins.append(skin)
for skin in skins:
skin_invert_matrix_bin = b""
f_4x4_packer = struct.Struct("<16f").pack
for node_id in skin["joints"]:
bone_name = nodes[node_id]["name"]
bone_glb_world_pos = self.axis_blender_to_glb(
(
self.armature.matrix_world
@ Matrix.Translation(
self.armature.data.bones[bone_name].head_local
)
).to_translation()
)
inv_matrix = [
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
0,
-bone_glb_world_pos[0],
-bone_glb_world_pos[1],
-bone_glb_world_pos[2],
1,
]
skin_invert_matrix_bin += f_4x4_packer(*inv_matrix)
im_bin = GlbBin(
skin_invert_matrix_bin,
"MAT4",
GlConstants.FLOAT,
len(skin["joints"]),
None,
self.glb_bin_collector,
)
skin["inverseBindMatrices"] = im_bin.accessor_id
self.json_dic.update({"scenes": [{"nodes": scene}]})
self.json_dic.update({"nodes": nodes})
self.json_dic.update({"skins": skins})
def material_to_dic(self) -> None:
glb_material_list = []
vrm_material_props_list = []
gltf2_io_texture_images: List[Tuple[str, bytes, int]] = []
image_id_dic = {
image.name: image.image_id for image in self.glb_bin_collector.image_bins
}
sampler_dic: Dict[Tuple[int, int, int, int], int] = OrderedDict()
texture_dic: Dict[Tuple[int, int], int] = OrderedDict()
# region texture func
def add_texture(image_name: str, wrap_type: int, filter_type: int) -> int:
sampler_dic_key = (wrap_type, wrap_type, filter_type, filter_type)
if sampler_dic_key not in sampler_dic.keys():
sampler_dic.update({sampler_dic_key: len(sampler_dic)})
if (
image_id_dic[image_name],
sampler_dic[sampler_dic_key],
) not in texture_dic.keys():
texture_dic.update(
{
(
image_id_dic[image_name],
sampler_dic[sampler_dic_key],
): len(texture_dic)
}
)
return texture_dic[(image_id_dic[image_name], sampler_dic[sampler_dic_key])]
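# add_texture deduplicates: each distinct (wrapS, wrapT, magFilter, minFilter) tuple gets one
# sampler entry and each distinct (image, sampler) pair gets one texture index, so repeated
# calls with the same image and sampling settings reuse the existing slots.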
def apply_texture_and_sampler_to_dic() -> None:
if sampler_dic:
sampler_list = self.json_dic["samplers"] = []
for sampler in sampler_dic.keys():
sampler_list.append(
{
"wrapS": sampler[0],
"wrapT": sampler[1],
"magFilter": sampler[2],
"minFilter": sampler[3],
}
)
if texture_dic:
textures = []
for tex in texture_dic:
texture = {"sampler": tex[1], "source": tex[0]}
textures.append(texture)
self.json_dic.update({"textures": textures})
# region function separate by shader
def pbr_fallback(
b_mat: bpy.types.Material,
base_color: Optional[Sequence[float]] = None,
metalness: Optional[float] = None,
roughness: Optional[float] = None,
base_color_texture: Optional[Tuple[str, int, int]] = None,
metallic_roughness_texture: Optional[Tuple[str, int, int]] = None,
normal_texture: Optional[Tuple[str, int, int]] = None,
normal_texture_scale: Optional[float] = None,
occlusion_texture: Optional[Tuple[str, int, int]] = None,
emissive_texture: Optional[Tuple[str, int, int]] = None,
transparent_method: str = "OPAQUE",
transparency_cutoff: Optional[float] = 0.5,
unlit: Optional[bool] = None,
doublesided: bool = False,
texture_transform: Optional[GlbObj.KhrTextureTransform] = None,
) -> Dict[str, Any]:
"""transparent_method = {"OPAQUE","MASK","BLEND"}"""
if base_color is None:
base_color = (1, 1, 1, 1)
if metalness is None:
metalness = 0
if roughness is None:
roughness = 0.9
if unlit is None:
unlit = True
fallback_dic = {
"name": b_mat.name,
"pbrMetallicRoughness": {
"baseColorFactor": base_color,
"metallicFactor": metalness,
"roughnessFactor": roughness,
},
}
for k, v in fallback_dic["pbrMetallicRoughness"].items():
if v is None:
del fallback_dic["pbrMetallicRoughness"][k]
if base_color_texture is not None:
texture_info = {
"index": add_texture(*base_color_texture),
"texCoord": 0,
}
if texture_transform is not None:
texture_transform.add_to(texture_info)
fallback_dic["pbrMetallicRoughness"].update(
{"baseColorTexture": texture_info} # TODO:
)
if metallic_roughness_texture is not None:
texture_info = {
"index": add_texture(*metallic_roughness_texture),
"texCoord": 0, # TODO:
}
if texture_transform is not None:
texture_transform.add_to(texture_info)
fallback_dic["pbrMetallicRoughness"].update(
{"metallicRoughnessTexture": texture_info}
)
if normal_texture is not None:
normal_texture_info: Dict[str, Union[int, float]] = {
"index": add_texture(*normal_texture),
"texCoord": 0, # TODO:
}
if normal_texture_scale is not None:
normal_texture_info["scale"] = normal_texture_scale
if texture_transform is not None:
texture_transform.add_to(normal_texture_info)
fallback_dic["normalTexture"] = normal_texture_info
if occlusion_texture is not None:
occlusion_texture_info = {
"index": add_texture(*occlusion_texture),
"texCoord": 0, # TODO:
}
if texture_transform is not None:
texture_transform.add_to(occlusion_texture_info)
fallback_dic["occlusionTexture"] = occlusion_texture_info
if emissive_texture is not None:
emissive_texture_info = {
"index": add_texture(*emissive_texture),
"texCoord": 0, # TODO:
}
if texture_transform is not None:
texture_transform.add_to(emissive_texture_info)
fallback_dic["emissiveTexture"] = emissive_texture_info
fallback_dic["alphaMode"] = transparent_method
if transparent_method == "MASK":
fallback_dic["alphaCutoff"] = (
0.5 if transparency_cutoff is None else transparency_cutoff
)
if unlit:
fallback_dic["extensions"] = {"KHR_materials_unlit": {}}
fallback_dic["doubleSided"] = doublesided
return fallback_dic
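# Call sketch (hypothetical values): pbr_fallback(b_mat, base_color=[1, 1, 1, 1],
#     base_color_texture=("Albedo.png", GlConstants.REPEAT, GlConstants.LINEAR),
#     transparent_method="MASK", transparency_cutoff=0.5)
# returns a plain glTF PBR material dict that viewers can use when they ignore the VRM extension.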
# region util func
def get_texture_name_and_sampler_type(
shader_node: bpy.types.Node, input_socket_name: str
) -> Optional[Tuple[str, int, int]]:
if (
input_socket_name == "NormalmapTexture"
and "NormalmapTexture" not in shader_node.inputs
and "NomalmapTexture" in shader_node.inputs
):
input_socket_name = "NomalmapTexture"
if (
not shader_node.inputs.get(input_socket_name)
or not shader_node.inputs.get(input_socket_name).links
):
return None
tex_name = (
shader_node.inputs.get(input_socket_name).links[0].from_node.image.name
)
# blender is ('Linear', 'Closest', 'Cubic', 'Smart') glTF is Linear, Closest
if (
shader_node.inputs.get(input_socket_name)
.links[0]
.from_node.interpolation
== "Closest"
):
filter_type = GlConstants.NEAREST
else:
filter_type = GlConstants.LINEAR
# blender is ('REPEAT', 'EXTEND', 'CLIP') glTF is CLAMP_TO_EDGE,MIRRORED_REPEAT,REPEAT
if (
shader_node.inputs.get(input_socket_name).links[0].from_node.extension
== "REPEAT"
):
wrap_type = GlConstants.REPEAT
else:
wrap_type = GlConstants.CLAMP_TO_EDGE
return tex_name, wrap_type, filter_type
def get_float_value(
shader_node: bpy.types.Node, input_socket_name: str
) -> Optional[float]:
float_val = None
if shader_node.inputs.get(input_socket_name):
if shader_node.inputs.get(input_socket_name).links:
float_val = (
shader_node.inputs.get(input_socket_name)
.links[0]
.from_node.outputs[0]
.default_value
)
else:
float_val = shader_node.inputs.get(input_socket_name).default_value
return float_val
def get_rgba_val(
shader_node: bpy.types.Node, input_socket_name: str
) -> Optional[List[float]]:
rgba_val = None
if shader_node.inputs.get(input_socket_name):
if shader_node.inputs.get(input_socket_name).links:
rgba_val = [
shader_node.inputs.get(input_socket_name)
.links[0]
.from_node.outputs[0]
.default_value[i]
for i in range(4)
]
else:
rgba_val = [
shader_node.inputs.get(input_socket_name).default_value[i]
for i in range(4)
]
return rgba_val
# endregion util func
def make_mtoon_unversioned_extension_dic(
b_mat: bpy.types.Material, mtoon_shader_node: bpy.types.Node
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
mtoon_dic: Dict[str, Any] = OrderedDict()
mtoon_dic["name"] = b_mat.name
mtoon_dic["shader"] = "VRM/MToon"
mtoon_dic["keywordMap"] = {}
keyword_map = mtoon_dic["keywordMap"]
mtoon_dic["tagMap"] = {}
tag_map = mtoon_dic["tagMap"]
mtoon_dic["floatProperties"] = OrderedDict()
mtoon_float_dic: Dict[str, float] = mtoon_dic["floatProperties"]
mtoon_dic["vectorProperties"] = OrderedDict()
mtoon_vector_dic: Dict[str, List[float]] = mtoon_dic["vectorProperties"]
mtoon_dic["textureProperties"] = OrderedDict()
mtoon_texture_dic = mtoon_dic["textureProperties"]
outline_width_mode = 0
outline_color_mode = 0
for float_key, float_prop in [
(k, val)
for k, val in vrm_types.MaterialMtoon.float_props_exchange_dic.items()
if val is not None
]:
float_val = get_float_value(mtoon_shader_node, float_prop)
if float_val is not None:
mtoon_float_dic[float_key] = float_val
if float_key == "_OutlineWidthMode":
outline_width_mode = min(max(round(float_val), 0), 2)
mtoon_float_dic[float_key] = int(outline_width_mode)
if float_key == "_OutlineColorMode":
outline_color_mode = min(max(round(float_val), 0), 1)
mtoon_float_dic[float_key] = int(outline_color_mode)
def outline_keyword_set(
width_world: bool,
width_screen: bool,
color_fixed: bool,
color_mixed: bool,
) -> None:
if width_world:
keyword_map["MTOON_OUTLINE_WIDTH_WORLD"] = width_world
elif width_screen:
keyword_map["MTOON_OUTLINE_WIDTH_SCREEN"] = width_screen
if color_fixed:
keyword_map["MTOON_OUTLINE_COLOR_FIXED"] = color_fixed
elif color_mixed:
keyword_map["MTOON_OUTLINE_COLOR_MIXED"] = color_mixed
if outline_width_mode < 1:
outline_keyword_set(False, False, False, False)
elif outline_width_mode < 2:
if outline_color_mode < 1:
outline_keyword_set(True, False, True, False)
else:
outline_keyword_set(True, False, False, True)
elif outline_width_mode >= 2:
if outline_color_mode < 1:
outline_keyword_set(False, True, True, False)
else:
outline_keyword_set(False, True, False, True)
vec_props = list(
dict.fromkeys(
vrm_types.MaterialMtoon.vector_props_exchange_dic.values()
)
)
for (
remove_vec_prop
) in vrm_types.MaterialMtoon.texture_kind_exchange_dic.values():
if remove_vec_prop in vec_props:
vec_props.remove(remove_vec_prop)
for vector_key, vector_prop in [
(k, v)
for k, v in vrm_types.MaterialMtoon.vector_props_exchange_dic.items()
if v in vec_props
]:
vector_val = get_rgba_val(mtoon_shader_node, vector_prop)
if vector_val is not None:
mtoon_vector_dic[vector_key] = vector_val
use_normalmap = False
main_texture: Optional[Tuple[str, int, int]] = None
main_texture_transform: Optional[GlbObj.KhrTextureTransform] = None
normal_texture: Optional[Tuple[str, int, int]] = None
emissive_texture: Optional[Tuple[str, int, int]] = None
for (
texture_key,
texture_prop,
) in vrm_types.MaterialMtoon.texture_kind_exchange_dic.items():
tex = get_texture_name_and_sampler_type(mtoon_shader_node, texture_prop)
if tex is None:
continue
mtoon_texture_dic[texture_key] = add_texture(*tex)
mtoon_vector_dic[texture_key] = [0, 0, 1, 1]
if texture_prop == "MainTexture":
main_texture = tex
uv_offset_scaling_node = None
try:
uv_offset_scaling_node = (
mtoon_shader_node.inputs[texture_prop]
.links[0]
.from_node.inputs[0]
.links[0]
.from_node
)
except IndexError:
uv_offset_scaling_node = None
if (
uv_offset_scaling_node is not None
and uv_offset_scaling_node.type == "MAPPING"
):
if bpy.app.version <= (2, 80):
mtoon_vector_dic[texture_key] = [
uv_offset_scaling_node.translation[0],
uv_offset_scaling_node.translation[1],
uv_offset_scaling_node.scale[0],
uv_offset_scaling_node.scale[1],
]
else:
mtoon_vector_dic[texture_key] = [
uv_offset_scaling_node.inputs["Location"].default_value[
0
],
uv_offset_scaling_node.inputs["Location"].default_value[
1
],
uv_offset_scaling_node.inputs["Scale"].default_value[0],
uv_offset_scaling_node.inputs["Scale"].default_value[1],
]
else:
mtoon_vector_dic[texture_key] = [0, 0, 1, 1]
main_texture_transform = GlbObj.KhrTextureTransform(
offset=(
mtoon_vector_dic[texture_key][0],
mtoon_vector_dic[texture_key][1],
),
scale=(
mtoon_vector_dic[texture_key][2],
mtoon_vector_dic[texture_key][3],
),
)
elif (
# Support older versions that had the typo
texture_prop
in ["NormalmapTexture", "NomalmapTexture"]
):
use_normalmap = True
normal_texture = tex
elif texture_prop == "Emission_Texture":
emissive_texture = tex
def material_prop_setter(
blend_mode: int,
src_blend: int,
dst_blend: int,
z_write: int,
alphatest: bool,
render_queue: int,
render_type: str,
) -> None:
mtoon_float_dic["_BlendMode"] = blend_mode
mtoon_float_dic["_SrcBlend"] = src_blend
mtoon_float_dic["_DstBlend"] = dst_blend
mtoon_float_dic["_ZWrite"] = z_write
if alphatest:
keyword_map.update({"_ALPHATEST_ON": alphatest})
mtoon_dic["renderQueue"] = render_queue
tag_map["RenderType"] = render_type
if b_mat.blend_method == "OPAQUE":
material_prop_setter(0, 1, 0, 1, False, -1, "Opaque")
elif b_mat.blend_method == "CLIP":
material_prop_setter(1, 1, 0, 1, True, 2450, "TransparentCutout")
mtoon_float_dic["_Cutoff"] = b_mat.alpha_threshold
else: # transparent and Z_TRANSPARENCY or Raytrace
material_prop_setter(2, 5, 10, 0, False, 3000, "Transparent")
keyword_map.update(
{"_ALPHABLEND_ON": b_mat.blend_method not in ("OPAQUE", "CLIP")}
)
keyword_map.update({"_ALPHAPREMULTIPLY_ON": False})
mtoon_float_dic["_MToonVersion"] = vrm_types.MaterialMtoon.version
mtoon_float_dic["_CullMode"] = (
2 if b_mat.use_backface_culling else 0
) # no cull or bf cull
mtoon_float_dic[
"_OutlineCullMode"
] = 1 # front face cull (for invert normal outline)
mtoon_float_dic["_DebugMode"] = 0
keyword_map.update({"MTOON_DEBUG_NORMAL": False})
keyword_map.update({"MTOON_DEBUG_LITSHADERATE": False})
if use_normalmap:
keyword_map.update({"_NORMALMAP": use_normalmap})
# for pbr_fallback
if b_mat.blend_method == "OPAQUE":
transparent_method = "OPAQUE"
transparency_cutoff = None
elif b_mat.blend_method == "CLIP":
transparent_method = "MASK"
transparency_cutoff = b_mat.alpha_threshold
else:
transparent_method = "BLEND"
transparency_cutoff = None
pbr_dic = pbr_fallback(
b_mat,
base_color=mtoon_vector_dic.get("_Color"),
base_color_texture=main_texture,
normal_texture=normal_texture,
normal_texture_scale=mtoon_float_dic.get("_BumpScale"),
emissive_texture=emissive_texture,
transparent_method=transparent_method,
transparency_cutoff=transparency_cutoff,
doublesided=not b_mat.use_backface_culling,
texture_transform=main_texture_transform,
)
vrm_version = self.vrm_version
if vrm_version is None:
raise Exception("vrm version is None")
if vrm_version.startswith("1."):
mtoon_ext_dic: Dict[str, Any] = {}
mtoon_ext_dic["properties"] = {}
mt_prop = mtoon_ext_dic["properties"]
mt_prop["version"] = "3.2"
blendmode = mtoon_float_dic.get("_BlendMode")
if blendmode == 0:
blendmode_str = "opaque"
elif blendmode == 1:
blendmode_str = "cutout"
else:
blendmode_str = "transparent"
# TODO transparentWithZWrite
mt_prop["renderMode"] = blendmode_str
mt_prop["cullMode"] = (
# mtoon_float_dic.get("_CullMode") == "back"
"on"
if b_mat.use_backface_culling
else "off"
) # no cull or bf cull
# TODO unknown number
mt_prop["renderQueueOffsetNumber"] = 0
mt_prop["litFactor"] = mtoon_vector_dic.get("_Color")
mt_prop["litMultiplyTexture"] = mtoon_texture_dic.get("_MainTex")
mt_prop["shadeFactor"] = mtoon_vector_dic.get("_ShadeColor")
mt_prop["shadeMultiplyTexture"] = mtoon_texture_dic.get("_ShadeTexture")
mt_prop["cutoutThresholdFactor"] = mtoon_float_dic.get("_Cutoff")
mt_prop["shadingShiftFactor"] = mtoon_float_dic.get("_ShadeShift")
mt_prop["shadingToonyFactor"] = mtoon_float_dic.get("_ShadeToony")
mt_prop["shadowReceiveMultiplierFactor"] = mtoon_float_dic.get(
"_ReceiveShadowRate"
)
mt_prop[
"shadowReceiveMultiplierMultiplyTexture"
] = mtoon_texture_dic.get("_ReceiveShadowTexture")
mt_prop["litAndShadeMixingMultiplierFactor"] = mtoon_float_dic.get(
"_ShadingGradeRate"
)
mt_prop[
"litAndShadeMixingMultiplierMultiplyTexture"
] = mtoon_texture_dic.get("_ShadingGradeTexture")
mt_prop["lightColorAttenuationFactor"] = mtoon_float_dic.get(
"_LightColorAttenuation"
)
mt_prop["giIntensityFactor"] = mtoon_float_dic.get(
"_IndirectLightIntensity"
)
mt_prop["normalTexture"] = mtoon_texture_dic.get("_BumpMap")
mt_prop["normalScaleFactor"] = mtoon_float_dic.get("_BumpScale")
mt_prop["emissionFactor"] = mtoon_vector_dic.get("_EmissionColor")
mt_prop["emissionMultiplyTexture"] = mtoon_texture_dic.get(
"_EmissionMap"
)
mt_prop["additiveTexture"] = mtoon_texture_dic.get("_SphereAdd")
mt_prop["rimFactor"] = mtoon_vector_dic.get("_RimColor")
mt_prop["rimMultiplyTexture"] = mtoon_texture_dic.get("_RimTexture")
mt_prop["rimLightingMixFactor"] = mtoon_float_dic.get("_RimLightingMix")
mt_prop["rimFresnelPowerFactor"] = mtoon_float_dic.get(
"_RimFresnelPower"
)
mt_prop["rimLiftFactor"] = mtoon_float_dic.get("_RimLift")
mt_prop["outlineWidthMode"] = [
"none",
"worldCoordinates",
"screenCoordinates",
][floor(mtoon_float_dic.get("_OutlineWidthMode", 0))]
mt_prop["outlineWidthFactor"] = mtoon_vector_dic.get("_OutlineColor")
mt_prop["outlineWidthMultiplyTexture"] = mtoon_texture_dic.get(
"_OutlineWidthTexture"
)
mt_prop["outlineScaledMaxDistanceFactor"] = mtoon_float_dic.get(
"_OutlineScaledMaxDistance"
)
mt_prop["outlineColorMode"] = ["fixedColor", "mixedLighting"][
floor(mtoon_float_dic.get("_OutlineLightingMix", 0))
]
mt_prop["outlineFactor"] = mtoon_float_dic.get("_OutlineWidth")
mt_prop["outlineLightingMixFactor"] = mtoon_float_dic.get(
"_OutlineLightingMix"
)
uv_transforms = mtoon_vector_dic.get("_MainTex")
if uv_transforms is None:
uv_transforms = [0, 0, 1, 1]
mt_prop["mainTextureLeftBottomOriginOffset"] = uv_transforms[0:2]
mt_prop["mainTextureLeftBottomOriginScale"] = uv_transforms[2:4]
mt_prop["uvAnimationMaskTexture"] = mtoon_texture_dic.get(
"_UvAnimMaskTexture"
)
mt_prop["uvAnimationScrollXSpeedFactor"] = mtoon_float_dic.get(
"_UvAnimScrollX"
)
mt_prop["uvAnimationScrollYSpeedFactor"] = mtoon_float_dic.get(
"_UvAnimScrollY"
)
mt_prop["uvAnimationRotationSpeedFactor"] = mtoon_float_dic.get(
"_UvAnimRotation"
)
garbage_list = []
for k, v in mt_prop.items():
if v is None:
garbage_list.append(k)
for garbage in garbage_list:
mt_prop.pop(garbage)
pbr_dic["extensions"].update({"VRMC_materials_mtoon": mtoon_ext_dic})
return mtoon_dic, pbr_dic
def make_gltf_mat_dic(
b_mat: bpy.types.Material, gltf_shader_node: bpy.types.Node
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
gltf_dic = OrderedDict()
gltf_dic["name"] = b_mat.name
gltf_dic["shader"] = "VRM_USE_GLTFSHADER"
gltf_dic["keywordMap"] = {}
gltf_dic["tagMap"] = {}
gltf_dic["floatProperties"] = {}
gltf_dic["vectorProperties"] = {}
gltf_dic["textureProperties"] = {}
gltf_dic["extras"] = {"VRM_Addon_for_Blender_legacy_gltf_material": {}}
if b_mat.blend_method == "OPAQUE":
transparent_method = "OPAQUE"
transparency_cutoff = None
elif b_mat.blend_method == "CLIP":
transparent_method = "MASK"
transparency_cutoff = b_mat.alpha_threshold
else:
transparent_method = "BLEND"
transparency_cutoff = None
unlit_value = get_float_value(gltf_shader_node, "unlit")
if unlit_value is None:
unlit = None
else:
unlit = unlit_value > 0.5
pbr_dic = pbr_fallback(
b_mat,
base_color=get_rgba_val(gltf_shader_node, "base_Color"),
metalness=get_float_value(gltf_shader_node, "metallic"),
roughness=get_float_value(gltf_shader_node, "roughness"),
base_color_texture=get_texture_name_and_sampler_type(
gltf_shader_node, "color_texture"
),
metallic_roughness_texture=get_texture_name_and_sampler_type(
gltf_shader_node, "metallic_roughness_texture"
),
transparent_method=transparent_method,
transparency_cutoff=transparency_cutoff,
unlit=unlit,
doublesided=not b_mat.use_backface_culling,
)
def pbr_tex_add(texture_type: str, socket_name: str) -> None:
img = get_texture_name_and_sampler_type(gltf_shader_node, socket_name)
if img is not None:
pbr_dic[texture_type] = {"index": add_texture(*img), "texCoord": 0}
else:
print(socket_name)
pbr_tex_add("normalTexture", "normal")
pbr_tex_add("emissiveTexture", "emissive_texture")
pbr_tex_add("occlusionTexture", "occlusion_texture")
emissive_factor = get_rgba_val(gltf_shader_node, "emissive_color")
if emissive_factor is None:
emissive_factor = [0, 0, 0]
else:
emissive_factor = emissive_factor[0:3]
pbr_dic["emissiveFactor"] = emissive_factor
return gltf_dic, pbr_dic
def make_transzw_mat_dic(
b_mat: bpy.types.Material, transzw_shader_node: bpy.types.Node
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
zw_dic = OrderedDict()
zw_dic["name"] = b_mat.name
zw_dic["shader"] = "VRM/UnlitTransparentZWrite"
zw_dic["renderQueue"] = 2600
zw_dic["keywordMap"] = {}
zw_dic["tagMap"] = {"RenderType": "Transparent"}
zw_dic["floatProperties"] = {}
zw_dic["vectorProperties"] = {}
zw_dic["textureProperties"] = {}
color_tex = get_texture_name_and_sampler_type(
transzw_shader_node, "Main_Texture"
)
if color_tex is not None:
zw_dic["textureProperties"] = {"_MainTex": add_texture(*color_tex)}
zw_dic["vectorProperties"] = {"_MainTex": [0, 0, 1, 1]}
pbr_dic = pbr_fallback(
b_mat, base_color_texture=color_tex, transparent_method="BLEND"
)
return zw_dic, pbr_dic
def add_gltf2_io_texture(
gltf2_io_texture_info: Any,
) -> Dict[str, Union[int, float]]:
image = gltf2_io_texture_info.index.source
found = False
for (name, data, index) in gltf2_io_texture_images:
if name != image.name or data != image.buffer_view.data:
continue
image_index = index
image_name = {value: key for key, value in image_id_dic.items()}[
image_index
]
found = True
break
if not found:
image_index = self.glb_bin_collector.get_new_image_id()
gltf2_io_texture_images.append(
(image.name, image.buffer_view.data, image_index)
)
image_base_name = re.sub(
r"^BlenderVrmAddonImport[0-9]+Image[0-9]+_", "", image.name
)
for count in range(100000):
image_name = image_base_name
if count:
image_name += "." + str(count)
if image_name not in image_id_dic:
break
image_id_dic[image_name] = image_index
ImageBin(
image.buffer_view.data,
image_name,
image.mime_type,
self.glb_bin_collector,
)
sampler = gltf2_io_texture_info.index.sampler
if sampler is None:
sampler_dic_key = (
GlConstants.REPEAT,
GlConstants.REPEAT,
GlConstants.LINEAR,
GlConstants.LINEAR,
)
else:
sampler_dic_key = (
sampler.wrap_s or GlConstants.REPEAT,
sampler.wrap_t or GlConstants.REPEAT,
sampler.mag_filter or GlConstants.LINEAR,
sampler.min_filter or GlConstants.LINEAR,
)
# VRoid Hub may not support mipmaps
if sampler_dic_key[3] in [
GlConstants.NEAREST_MIPMAP_LINEAR,
GlConstants.NEAREST_MIPMAP_NEAREST,
]:
sampler_dic_key = sampler_dic_key[0:3] + (GlConstants.NEAREST,)
elif sampler_dic_key[3] in [
GlConstants.LINEAR_MIPMAP_NEAREST,
GlConstants.LINEAR_MIPMAP_LINEAR,
]:
sampler_dic_key = sampler_dic_key[0:3] + (GlConstants.LINEAR,)
if sampler_dic_key not in sampler_dic.keys():
sampler_dic.update({sampler_dic_key: len(sampler_dic)})
if (image_index, sampler_dic[sampler_dic_key]) not in texture_dic.keys():
texture_dic.update(
{(image_index, sampler_dic[sampler_dic_key]): len(texture_dic)}
)
texture_info: Dict[str, Union[int, float]] = {
"index": texture_dic[(image_index, sampler_dic[sampler_dic_key])],
"texCoord": 0, # TODO
}
if hasattr(gltf2_io_texture_info, "scale") and isinstance(
gltf2_io_texture_info.scale, (int, float)
):
texture_info["scale"] = gltf2_io_texture_info.scale
if hasattr(gltf2_io_texture_info, "strength") and isinstance(
gltf2_io_texture_info.strength, (int, float)
):
texture_info["strength"] = gltf2_io_texture_info.strength
return texture_info
def make_non_vrm_mat_dic(
b_mat: bpy.types.Material,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
vrm_dic = {
"name": b_mat.name,
"shader": "VRM_USE_GLTFSHADER",
"keywordMap": {},
"tagMap": {},
"floatProperties": {},
"vectorProperties": {},
"textureProperties": {},
}
fallback = (vrm_dic, {"name": b_mat.name})
pbr_dic: Dict[str, Any] = {}
pbr_dic["name"] = b_mat.name
if bpy.app.version < (2, 83):
return fallback
try:
from io_scene_gltf2.blender.exp.gltf2_blender_gather_materials import (
gather_material,
) # pyright: reportMissingImports=false
except ImportError as e:
print(f"Failed to import glTF 2.0 Add-on: {e}")
return fallback
gltf2_io_material: Optional[Any] = None
export_settings: Dict[str, Any] = {
"timestamp": datetime.datetime.now(),
"gltf_materials": True,
"gltf_format": "GLB",
"gltf_image_format": "AUTO",
"gltf_extras": True,
"gltf_user_extensions": [],
"gltf_binary": bytearray(),
}
try:
if bpy.app.version >= (2, 91):
# https://github.com/KhronosGroup/glTF-Blender-IO/blob/abd8380e19dbe5e5fb9042513ad6b744032bc9bc/addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_materials.py#L32
gltf2_io_material = gather_material(b_mat, export_settings)
else:
# https://github.com/KhronosGroup/glTF-Blender-IO/blob/ac3471cae42b34fc69fda75fa404117272fa9560/addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_materials.py#L32
gltf2_io_material = gather_material(
b_mat, not b_mat.use_backface_culling, export_settings
)
if isinstance(gltf2_io_material.alpha_cutoff, (int, float)):
pbr_dic["alphaCutoff"] = gltf2_io_material.alpha_cutoff
if isinstance(gltf2_io_material.alpha_mode, str):
pbr_dic["alphaMode"] = gltf2_io_material.alpha_mode
if isinstance(gltf2_io_material.double_sided, bool):
pbr_dic["doubleSided"] = gltf2_io_material.double_sided
if isinstance(gltf2_io_material.emissive_factor, collections.Sequence):
pbr_dic["emissiveFactor"] = gltf2_io_material.emissive_factor
if gltf2_io_material.emissive_texture is not None:
pbr_dic["emissiveTexture"] = add_gltf2_io_texture(
gltf2_io_material.emissive_texture
)
if isinstance(gltf2_io_material.extensions, dict):
pbr_dic["extensions"] = {}
# https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Khronos/KHR_materials_unlit
if (
gltf2_io_material.extensions.get("KHR_materials_unlit")
is not None
):
pbr_dic["extensions"].update({"KHR_materials_unlit": {}})
if gltf2_io_material.normal_texture is not None:
pbr_dic["normalTexture"] = add_gltf2_io_texture(
gltf2_io_material.normal_texture
)
if gltf2_io_material.occlusion_texture is not None:
pbr_dic["occlusionTexture"] = add_gltf2_io_texture(
gltf2_io_material.occlusion_texture
)
if gltf2_io_material.pbr_metallic_roughness is not None:
pbr_metallic_roughness: Dict[str, Any] = {}
if isinstance(
gltf2_io_material.pbr_metallic_roughness.base_color_factor,
collections.Sequence,
):
pbr_metallic_roughness[
"baseColorFactor"
] = gltf2_io_material.pbr_metallic_roughness.base_color_factor
if (
gltf2_io_material.pbr_metallic_roughness.base_color_texture
is not None
):
pbr_metallic_roughness[
"baseColorTexture"
] = add_gltf2_io_texture(
gltf2_io_material.pbr_metallic_roughness.base_color_texture
)
if isinstance(
gltf2_io_material.pbr_metallic_roughness.metallic_factor,
(int, float),
):
pbr_metallic_roughness[
"metallicFactor"
] = gltf2_io_material.pbr_metallic_roughness.metallic_factor
if (
gltf2_io_material.pbr_metallic_roughness.metallic_roughness_texture
is not None
):
pbr_metallic_roughness[
"metallicRoughnessTexture"
] = add_gltf2_io_texture(
gltf2_io_material.pbr_metallic_roughness.metallic_roughness_texture
)
if isinstance(
gltf2_io_material.pbr_metallic_roughness.roughness_factor,
(int, float),
):
pbr_metallic_roughness[
"roughnessFactor"
] = gltf2_io_material.pbr_metallic_roughness.roughness_factor
pbr_dic["pbrMetallicRoughness"] = pbr_metallic_roughness
except KeyError as e:
traceback.print_exc()
print(f"glTF Material KeyError: {e}")
return fallback
except TypeError as e:
traceback.print_exc()
print(f"glTF Material TypeError: {e}")
return fallback
except Exception as e:
traceback.print_exc()
print(f"glTF Material Exception: {e}")
return fallback
return vrm_dic, pbr_dic
# endregion function separate by shader
used_materials = []
for mesh in [obj for obj in self.export_objects if obj.type == "MESH"]:
for mat in mesh.data.materials:
if mat not in used_materials:
used_materials.append(mat)
for b_mat in used_materials:
material_properties_dic: Dict[str, Any] = {}
pbr_dic: Dict[str, Any] = {}
if b_mat.get("vrm_shader") == "MToon_unversioned":
for node in b_mat.node_tree.nodes:
if node.type == "OUTPUT_MATERIAL":
mtoon_shader_node = node.inputs["Surface"].links[0].from_node
(
material_properties_dic,
pbr_dic,
) = make_mtoon_unversioned_extension_dic(
b_mat, mtoon_shader_node
)
break
elif b_mat.get("vrm_shader") == "GLTF":
for node in b_mat.node_tree.nodes:
if node.type == "OUTPUT_MATERIAL":
gltf_shader_node = node.inputs["Surface"].links[0].from_node
material_properties_dic, pbr_dic = make_gltf_mat_dic(
b_mat, gltf_shader_node
)
break
elif b_mat.get("vrm_shader") == "TRANSPARENT_ZWRITE":
for node in b_mat.node_tree.nodes:
if node.type == "OUTPUT_MATERIAL":
zw_shader_node = node.inputs["Surface"].links[0].from_node
material_properties_dic, pbr_dic = make_transzw_mat_dic(
b_mat, zw_shader_node
)
break
else:
material_properties_dic, pbr_dic = make_non_vrm_mat_dic(b_mat)
glb_material_list.append(pbr_dic)
vrm_material_props_list.append(material_properties_dic)
apply_texture_and_sampler_to_dic()
self.json_dic.update({"materials": glb_material_list})
vrm_version = self.vrm_version
if vrm_version is None:
raise Exception("vrm version is None")
if vrm_version.startswith("0."):
self.json_dic.update(
{"extensions": {"VRM": {"materialProperties": vrm_material_props_list}}}
)
def joint_id_from_node_name_solver(
self, node_name: str, node_id_dic: Dict[str, int]
) -> int:
try:
node_id = node_id_dic[node_name]
joints = self.json_dic["skins"][0]["joints"]
if not isinstance(joints, list):
raise Exception("joints is not list")
return joints.index(node_id)
except (ValueError, KeyError):
print(f"{node_name} bone may be not exist")
return -1 # 存在しないボーンを指してる場合は-1を返す
@staticmethod
def fetch_morph_vertex_normal_difference(
mesh_data: bpy.types.Mesh,
) -> Dict[str, List[List[float]]]:
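# For every shape key except the reference key (Basis), compute the
# per-vertex normal difference against the reference key's vertex normals.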
morph_normal_diff_dic = {}
vert_base_normal_dic = OrderedDict()
for kb in mesh_data.shape_keys.key_blocks:
vert_base_normal_dic.update({kb.name: kb.normals_vertex_get()})
reference_key_name = mesh_data.shape_keys.reference_key.name
for k, v in vert_base_normal_dic.items():
if k == reference_key_name:
continue
values = []
for vert_morph_normal, vert_base_normal in zip(
zip(*[iter(v)] * 3),
zip(*[iter(vert_base_normal_dic[reference_key_name])] * 3),
):
values.append(
[vert_morph_normal[i] - vert_base_normal[i] for i in range(3)]
)
morph_normal_diff_dic.update({k: values})
return morph_normal_diff_dic
def mesh_to_bin_and_dic(self) -> None:
self.json_dic["meshes"] = []
vrm_version = self.vrm_version
if vrm_version is None:
raise Exception("vrm version is None")
for mesh_id, mesh in enumerate(
[obj for obj in self.export_objects if obj.type == "MESH"]
):
is_skin_mesh = True
if (
len([m for m in mesh.modifiers if m.type == "ARMATURE"]) == 0
and mesh.parent is not None
and mesh.parent.type == "ARMATURE"
and mesh.parent_bone is not None
):
is_skin_mesh = False
node_dic = OrderedDict(
{
"name": mesh.name,
"translation": self.axis_blender_to_glb(mesh.location),
"rotation": [0, 0, 0, 1], # このへんは規約なので
"scale": [1, 1, 1], # このへんは規約なので
"mesh": mesh_id,
}
)
if is_skin_mesh:
node_dic["translation"] = [0, 0, 0] # skinnedmeshはtransformを無視される
# TODO: The skin index is hard-coded; since this is a single model there should never be two skins, but hard-coding it is still questionable.
node_dic["skin"] = 0
self.json_dic["nodes"].append(node_dic)
mesh_node_id = len(self.json_dic["nodes"]) - 1
if is_skin_mesh:
self.json_dic["scenes"][0]["nodes"].append(mesh_node_id)
else:
parent_node = (
[
node
for node in self.json_dic["nodes"]
if node["name"] == mesh.parent_bone
]
+ [None]
)[0]
base_pos = [0, 0, 0]
if parent_node:
if "children" in parent_node:
parent_node["children"].append(mesh_node_id)
else:
parent_node["children"] = [mesh_node_id]
base_pos = self.armature.data.bones[mesh.parent_bone].head_local
else:
self.json_dic["scenes"][0]["nodes"].append(mesh_node_id)
relate_pos = [mesh.location[i] - base_pos[i] for i in range(3)]
self.json_dic["nodes"][mesh_node_id][
"translation"
] = self.axis_blender_to_glb(relate_pos)
# region hell
bpy.ops.object.mode_set(mode="OBJECT")
# region glTF-Blender-IO
# https://github.com/KhronosGroup/glTF-Blender-IO/blob/blender-v2.91-release/addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py#L285-L303
# http://www.apache.org/licenses/LICENSE-2.0
armature_modifiers = {}
if is_skin_mesh:
# temporarily disable Armature modifiers if exporting skins
for idx, modifier in enumerate(mesh.modifiers):
if modifier.type == "ARMATURE":
armature_modifiers[idx] = modifier.show_viewport
modifier.show_viewport = False
depsgraph = bpy.context.evaluated_depsgraph_get()
mesh_owner = mesh.evaluated_get(depsgraph)
mesh_data = mesh_owner.to_mesh(
preserve_all_data_layers=True, depsgraph=depsgraph
).copy()
for prop in mesh.data.keys():
mesh_data[prop] = mesh.data[prop]
if is_skin_mesh:
# restore Armature modifiers
for idx, show_viewport in armature_modifiers.items():
mesh.modifiers[idx].show_viewport = show_viewport
# endregion glTF-Blender-IO
mesh.hide_viewport = False
mesh.hide_select = False
bpy.context.view_layer.objects.active = mesh
bpy.ops.object.mode_set(mode="EDIT")
bm_temp = bmesh.new()
mesh_data.transform(mesh.matrix_world, shape_keys=True)
bm_temp.from_mesh(mesh_data)
if not is_skin_mesh:
# TODO:
bmesh.ops.translate(bm_temp, vec=-mesh.location)
bmesh.ops.triangulate(bm_temp, faces=bm_temp.faces[:])
bm_temp.to_mesh(mesh_data)
bm_temp.free()
if mesh_data.has_custom_normals:
mesh_data.calc_loop_triangles()
mesh_data.calc_normals_split()
bm = bmesh.new()
bm.from_mesh(mesh_data)
# region temporary used
mat_id_dic = {
mat["name"]: i for i, mat in enumerate(self.json_dic["materials"])
}
material_slot_dic = {
i: mat.name for i, mat in enumerate(mesh.material_slots)
}
node_id_dic = {
node["name"]: i for i, node in enumerate(self.json_dic["nodes"])
}
v_group_name_dic = {i: vg.name for i, vg in enumerate(mesh.vertex_groups)}
fmin, fmax = -float_info.max, float_info.max  # float_info.min is the smallest positive float, so use -max for the minimum
unique_vertex_id = 0
# {(uv..., vertex_index): unique_vertex_id} (vertices sharing the same UVs and vertex index are treated as the same vertex and deduplicated)
unique_vertex_dic: Dict[Tuple[Any, ...], int] = {}
uvlayers_dic = {
i: uvlayer.name for i, uvlayer in enumerate(mesh_data.uv_layers)
}
# endregion temporary_used
primitive_index_bin_dic: Dict[Optional[int], bytes] = OrderedDict(
{mat_id_dic[mat.name]: b"" for mat in mesh.material_slots}
)
if not primitive_index_bin_dic:
primitive_index_bin_dic[None] = b""
primitive_index_vertex_count: Dict[Optional[int], int] = OrderedDict(
{mat_id_dic[mat.name]: 0 for mat in mesh.material_slots}
)
if not primitive_index_vertex_count:
primitive_index_vertex_count[None] = 0
shape_pos_bin_dic: Dict[str, bytes] = {}
shape_normal_bin_dic: Dict[str, bytes] = {}
shape_min_max_dic: Dict[str, List[List[float]]] = {}
morph_normal_diff_dic: Dict[str, List[List[float]]] = {}
if mesh_data.shape_keys is not None:
# Skip the 0th shape key (Basis)
shape_pos_bin_dic = OrderedDict(
{shape.name: b"" for shape in mesh_data.shape_keys.key_blocks[1:]}
)
shape_normal_bin_dic = OrderedDict(
{shape.name: b"" for shape in mesh_data.shape_keys.key_blocks[1:]}
)
shape_min_max_dic = OrderedDict(
{
shape.name: [[fmax, fmax, fmax], [fmin, fmin, fmin]]
for shape in mesh_data.shape_keys.key_blocks[1:]
}
)
morph_normal_diff_dic = (
self.fetch_morph_vertex_normal_difference(mesh_data)
if vrm_version.startswith("0.")
else {}
) # {morphname:{vertexid:[diff_X,diff_y,diff_z]}}
position_bin = b""
position_min_max = [[fmax, fmax, fmax], [fmin, fmin, fmin]]
normal_bin = b""
joints_bin = b""
weights_bin = b""
texcoord_bins = {uvlayer_id: b"" for uvlayer_id in uvlayers_dic.keys()}
float_vec4_packer = struct.Struct("<ffff").pack
float_vec3_packer = struct.Struct("<fff").pack
float_pair_packer = struct.Struct("<ff").pack
unsigned_int_scalar_packer = struct.Struct("<I").pack
unsigned_short_vec4_packer = struct.Struct("<HHHH").pack
def min_max(minmax: List[List[float]], position: List[float]) -> None:
for i in range(3):
minmax[0][i] = (
position[i] if position[i] < minmax[0][i] else minmax[0][i]
)
minmax[1][i] = (
position[i] if position[i] > minmax[1][i] else minmax[1][i]
)
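# Walk every face loop, deduplicate vertices by (UVs, normal, vertex index),
# and append the resulting indices to the per-material primitive buffers.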
for face in bm.faces:
for loop in face.loops:
uv_list = []
for uvlayer_name in uvlayers_dic.values():
uv_layer = bm.loops.layers.uv[uvlayer_name]
uv_list += [loop[uv_layer].uv[0], loop[uv_layer].uv[1]]
vert_normal = [0, 0, 0]
if mesh_data.has_custom_normals:
tri = mesh_data.loop_triangles[face.index]
vid = -1
for i, _vid in enumerate(tri.vertices):
if _vid == loop.vert.index:
vid = i
if vid == -1:
print("something wrong in custom normal export")
vert_normal = tri.split_normals[vid]
else:
if face.smooth:
vert_normal = loop.vert.normal
else:
vert_normal = face.normal
vertex_key = (*uv_list, *vert_normal, loop.vert.index)
cached_vert_id = unique_vertex_dic.get(
vertex_key
) # Returns None if the key is not present
if cached_vert_id is not None:
primitive_index = None
if face.material_index in material_slot_dic:
primitive_index = mat_id_dic[
material_slot_dic[face.material_index]
]
primitive_index_bin_dic[
primitive_index
] += unsigned_int_scalar_packer(cached_vert_id)
primitive_index_vertex_count[primitive_index] += 1
continue
unique_vertex_dic[vertex_key] = unique_vertex_id
for uvlayer_id, uvlayer_name in uvlayers_dic.items():
uv_layer = bm.loops.layers.uv[uvlayer_name]
uv = loop[uv_layer].uv
texcoord_bins[uvlayer_id] += float_pair_packer(
uv[0], 1 - uv[1]
) # The UV v-axis is flipped between Blender and glTF
for shape_name in shape_pos_bin_dic:
shape_layer = bm.verts.layers.shape[shape_name]
morph_pos = self.axis_blender_to_glb(
[
loop.vert[shape_layer][i] - loop.vert.co[i]
for i in range(3)
]
)
shape_pos_bin_dic[shape_name] += float_vec3_packer(*morph_pos)
if vrm_version.startswith("0."):
shape_normal_bin_dic[shape_name] += float_vec3_packer(
*self.axis_blender_to_glb(
morph_normal_diff_dic[shape_name][loop.vert.index]
)
)
min_max(shape_min_max_dic[shape_name], morph_pos)
if is_skin_mesh:
weight_and_joint_list: List[Tuple[float, int]] = []
for v_group in mesh_data.vertices[loop.vert.index].groups:
v_group_name = v_group_name_dic.get(v_group.group)
if v_group_name is None:
continue
joint_id = self.joint_id_from_node_name_solver(
v_group_name, node_id_dic
)
# joint_id is -1 when the group points to a bone that does not exist, so skip it
if joint_id == -1:
continue
# Ignore joints whose weight is zero so their values stay zero
# https://github.com/KhronosGroup/glTF/tree/f33f90ad9439a228bf90cde8319d851a52a3f470/specification/2.0#skinned-mesh-attributes
if v_group.weight < float_info.epsilon:
continue
weight_and_joint_list.append((v_group.weight, joint_id))
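# glTF stores at most 4 joint influences per vertex (JOINTS_0 / WEIGHTS_0),
# so pad the list to 4 entries and keep only the largest weights.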
while len(weight_and_joint_list) < 4:
weight_and_joint_list.append((0.0, 0))
weight_and_joint_list.sort(reverse=True)
if len(weight_and_joint_list) > 4:
print(
f"Joints on vertex id:{loop.vert.index} in: {mesh.name} are truncated"
)
weight_and_joint_list = weight_and_joint_list[:4]
weights = [weight for weight, _ in weight_and_joint_list]
joints = [joint for _, joint in weight_and_joint_list]
if sum(weights) < float_info.epsilon:
print(
f"No weight on vertex id:{loop.vert.index} in: {mesh.name}"
)
# Attach hips bone
hips_bone_name = self.armature.data["hips"]
hips_bone_index = next(
index
for index, node in enumerate(self.json_dic["nodes"])
if node["name"] == hips_bone_name
)
weights = [1.0, 0, 0, 0]
joints = [hips_bone_index, 0, 0, 0]
normalized_weights = (
vrm_types.normalize_weights_compatible_with_gl_float(
weights
)
)
joints_bin += unsigned_short_vec4_packer(*joints)
weights_bin += float_vec4_packer(*normalized_weights)
vert_location = self.axis_blender_to_glb(loop.vert.co)
position_bin += float_vec3_packer(*vert_location)
min_max(position_min_max, vert_location)
normal_bin += float_vec3_packer(
*self.axis_blender_to_glb(vert_normal)
)
primitive_index = None
if face.material_index in material_slot_dic:
primitive_index = mat_id_dic[
material_slot_dic[face.material_index]
]
primitive_index_bin_dic[
primitive_index
] += unsigned_int_scalar_packer(unique_vertex_id)
primitive_index_vertex_count[primitive_index] += 1
unique_vertex_id += 1 # noqa: SIM113
# DONE: index, position, uv, normal, position morph, joint, weight
# TODO: morph_normal, v_color...?
primitive_glbs_dic = OrderedDict(
{
mat_id: GlbBin(
index_bin,
"SCALAR",
GlConstants.UNSIGNED_INT,
primitive_index_vertex_count[mat_id],
None,
self.glb_bin_collector,
)
for mat_id, index_bin in primitive_index_bin_dic.items()
if index_bin != b""
}
)
pos_glb = GlbBin(
position_bin,
"VEC3",
GlConstants.FLOAT,
unique_vertex_id,
position_min_max,
self.glb_bin_collector,
)
nor_glb = GlbBin(
normal_bin,
"VEC3",
GlConstants.FLOAT,
unique_vertex_id,
None,
self.glb_bin_collector,
)
uv_glbs = [
GlbBin(
texcoord_bin,
"VEC2",
GlConstants.FLOAT,
unique_vertex_id,
None,
self.glb_bin_collector,
)
for texcoord_bin in texcoord_bins.values()
]
joints_glb = None
weights_glb = None
if is_skin_mesh:
joints_glb = GlbBin(
joints_bin,
"VEC4",
GlConstants.UNSIGNED_SHORT,
unique_vertex_id,
None,
self.glb_bin_collector,
)
weights_glb = GlbBin(
weights_bin,
"VEC4",
GlConstants.FLOAT,
unique_vertex_id,
None,
self.glb_bin_collector,
)
morph_pos_glbs = None
morph_normal_glbs = None
if len(shape_pos_bin_dic.keys()) != 0:
morph_pos_glbs = [
GlbBin(
morph_pos_bin,
"VEC3",
GlConstants.FLOAT,
unique_vertex_id,
morph_minmax,
self.glb_bin_collector,
)
for morph_pos_bin, morph_minmax in zip(
shape_pos_bin_dic.values(), shape_min_max_dic.values()
)
]
if vrm_version.startswith("0."):
morph_normal_glbs = [
GlbBin(
morph_normal_bin,
"VEC3",
GlConstants.FLOAT,
unique_vertex_id,
None,
self.glb_bin_collector,
)
for morph_normal_bin in shape_normal_bin_dic.values()
]
primitive_list = []
for primitive_id, index_glb in primitive_glbs_dic.items():
primitive: Dict[str, Any] = OrderedDict({"mode": 4})
if primitive_id is not None:
primitive["material"] = primitive_id
primitive["indices"] = index_glb.accessor_id
primitive["attributes"] = {
"POSITION": pos_glb.accessor_id,
"NORMAL": nor_glb.accessor_id,
}
if is_skin_mesh:
if joints_glb is None:
raise Exception("joints glb is None")
if weights_glb is None:
raise Exception("weights glb is None")
primitive["attributes"].update(
{
"JOINTS_0": joints_glb.accessor_id,
"WEIGHTS_0": weights_glb.accessor_id,
}
)
primitive["attributes"].update(
{
"TEXCOORD_{}".format(i): uv_glb.accessor_id
for i, uv_glb in enumerate(uv_glbs)
}
)
if len(shape_pos_bin_dic.keys()) != 0:
vrm_version = self.vrm_version
if vrm_version is None:
raise Exception("vrm version is None")
if vrm_version.startswith("0."):
if morph_pos_glbs and morph_normal_glbs:
primitive["targets"] = [
{
"POSITION": morph_pos_glb.accessor_id,
"NORMAL": morph_normal_glb.accessor_id,
}
for morph_pos_glb, morph_normal_glb in zip(
morph_pos_glbs, morph_normal_glbs
)
]
elif morph_pos_glbs:
primitive["targets"] = [
{"POSITION": morph_pos_glb.accessor_id}
for morph_pos_glb in morph_pos_glbs
]
primitive["extras"] = {
"targetNames": list(shape_pos_bin_dic.keys())
}
primitive_list.append(primitive)
if mesh.name not in self.mesh_name_to_index:
self.mesh_name_to_index[mesh.name] = len(self.json_dic["meshes"])
self.mesh_name_to_index[mesh.data.name] = len(self.json_dic["meshes"])
self.json_dic["meshes"].append(
OrderedDict({"name": mesh.data.name, "primitives": primitive_list})
)
bm.free()
# endregion hell
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.mode_set(mode="OBJECT")
def exporter_name(self) -> str:
v = version()
if os.environ.get("BLENDER_VRM_USE_TEST_EXPORTER_VERSION") == "true":
v = (999, 999, 999)
return "saturday06_blender_vrm_exporter_experimental_" + ".".join(map(str, v))
def gltf_meta_to_dic(self) -> None:
gltf_meta_dic = {
"extensionsUsed": [
"VRM",
"KHR_materials_unlit",
"KHR_texture_transform",
"VRMC_materials_mtoon",
],
"asset": {
"generator": self.exporter_name(),
"version": "2.0", # glTF version
},
}
self.json_dic.update(gltf_meta_dic)
def vrm_meta_to_dic(self) -> None:
# materialProperties is handled in material_to_dic()
# region vrm_extension
vrm_extension_dic: Dict[str, Any] = OrderedDict()
vrm_version = self.vrm_version
if vrm_version is None:
raise Exception("vrm version is None")
if vrm_version.startswith("0."):
vrm_extension_dic["exporterVersion"] = self.exporter_name()
vrm_extension_dic["specVersion"] = self.vrm_version
# region meta
vrm_extension_dic["meta"] = vrm_meta_dic = {}
# Err on the safe side
if vrm_version.startswith("0."):
required_vrm_metas = vrm_types.Vrm0.REQUIRED_METAS
vrm_metas = vrm_types.Vrm0.METAS
else:
required_vrm_metas = vrm_types.Vrm1.REQUIRED_METAS
vrm_metas = vrm_types.Vrm1.METAS
for k, v in required_vrm_metas.items():
vrm_meta_dic[k] = self.armature[k] if k in self.armature else v
for key in vrm_metas:
vrm_meta_dic[key] = self.armature[key] if key in self.armature else ""
if "texture" in self.armature:
thumbnail_index_list = [
i
for i, img in enumerate(self.glb_bin_collector.image_bins)
if img.name == self.armature["texture"]
]
if len(thumbnail_index_list) > 0:
self.json_dic["samplers"].append(
{
"magFilter": 9729,
"minFilter": 9729,
"wrapS": 10497,
"wrapT": 10497,
}
)
self.json_dic["textures"].append(
{
"sampler": len(self.json_dic["samplers"]) - 1,
"source": thumbnail_index_list[0],
},
)
vrm_meta_dic["texture"] = len(self.json_dic["textures"]) - 1
# endregion meta
# region humanoid
if vrm_version.startswith("0."):
vrm_extension_dic["humanoid"] = {"humanBones": []}
vrm0_humanoid_dic = vrm_extension_dic["humanoid"]
node_name_id_dic = {
node["name"]: i for i, node in enumerate(self.json_dic["nodes"])
}
for humanbone in (
vrm_types.HumanBones.requires + vrm_types.HumanBones.defines
):
if (
humanbone in self.armature.data
and self.armature.data[humanbone]
and self.armature.data[humanbone] in node_name_id_dic
):
vrm0_humanoid_dic["humanBones"].append(
{
"bone": humanbone,
"node": node_name_id_dic[self.armature.data[humanbone]],
# TODO: min, max, center, axisLength are probably unnecessary while useDefaultValues is True, so leave them out for now
"useDefaultValues": True,
}
)
vrm0_humanoid_dic.update(
self.textblock2json_dict(
"humanoid_params", vrm_types.Vrm0.HUMANOID_DEFAULT_PARAMS
)
)
else:
vrm_extension_dic["humanoid"] = {"humanBones": {}}
vrm_humanoid_dic: Dict[str, Any] = vrm_extension_dic["humanoid"]
node_name_id_dic = {
node["name"]: i for i, node in enumerate(self.json_dic["nodes"])
}
for humanbone in (
vrm_types.HumanBones.requires + vrm_types.HumanBones.defines
):
if (
humanbone in self.armature.data
and self.armature.data[humanbone]
and self.armature.data[humanbone] in node_name_id_dic
):
vrm_humanoid_dic["humanBones"].update(
{
humanbone: {
"node": node_name_id_dic[self.armature.data[humanbone]]
}
}
)
# endregion humanoid
# region firstPerson
vrm_fp_dic = self.textblock2json_dict(
"firstPerson_params", vrm_types.Vrm0.FIRST_PERSON_DEFAULT_PARAMS
)
vrm_extension_dic["firstPerson"] = vrm_fp_dic
if "firstPersonBone" in vrm_fp_dic and vrm_fp_dic["firstPersonBone"] != -1:
node_name = vrm_fp_dic["firstPersonBone"]
if node_name not in node_name_id_dic:
node_name = self.armature.data["head"]
vrm_fp_dic["firstPersonBone"] = node_name_id_dic[node_name]
if "meshAnnotations" in vrm_fp_dic:
for mesh_annotation in vrm_fp_dic["meshAnnotations"]:
matched_mesh_indices = [
i
for i, mesh in enumerate(self.json_dic["meshes"])
if mesh["name"] == mesh_annotation["mesh"]
]
mesh_annotation["mesh"] = (matched_mesh_indices + [-1])[0]
# TODO VRM1.0 is using node index that has mesh
# TODO
if vrm_version.startswith("1."):
vrm_extension_dic["lookAt"] = {}
self.textblock2json("lookat_params", {})
# endregion firstPerson
# region blendShapeMaster
blendshape_group_name = (
"blendShapeMaster" if vrm_version.startswith("0.") else "blendShape"
)
vrm_extension_dic[blendshape_group_name] = vrm_blend_shape_groups_dic = {}
blend_shape_groups = self.textblock2json_list("blendshape_group", [])
# Convert mesh from name to id
# Convert weight from the 0-1 range to 0-100
# Convert shape_index from name to index
def clamp(min_val: float, val: float, max_val: float) -> float:
if max_val >= val:
if val >= min_val:
return val
print(
"blendshapeGroup weight is between 0 and 1, value is {}".format(val)
)
return min_val
print("blendshapeGroup weight is between 0 and 1, value is {}".format(val))
return max_val
for blend_shape_group in blend_shape_groups:
binds = list(blend_shape_group.get("binds", []))
for bind in binds:
# TODO VRM1.0 is using node index that has mesh
mesh_index = self.mesh_name_to_index.get(bind["mesh"])
if mesh_index is None:
blend_shape_group["binds"].remove(bind)
continue
bind["mesh"] = mesh_index
target_names = json_list_get(
self.json_dic,
["meshes", bind["mesh"], "primitives", 0, "extras", "targetNames"],
[],
)
if bind["index"] not in target_names:
blend_shape_group["binds"].remove(bind)
continue
bind["index"] = target_names.index(bind["index"])
bind["weight"] = (
clamp(0, bind["weight"] * 100, 100)
if vrm_version.startswith("0.")
else clamp(0, bind["weight"], 1)
)
if vrm_version.startswith("1."):
for matval in blend_shape_group["materialValues"]:
matval["material"] = [
i
for i, mat in enumerate(self.json_dic["materials"])
if mat["name"] == matval["material"]
][0]
# TODO: handle isBinary, a flag that forces the value to 0 or 1
vrm_blend_shape_groups_dic["blendShapeGroups"] = blend_shape_groups
# endregion blendShapeMaster
# region secondaryAnimation
springbone_name = (
"springBone" if vrm_version.startswith("1.") else "secondaryAnimation"
)
vrm_extension_dic[springbone_name] = {"boneGroups": [], "colliderGroups": []}
# region colliderGroups
# Convert the armature's child empties into collider groups
collider_group_list = []
empty_dic: Dict[int, Any] = {
node_name_id_dic[ch.parent_bone]: []
for ch in self.armature.children
if ch.type == "EMPTY"
and ch.empty_display_type == "SPHERE"
and ch.parent_bone in node_name_id_dic
}
for child_empty in [
ch
for ch in self.armature.children
if ch.type == "EMPTY"
and ch.empty_display_type == "SPHERE"
and ch.parent_bone in node_name_id_dic
]:
empty_dic[node_name_id_dic[child_empty.parent_bone]].append(child_empty)
for node_id, empty_objs in empty_dic.items():
colliders: List[Any] = []
collider_group = {"node": node_id, "colliders": colliders}
for empty in empty_objs:
collider = {}
empty_offset_pos = [
empty.matrix_world.to_translation()[i]
- (
self.armature.matrix_world
@ Matrix.Translation(
self.armature.data.bones[empty.parent_bone].head_local
)
).to_translation()[i]
for i in range(3)
]
if vrm_version.startswith("0."):
collider["radius"] = empty.empty_display_size
collider["offset"] = OrderedDict(
zip(
("x", "y", "z"),
self.axis_blender_to_glb(empty_offset_pos),
)
)
collider["offset"]["z"] = collider["offset"]["z"] * -1
else:
collider["size"] = [empty.empty_display_size]
collider["offset"] = self.axis_blender_to_glb(empty_offset_pos)
collider["shapeType"] = "sphere"
colliders.append(collider)
collider_group_list.append(collider_group)
vrm_extension_dic[springbone_name]["colliderGroups"] = collider_group_list
# endregion colliderGroups
# region boneGroup
# Convert bone names to node ids
# Also convert collider_group names to colliderGroup indices
collider_node_id_list = [c_g["node"] for c_g in collider_group_list]
bone_groups = self.textblock2json_list("spring_bone", [])
for bone_group in bone_groups:
center_node_name = bone_group.get("center")
if (
isinstance(center_node_name, str)
and center_node_name in node_name_id_dic
):
bone_group["center"] = node_name_id_dic[center_node_name]
else:
bone_group["center"] = -1
bone_group["bones"] = [
node_name_id_dic[name]
for name in bone_group["bones"]
if name in node_name_id_dic
]
bone_group["colliderGroups"] = [
collider_node_id_list.index(node_name_id_dic[name])
for name in bone_group["colliderGroups"]
if name in node_name_id_dic
and node_name_id_dic[name] in collider_node_id_list
]
vrm_extension_dic[springbone_name]["boneGroups"] = bone_groups
# endregion boneGroup
# endregion secondaryAnimation
extension_name = "VRM" if vrm_version.startswith("0.") else "VRMC_vrm"
self.json_dic["extensions"][extension_name].update(vrm_extension_dic)
# endregion vrm_extension
# region secondary
self.json_dic["nodes"].append(
{
"name": "secondary",
"translation": [0.0, 0.0, 0.0],
"rotation": [0.0, 0.0, 0.0, 1.0],
"scale": [1.0, 1.0, 1.0],
}
)
self.json_dic["scenes"][0]["nodes"].append(len(self.json_dic["nodes"]) - 1)
def finalize(self) -> None:
bin_json, self.bin = self.glb_bin_collector.pack_all()
self.json_dic.update(bin_json)
if not self.json_dic["meshes"]:
del self.json_dic["meshes"]
if not self.json_dic["materials"]:
del self.json_dic["materials"]
self.result = pack_glb(self.json_dic, self.bin)
if self.use_dummy_armature:
bpy.data.objects.remove(self.armature, do_unlink=True)
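# GLB container layout (glTF 2.0): a 12-byte header ("glTF", version, total
# length) followed by a JSON chunk padded to 4 bytes with spaces and a binary
# chunk padded with zeros. The 28 bytes added to the total below account for
# the header plus the two 8-byte chunk headers.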
def pack_glb(json_dict: Dict[str, Any], binary_chunk: bytes) -> bytes:
magic = b"glTF" + struct.pack("<I", 2)
json_str = json.dumps(json_dict).encode("utf-8")
if len(json_str) % 4 != 0:
json_str += b"\x20" * (4 - len(json_str) % 4)
json_size = struct.pack("<I", len(json_str))
if len(binary_chunk) % 4 != 0:
binary_chunk += b"\x00" * (4 - len(binary_chunk) % 4)
bin_size = struct.pack("<I", len(binary_chunk))
total_size = struct.pack(
"<I", len(json_str) + len(binary_chunk) + 28
) # include header size
return (
magic
+ total_size
+ json_size
+ b"JSON"
+ json_str
+ bin_size
+ b"BIN\x00"
+ binary_chunk
)
| [] | [] | ["BLENDER_VRM_USE_TEST_EXPORTER_VERSION"] | [] | ["BLENDER_VRM_USE_TEST_EXPORTER_VERSION"] | python | 1 | 0 |
test/e2e_test.go | //
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build e2e
// +build e2e
package test
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"testing"
"time"
ftime "time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/registry"
"github.com/google/go-containerregistry/pkg/v1/random"
"github.com/google/go-containerregistry/pkg/v1/remote"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/sigstore/cosign/cmd/cosign/cli"
"github.com/sigstore/cosign/cmd/cosign/cli/attach"
"github.com/sigstore/cosign/cmd/cosign/cli/attest"
"github.com/sigstore/cosign/cmd/cosign/cli/download"
"github.com/sigstore/cosign/cmd/cosign/cli/generate"
"github.com/sigstore/cosign/cmd/cosign/cli/options"
"github.com/sigstore/cosign/cmd/cosign/cli/publickey"
"github.com/sigstore/cosign/cmd/cosign/cli/sign"
"github.com/sigstore/cosign/cmd/cosign/cli/upload"
cliverify "github.com/sigstore/cosign/cmd/cosign/cli/verify"
"github.com/sigstore/cosign/pkg/cosign"
"github.com/sigstore/cosign/pkg/cosign/kubernetes"
cremote "github.com/sigstore/cosign/pkg/cosign/remote"
ociremote "github.com/sigstore/cosign/pkg/oci/remote"
"github.com/sigstore/cosign/pkg/sget"
sigs "github.com/sigstore/cosign/pkg/signature"
"github.com/sigstore/sigstore/pkg/signature/payload"
)
const (
serverEnv = "REKOR_SERVER"
rekorURL = "https://rekor.sigstore.dev"
)
var keyPass = []byte("hello")
var passFunc = func(_ bool) ([]byte, error) {
return keyPass, nil
}
var verify = func(keyRef, imageRef string, checkClaims bool, annotations map[string]interface{}, attachment string) error {
cmd := cliverify.VerifyCommand{
KeyRef: keyRef,
RekorURL: rekorURL,
CheckClaims: checkClaims,
Annotations: sigs.AnnotationsMap{Annotations: annotations},
Attachment: attachment,
}
args := []string{imageRef}
return cmd.Exec(context.Background(), args)
}
func TestSignVerify(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// So should download
mustErr(download.SignatureCmd(ctx, options.RegistryOptions{}, imgName), t)
// Now sign the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", "", false, false, ""), t)
// Now verify and download should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
must(download.SignatureCmd(ctx, options.RegistryOptions{}, imgName), t)
// Look for a specific annotation
mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}, ""), t)
// Sign the image with an annotation
annotations := map[string]interface{}{"foo": "bar"}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, annotations, []string{imgName}, "", true, "", "", false, false, ""), t)
// It should match this time.
must(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}, ""), t)
// But two doesn't work
mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar", "baz": "bat"}, ""), t)
}
func TestSignVerifyClean(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, _ = mkimage(t, imgName)
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Now sign the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", "", false, false, ""), t)
// Now verify and download should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
must(download.SignatureCmd(ctx, options.RegistryOptions{}, imgName), t)
// Now clean signature from the given image
must(cli.CleanCmd(ctx, options.RegistryOptions{}, imgName), t)
// It doesn't work
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
}
func TestAttestVerify(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-attest-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
verifyAttestation := cliverify.VerifyAttestationCommand{
KeyRef: pubKeyPath,
}
// Fail case when using without type and policy flag
mustErr(verifyAttestation.Exec(ctx, []string{imgName}), t)
slsaAttestation := `{ "builder": { "id": "2" }, "recipe": {} }`
slsaAttestationPath := filepath.Join(td, "attestation.slsa.json")
if err := os.WriteFile(slsaAttestationPath, []byte(slsaAttestation), 0600); err != nil {
t.Fatal(err)
}
// Now attest the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(attest.AttestCmd(ctx, ko, options.RegistryOptions{}, imgName, "", false, slsaAttestationPath, false,
"custom", false, ftime.Duration(30*time.Second)), t)
// Use cue to verify attestation
policyPath := filepath.Join(td, "policy.cue")
verifyAttestation.PredicateType = "slsaprovenance"
verifyAttestation.Policies = []string{policyPath}
// Fail case
cuePolicy := `builder: id: "1"`
if err := os.WriteFile(policyPath, []byte(cuePolicy), 0600); err != nil {
t.Fatal(err)
}
// Success case
cuePolicy = `builder: id: "2"`
if err := os.WriteFile(policyPath, []byte(cuePolicy), 0600); err != nil {
t.Fatal(err)
}
must(verifyAttestation.Exec(ctx, []string{imgName}), t)
// Look for a specific annotation
mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}, ""), t)
}
func TestBundle(t *testing.T) {
// turn on the tlog
defer setenv(t, options.ExperimentalEnv, "1")()
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
ko := sign.KeyOpts{
KeyRef: privKeyPath,
PassFunc: passFunc,
RekorURL: rekorURL,
}
// Sign the image
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", "", false, false, ""), t)
// Make sure verify works
must(verify(pubKeyPath, imgName, true, nil, ""), t)
// Make sure offline verification works with bundling
// use rekor prod since we have hardcoded the public key
os.Setenv(serverEnv, "notreal")
must(verify(pubKeyPath, imgName, true, nil, ""), t)
}
func TestDuplicateSign(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
ref, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// So should download
mustErr(download.SignatureCmd(ctx, options.RegistryOptions{}, imgName), t)
// Now sign the image
ko := sign.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", "", false, false, ""), t)
// Now verify and download should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
must(download.SignatureCmd(ctx, options.RegistryOptions{}, imgName), t)
// Signing again should work just fine...
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", "", false, false, ""), t)
se, err := ociremote.SignedEntity(ref, ociremote.WithRemoteOptions(registryClientOpts(ctx)...))
must(err, t)
sigs, err := se.Signatures()
must(err, t)
signatures, err := sigs.Get()
must(err, t)
if len(signatures) > 1 {
t.Errorf("expected there to only be one signature, got %v", signatures)
}
}
func TestKeyURLVerify(t *testing.T) {
// TODO: re-enable once distroless images are being signed by the new client
t.Skip()
// Verify that an image can be verified via key url
keyRef := "https://raw.githubusercontent.com/GoogleContainerTools/distroless/main/cosign.pub"
img := "gcr.io/distroless/base:latest"
must(verify(keyRef, img, true, nil, ""), t)
}
func TestGenerateKeyPairEnvVar(t *testing.T) {
defer setenv(t, "COSIGN_PASSWORD", "foo")()
keys, err := cosign.GenerateKeyPair(generate.GetPass)
if err != nil {
t.Fatal(err)
}
if _, err := cosign.LoadECDSAPrivateKey(keys.PrivateBytes, []byte("foo")); err != nil {
t.Fatal(err)
}
}
func TestGenerateKeyPairK8s(t *testing.T) {
td := t.TempDir()
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if err := os.Chdir(td); err != nil {
t.Fatal(err)
}
defer func() {
os.Chdir(wd)
}()
password := "foo"
defer setenv(t, "COSIGN_PASSWORD", password)()
ctx := context.Background()
name := "cosign-secret"
namespace := "default"
if err := kubernetes.KeyPairSecret(ctx, fmt.Sprintf("k8s://%s/%s", namespace, name), generate.GetPass); err != nil {
t.Fatal(err)
}
// make sure the secret actually exists
client, err := kubernetes.Client()
if err != nil {
t.Fatal(err)
}
s, err := client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
if v, ok := s.Data["cosign.password"]; !ok || string(v) != password {
t.Fatalf("password is incorrect, got %v expected %v", v, "foo")
}
}
func TestMultipleSignatures(t *testing.T) {
repo, stop := reg(t)
defer stop()
td1 := t.TempDir()
td2 := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, priv1, pub1 := keypair(t, td1)
_, priv2, pub2 := keypair(t, td2)
ctx := context.Background()
// Verify should fail at first for both keys
mustErr(verify(pub1, imgName, true, nil, ""), t)
mustErr(verify(pub2, imgName, true, nil, ""), t)
// Now sign the image with one key
ko := sign.KeyOpts{KeyRef: priv1, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", "", false, false, ""), t)
// Now verify should work with that one, but not the other
must(verify(pub1, imgName, true, nil, ""), t)
mustErr(verify(pub2, imgName, true, nil, ""), t)
// Now sign with the other key too
ko.KeyRef = priv2
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", "", false, false, ""), t)
// Now verify should work with both
must(verify(pub1, imgName, true, nil, ""), t)
must(verify(pub2, imgName, true, nil, ""), t)
}
func TestSignBlob(t *testing.T) {
blob := "someblob"
td1 := t.TempDir()
td2 := t.TempDir()
t.Cleanup(func() {
os.RemoveAll(td1)
os.RemoveAll(td2)
})
bp := filepath.Join(td1, blob)
if err := os.WriteFile(bp, []byte(blob), 0644); err != nil {
t.Fatal(err)
}
_, privKeyPath1, pubKeyPath1 := keypair(t, td1)
_, _, pubKeyPath2 := keypair(t, td2)
ctx := context.Background()
ko1 := sign.KeyOpts{
KeyRef: pubKeyPath1,
}
ko2 := sign.KeyOpts{
KeyRef: pubKeyPath2,
}
// Verify should fail on a bad input
mustErr(cliverify.VerifyBlobCmd(ctx, ko1, "", "badsig", blob), t)
mustErr(cliverify.VerifyBlobCmd(ctx, ko2, "", "badsig", blob), t)
// Now sign the blob with one key
ko := sign.KeyOpts{
KeyRef: privKeyPath1,
PassFunc: passFunc,
}
sig, err := sign.SignBlobCmd(ctx, ko, options.RegistryOptions{}, bp, true, "", "", time.Duration(30*time.Second))
if err != nil {
t.Fatal(err)
}
// Now verify should work with that one, but not the other
must(cliverify.VerifyBlobCmd(ctx, ko1, "", string(sig), bp), t)
mustErr(cliverify.VerifyBlobCmd(ctx, ko2, "", string(sig), bp), t)
}
func TestGenerate(t *testing.T) {
repo, stop := reg(t)
defer stop()
imgName := path.Join(repo, "cosign-e2e")
_, desc, cleanup := mkimage(t, imgName)
defer cleanup()
// Generate the payload for the image, and check the digest.
b := bytes.Buffer{}
must(generate.GenerateCmd(context.Background(), options.RegistryOptions{}, imgName, nil, &b), t)
ss := payload.SimpleContainerImage{}
must(json.Unmarshal(b.Bytes(), &ss), t)
equals(desc.Digest.String(), ss.Critical.Image.DockerManifestDigest, t)
// Now try with some annotations.
b.Reset()
a := map[string]interface{}{"foo": "bar"}
must(generate.GenerateCmd(context.Background(), options.RegistryOptions{}, imgName, a, &b), t)
must(json.Unmarshal(b.Bytes(), &ss), t)
equals(desc.Digest.String(), ss.Critical.Image.DockerManifestDigest, t)
equals(ss.Optional["foo"], "bar", t)
}
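// keypair generates a password-protected cosign key pair inside td, writes it
// to cosign.key / cosign.pub, and returns the keys plus both file paths.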
func keypair(t *testing.T, td string) (*cosign.Keys, string, string) {
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if err := os.Chdir(td); err != nil {
t.Fatal(err)
}
defer func() {
os.Chdir(wd)
}()
keys, err := cosign.GenerateKeyPair(passFunc)
if err != nil {
t.Fatal(err)
}
privKeyPath := filepath.Join(td, "cosign.key")
if err := os.WriteFile(privKeyPath, keys.PrivateBytes, 0600); err != nil {
t.Fatal(err)
}
pubKeyPath := filepath.Join(td, "cosign.pub")
if err := os.WriteFile(pubKeyPath, keys.PublicBytes, 0600); err != nil {
t.Fatal(err)
}
return keys, privKeyPath, pubKeyPath
}
func TestUploadDownload(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
ctx := context.Background()
testCases := map[string]struct {
signature string
signatureType attach.SignatureArgType
expectedErr bool
}{
"file containing signature": {
signature: "testsignaturefile",
signatureType: attach.FileSignature,
expectedErr: false,
},
"raw signature as argument": {
signature: "testsignatureraw",
signatureType: attach.RawSignature,
expectedErr: false,
},
"empty signature as argument": {
signature: "",
signatureType: attach.RawSignature,
expectedErr: true,
},
}
imgName := path.Join(repo, "cosign-e2e")
for testName, testCase := range testCases {
t.Run(testName, func(t *testing.T) {
ref, _, cleanup := mkimage(t, imgName)
payload := "testpayload"
payloadPath := mkfile(payload, td, t)
signature := base64.StdEncoding.EncodeToString([]byte(testCase.signature))
var sigRef string
if testCase.signatureType == attach.FileSignature {
sigRef = mkfile(signature, td, t)
} else {
sigRef = signature
}
// Upload it!
err := attach.SignatureCmd(ctx, options.RegistryOptions{}, sigRef, payloadPath, imgName)
if testCase.expectedErr {
mustErr(err, t)
} else {
must(err, t)
}
// Now download it!
se, err := ociremote.SignedEntity(ref, ociremote.WithRemoteOptions(registryClientOpts(ctx)...))
must(err, t)
sigs, err := se.Signatures()
must(err, t)
signatures, err := sigs.Get()
must(err, t)
if testCase.expectedErr {
if len(signatures) != 0 {
t.Fatalf("unexpected signatures %d, wanted 0", len(signatures))
}
} else {
if len(signatures) != 1 {
t.Fatalf("unexpected signatures %d, wanted 1", len(signatures))
}
if b64sig, err := signatures[0].Base64Signature(); err != nil {
t.Fatalf("Base64Signature() = %v", err)
} else if diff := cmp.Diff(b64sig, signature); diff != "" {
t.Error(diff)
}
if p, err := signatures[0].Payload(); err != nil {
t.Fatalf("Payload() = %v", err)
} else if diff := cmp.Diff(p, []byte(payload)); diff != "" {
t.Error(diff)
}
}
// Now delete it!
cleanup()
})
}
}
func TestUploadBlob(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
ctx := context.Background()
imgName := path.Join(repo, "/cosign-upload-e2e")
payload := "testpayload"
payloadPath := mkfile(payload, td, t)
// Upload it!
files := []cremote.File{cremote.FileFromFlag(payloadPath)}
must(upload.BlobCmd(ctx, options.RegistryOptions{}, files, "", imgName), t)
// Check it
ref, err := name.ParseReference(imgName)
if err != nil {
t.Fatal(err)
}
// Now download it with sget (this should fail by tag)
if err := sget.New(imgName, "", os.Stdout).Do(ctx); err == nil {
t.Error("expected download to fail")
}
img, err := remote.Image(ref)
if err != nil {
t.Fatal(err)
}
dgst, err := img.Digest()
if err != nil {
t.Fatal(err)
}
result := &bytes.Buffer{}
// But pass by digest
if err := sget.New(imgName+"@"+dgst.String(), "", result).Do(ctx); err != nil {
t.Fatal(err)
}
b, err := io.ReadAll(result)
if err != nil {
t.Fatal(err)
}
if string(b) != payload {
t.Errorf("expected contents to be %s, got %s", payload, string(b))
}
}
func TestAttachSBOM(t *testing.T) {
repo, stop := reg(t)
defer stop()
ctx := context.Background()
imgName := path.Join(repo, "sbom-image")
img, _, cleanup := mkimage(t, imgName)
defer cleanup()
out := bytes.Buffer{}
_, err := download.SBOMCmd(ctx, options.RegistryOptions{}, img.Name(), &out)
if err == nil {
t.Fatal("Expected error")
}
t.Log(out.String())
out.Reset()
// Upload it!
must(attach.SBOMCmd(ctx, options.RegistryOptions{}, "./testdata/bom-go-mod.spdx", "spdx", imgName), t)
sboms, err := download.SBOMCmd(ctx, options.RegistryOptions{}, imgName, &out)
if err != nil {
t.Fatal(err)
}
t.Log(out.String())
if len(sboms) != 1 {
t.Fatalf("Expected one sbom, got %d", len(sboms))
}
want, err := os.ReadFile("./testdata/bom-go-mod.spdx")
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(string(want), sboms[0]); diff != "" {
t.Errorf("diff: %s", diff)
}
// Generate key pairs to sign the sbom
td1 := t.TempDir()
td2 := t.TempDir()
_, privKeyPath1, pubKeyPath1 := keypair(t, td1)
_, _, pubKeyPath2 := keypair(t, td2)
// Verify should fail on a bad input
mustErr(verify(pubKeyPath1, imgName, true, nil, "sbom"), t)
mustErr(verify(pubKeyPath2, imgName, true, nil, "sbom"), t)
// Now sign the sbom with one key
ko1 := sign.KeyOpts{KeyRef: privKeyPath1, PassFunc: passFunc}
must(sign.SignCmd(ctx, ko1, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", "", false, false, "sbom"), t)
// Now verify should work with that one, but not the other
must(verify(pubKeyPath1, imgName, true, nil, "sbom"), t)
mustErr(verify(pubKeyPath2, imgName, true, nil, "sbom"), t)
}
func setenv(t *testing.T, k, v string) func() {
if err := os.Setenv(k, v); err != nil {
t.Fatalf("error setitng env: %v", err)
}
return func() {
os.Unsetenv(k)
}
}
func TestTlog(t *testing.T) {
repo, stop := reg(t)
defer stop()
td := t.TempDir()
imgName := path.Join(repo, "cosign-e2e")
_, _, cleanup := mkimage(t, imgName)
defer cleanup()
_, privKeyPath, pubKeyPath := keypair(t, td)
ctx := context.Background()
// Verify should fail at first
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// Now sign the image without the tlog
ko := sign.KeyOpts{
KeyRef: privKeyPath,
PassFunc: passFunc,
RekorURL: rekorURL,
}
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", "", false, false, ""), t)
// Now verify should work!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
// Now we turn on the tlog!
defer setenv(t, options.ExperimentalEnv, "1")()
// Verify shouldn't work since we haven't put anything in it yet.
mustErr(verify(pubKeyPath, imgName, true, nil, ""), t)
// Sign again with the tlog env var on
must(sign.SignCmd(ctx, ko, options.RegistryOptions{}, nil, []string{imgName}, "", true, "", "", false, false, ""), t)
// And now verify works!
must(verify(pubKeyPath, imgName, true, nil, ""), t)
}
func TestGetPublicKeyCustomOut(t *testing.T) {
td := t.TempDir()
keys, privKeyPath, _ := keypair(t, td)
ctx := context.Background()
outFile := "output.pub"
outPath := filepath.Join(td, outFile)
outWriter, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE, 0600)
must(err, t)
pk := publickey.Pkopts{
KeyRef: privKeyPath,
}
must(publickey.GetPublicKey(ctx, pk, publickey.NamedWriter{Name: outPath, Writer: outWriter}, passFunc), t)
output, err := os.ReadFile(outPath)
must(err, t)
equals(keys.PublicBytes, output, t)
}
func mkfile(contents, td string, t *testing.T) string {
f, err := os.CreateTemp(td, "")
if err != nil {
t.Fatal(err)
}
defer f.Close()
if _, err := f.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
return f.Name()
}
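// mkimage pushes a small random image to the test registry under name n and
// returns its reference, its descriptor, and a cleanup func that also removes
// the associated signature tag.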
func mkimage(t *testing.T, n string) (name.Reference, *remote.Descriptor, func()) {
ref, err := name.ParseReference(n, name.WeakValidation)
if err != nil {
t.Fatal(err)
}
img, err := random.Image(512, 5)
if err != nil {
t.Fatal(err)
}
regClientOpts := registryClientOpts(context.Background())
if err := remote.Write(ref, img, regClientOpts...); err != nil {
t.Fatal(err)
}
remoteImage, err := remote.Get(ref, regClientOpts...)
if err != nil {
t.Fatal(err)
}
cleanup := func() {
_ = remote.Delete(ref, regClientOpts...)
ref, _ := ociremote.SignatureTag(ref.Context().Digest(remoteImage.Descriptor.Digest.String()), ociremote.WithRemoteOptions(regClientOpts...))
_ = remote.Delete(ref, regClientOpts...)
}
return ref, remoteImage, cleanup
}
func must(err error, t *testing.T) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
func mustErr(err error, t *testing.T) {
t.Helper()
if err == nil {
t.Fatal("expected error")
}
}
func equals(v1, v2 interface{}, t *testing.T) {
if diff := cmp.Diff(v1, v2); diff != "" {
t.Error(diff)
}
}
func reg(t *testing.T) (string, func()) {
repo := os.Getenv("COSIGN_TEST_REPO")
if repo != "" {
return repo, func() {}
}
t.Log("COSIGN_TEST_REPO unset, using fake registry")
r := httptest.NewServer(registry.New())
u, err := url.Parse(r.URL)
if err != nil {
t.Fatal(err)
}
return u.Host, r.Close
}
func registryClientOpts(ctx context.Context) []remote.Option {
return []remote.Option{
remote.WithAuthFromKeychain(authn.DefaultKeychain),
remote.WithContext(ctx),
}
}
| ["\"COSIGN_TEST_REPO\""] | [] | ["COSIGN_TEST_REPO"] | [] | ["COSIGN_TEST_REPO"] | go | 1 | 0 |
main.go | package main
import (
"archive/zip"
"bytes"
"fmt"
"github.com/labstack/gommon/log"
flag "github.com/spf13/pflag"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
)
func init() {
log.SetPrefix("unzip")
format := strings.ToLower(os.Getenv("LOGGING_FORMAT"))
if format != "json" {
log.SetHeader(`${prefix}, ${level} ${short_file}(${line})`)
}
log.SetOutput(os.Stdout)
level := strings.ToLower(os.Getenv("LOGGING_LEVEL"))
x := levelOf(level)
log.SetLevel(x)
}
func main() {
var destination string
flag.Usage = func() {
fmt.Printf("Unzip File to Destination Folder\n\nUSAGE:\n%s <filename> [OPTIONS]\n\nOPTIONS:\n", os.Args[0])
flag.PrintDefaults()
fmt.Println()
}
flag.StringVarP(&destination, "exdir", "d", ".", "Directory where files will be extracted into")
flag.Parse()
if len(os.Args) <= 1 {
flag.Usage()
os.Exit(0)
}
if strings.HasSuffix(os.Args[1], "help") {
flag.Usage()
os.Exit(0)
}
if strings.HasSuffix(os.Args[1], "version") {
fmt.Printf("tiny-unzip %s (%s %s)\n", AppVersion, AppRevision, AppBuildDate)
os.Exit(0)
}
zipFilename := os.Args[1]
err := Unzip(zipFilename, destination)
if err != nil {
fmt.Println("Err", err)
}
}
func Unzip(zipFilename string, destination string) error {
archive, err := zip.OpenReader(zipFilename)
if err != nil {
return err
}
defer archive.Close()
linkMap := make(map[string]string, 0)
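// Symbolic links are collected here and created only after all regular files
// have been extracted, since their targets may not exist yet during extraction.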
for _, f := range archive.File {
filePath := filepath.Join(destination, f.Name)
if f.FileInfo().IsDir() {
_ = os.MkdirAll(filePath, os.ModePerm)
continue
}
dir := filepath.Dir(filePath)
_ = os.MkdirAll(dir, os.ModePerm)
fileInArchive, err := f.Open()
if err != nil {
return err
}
//log.Debugf("%s %s", f.Name, f.FileInfo().Mode())
if f.Mode()&fs.ModeSymlink != 0 {
//log.Debug(f.Mode() & fs.ModeSymlink)
buf := new(bytes.Buffer)
_, err := io.Copy(buf, fileInArchive)
if err != nil {
return err
}
linkMap[f.Name] = buf.String()
continue
}
destFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
if _, err := io.Copy(destFile, fileInArchive); err != nil {
return err
}
destFile.Close()
fileInArchive.Close()
}
wd, err := os.Getwd()
if err != nil {
return err
}
err = os.Chdir(destination)
if err != nil {
return err
}
for k, v := range linkMap {
log.Debugf("%s => %s", v, k)
err = os.Symlink(v, k)
if err != nil {
log.Error(err)
}
}
_ = os.Chdir(wd)
return nil
}
func levelOf(s string) log.Lvl {
switch s {
case "debug":
return log.DEBUG
case "info":
return log.INFO
case "warn":
return log.WARN
case "error":
return log.ERROR
default:
return log.OFF
}
}
| ["\"LOGGING_FORMAT\"", "\"LOGGING_LEVEL\""] | [] | ["LOGGING_LEVEL", "LOGGING_FORMAT"] | [] | ["LOGGING_LEVEL", "LOGGING_FORMAT"] | go | 2 | 0 |
scripts/populate_archive.py | """Populates a Vesper web archive from an old desktop archive."""
from collections import defaultdict
import datetime
import os
import random
import sys
import time
# Set up Django.
os.environ['DJANGO_SETTINGS_MODULE'] = 'vesper.django.project.settings'
import django
django.setup()
from django.db import transaction
from vesper.archive.archive import Archive
from vesper.archive.recording import Recording as OldRecording
from vesper.django.app.models import (
AnnotationInfo, Clip, DeviceConnection, Recording, RecordingChannel,
Station)
from vesper.singletons import archive, clip_manager
import vesper.django.app.model_utils as model_utils
import vesper.util.audio_file_utils as audio_file_utils
import vesper.util.os_utils as os_utils
import vesper.util.time_utils as time_utils
# TODO: Generalize this and package it as a Vesper importer that can be
# used in an import command.
#
# The importer requires that all stations, devices, processors, and
# annotations needed by the import are already present in the target
# archive.
#
# Command arguments will include:
#
# * The full path of the source archive directory, which must be on the server.
#
# * YAML describing:
#
# * A mapping from source archive station names to target archive
# (station name, microphone output name) pairs.
#
# * A mapping from source archive detector names to target archive
# detector names.
#
# * Recording schedules, if needed. Recording schedules are needed
# if and only if the source archive does not include recording
# metadata.
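# A hypothetical sketch of the YAML described above (all names are
# illustrative, not taken from a real archive):
#
# station_name_mapping:
#     "Baldy": ["Baldy", "SMX-NFC Output"]
# detector_name_mapping:
#     "Tseep": "Old Bird Tseep Detector"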
_CREATE_FAKE_RECORDINGS = False
"""Set `True` if and only if source archive does not contain recordings."""
_CLIP_COUNT_LIMIT = 1000000000
"""
The approximate maximum number of clips to process.
The script processes clips in variable, night-sized chunks, so the actual
number of clips processed may differ from the specified number.
"""
_DETECTOR_NAME_ALIASES = {
'Old Bird Thrush Detector': ['Thrush'],
'Old Bird Tseep Detector': ['Tseep']
}
# _DETECTOR_NAME_ALIASES = {
# 'Old Bird Tseep Detector': ['Tseep']
# }
# Assumptions about station and microphone model names:
#
# * If a station name ends with one of the elements of _UNCORRECTED_MIC_NAMES,
# the station name is a combination of a station name and an uncorrected
# microphone model name.
#
# * If a station name does not end with one of the elements of
# _UNCORRECTED_MIC_NAMES, the station name is just a plain station name
# and the name of its microphone model is _DEFAULT_MIC_NAME.
_STATION_NAME_CORRECTIONS = {
"St Mary's": 'St Mary'
}
_UNCORRECTED_MIC_NAMES = frozenset(['21c', 'NFC', 'SMX-II'])
_MIC_NAME_CORRECTIONS = {
'NFC': 'SMX-NFC'
}
_DEFAULT_MIC_NAME = 'SMX-NFC'
# _DEFAULT_MIC_NAME = '21c'
_SHOW_RECORDINGS = False
_RECORDING_PROGRESS_PERIOD = 1000
_SHOW_CLIPS = False
_CLIP_FILES_AVAILABLE = True
_COPY_CLIP_FILES = True
_NON_CALL_CLIP_INCLUSION_PROBABILITY = .11
_PAUSE_FILE_PATH = '/Users/Harold/Desktop/Pause'
_PAUSE_CHECK_PERIOD = 100
_ONE_NIGHT = datetime.timedelta(days=1)
def _main():
dest_archive_dir_path = os.getcwd()
source_archive_dir_path = sys.argv[1]
print(
'Populating archive "{}" from archive "{}"...'.format(
dest_archive_dir_path, source_archive_dir_path))
random.seed(0)
_delete_data()
_add_recordings(source_archive_dir_path)
_add_clips(source_archive_dir_path)
def _delete_data():
print(
'Deleting any existing recordings, clips, and annotations from '
'destination archive...')
# Deleting a recording also deletes its clips, and deleting a clip
# deletes its annotations.
for recording in Recording.objects.all():
recording.delete()
def _add_recordings(source_archive_dir_path):
processing_start_time = time.time()
channel_recordings = _get_channel_recordings(source_archive_dir_path)
# Partition recording channels into sets that belong to the same
# recording. The result is a mapping from (station, start_time)
# pairs (with each pair representing a recording) to sets of
# (recording_channel, mic_output) pairs.
channel_info_sets = defaultdict(set)
for r in channel_recordings:
station, mic_output, recorder_input = \
_get_recording_channel_info(r.station.name)
channel_info_sets[(station, r.start_time)].add(
(r, mic_output, recorder_input))
keys = sorted(channel_info_sets.keys(), key=lambda p: (p[0].name, p[1]))
num_recordings = len(keys)
print('Adding recordings to destination archive...')
for i, (station, start_time) in enumerate(keys):
if i % _RECORDING_PROGRESS_PERIOD == 0 and i != 0:
print('Added {} of {} recordings...'.format(i, num_recordings))
channel_infos = list(channel_info_sets[(station, start_time)])
channel_infos.sort(key=lambda i: i[2].channel_num)
r, _, recorder_input = channel_infos[0]
# Extend the length of the recording artificially by two seconds.
# We do this because we have encountered cases where clips that
# were extracted from recordings are stamped with times that are
# up to a second past the end of the recording. This script
# rejects clips that don't seem to belong to any known recording,
# but we want to retain these. In cases where we have the recordings
# from which the clips were extracted, we can later find the precise
# start indices of the clips in the recordings, and correct both
# the clip start times and the recording durations in the archive.
length = r.length + 2 * r.sample_rate
r = OldRecording(r.station, r.start_time, length, r.sample_rate)
recorder = recorder_input.device
num_channels = len(channel_infos)
span = (r.length - 1) / r.sample_rate
end_time = r.start_time + datetime.timedelta(seconds=span)
creation_time = time_utils.get_utc_now()
recording = Recording(
station=station,
recorder=recorder,
num_channels=num_channels,
length=r.length,
sample_rate=r.sample_rate,
start_time=r.start_time,
end_time=end_time,
creation_time=creation_time,
creating_job=None)
recording.save()
if _SHOW_RECORDINGS:
# print('Recording {} {}'.format(i, str(recording)))
print('Recording {} {} / {} / {} / {} / {} / {}'.format(
i, station.name, r.start_time, r.length, r.sample_rate,
r.length / r.sample_rate, end_time))
for _, mic_output, recorder_input in channel_infos:
# We assume here that the recording channel number is the
# same as the recorder channel number.
channel_num = recorder_input.channel_num
channel = RecordingChannel(
recording=recording,
channel_num=channel_num,
recorder_channel_num=channel_num,
mic_output=mic_output)
channel.save()
if _SHOW_RECORDINGS:
print(' Channel {} {}'.format(channel_num, mic_output.name))
elapsed_time = time.time() - processing_start_time
rate = num_recordings / elapsed_time
print((
'Added a total of {} recordings in {:.1f} seconds, an average of '
'{:.1f} recordings per second.').format(len(keys), elapsed_time, rate))
def _get_channel_recordings(archive_dir_path):
archive = Archive(archive_dir_path)
archive.open()
stations = archive.stations
start_night = archive.start_night
end_night = archive.end_night
channels = set()
for station in stations:
night = start_night
while night <= end_night:
for r in _get_night_channel_recordings(archive, station, night):
channels.add(r)
night += _ONE_NIGHT
archive.close()
channels = list(channels)
channels.sort(key=lambda r: (r.station.name, r.start_time))
return channels
def _get_night_channel_recordings(archive, station, night):
if _CREATE_FAKE_RECORDINGS:
return _create_fake_night_channel_recordings(archive, station, night)
else:
return archive.get_recordings(station.name, night)
_FAKE_RECORDING_START_HOUR = 19
_FAKE_RECORDING_DURATION = 12
_FAKE_RECORDING_SAMPLE_RATE = 22050
def _create_fake_night_channel_recordings(archive, station, night):
from vesper.archive.recording import Recording as RecordingOld
start_time = time_utils.create_utc_datetime(
night.year, night.month, night.day, _FAKE_RECORDING_START_HOUR,
time_zone=station.time_zone)
length = \
_FAKE_RECORDING_DURATION * 3600 * _FAKE_RECORDING_SAMPLE_RATE
channel = RecordingOld(
station, start_time, length, _FAKE_RECORDING_SAMPLE_RATE)
return [channel]
def _get_recording_channel_info(station_name):
station_name, mic_name = _get_station_and_mic_name(station_name)
station = Station.objects.get(name=station_name)
mic = station.devices.get(
model__type='Microphone',
name__startswith=mic_name)
# We assume here that each mic has exactly one output.
mic_output = mic.outputs.all()[0]
# We assume here that each mic output is connected to the same
# channel of the same recorder throughout the archive.
connection = DeviceConnection.objects.get(output=mic_output)
recorder_input = connection.input
return station, mic_output, recorder_input
def _get_station_and_mic_name(station_name):
for mic_name in _UNCORRECTED_MIC_NAMES:
if station_name.endswith(mic_name):
station_name = station_name[:-(len(mic_name) + 1)]
station_name = _correct(station_name, _STATION_NAME_CORRECTIONS)
mic_name = _correct(mic_name, _MIC_NAME_CORRECTIONS)
return (station_name, mic_name)
# If we get here, the station name does not end with any of the
# elements of _UNCORRECTED_MIC_NAMES.
return (station_name, _DEFAULT_MIC_NAME)
def _correct(name, name_corrections):
return name_corrections.get(name, name)
_clip_count = 0
def _add_clips(source_archive_dir_path):
print('Adding clips to destination archive...')
processing_start_time = time.time()
global _clip_count
archive = Archive(source_archive_dir_path)
archive.open()
stations = archive.stations
start_night = archive.start_night
end_night = archive.end_night
detectors = _get_detectors()
annotation_infos = _get_annotation_infos()
num_added = 0
num_rejected = 0
num_excluded = 0
for station in stations:
night = start_night
while night <= end_night:
clips = archive.get_clips(station_name=station.name, night=night)
num_clips = len(clips)
if num_clips != 0:
print(
'Adding {} clips for station "{}", night {}...'.format(
num_clips, station.name, night))
start_time = time.time()
with transaction.atomic():
m, n, p = _add_clips_aux(
clips, night, detectors, annotation_infos)
elapsed_time = time.time() - start_time
rate = num_clips / elapsed_time
print((
'Processed {} clips in {:.1f} seconds, an average of '
'{:.1f} clips per second.').format(
num_clips, elapsed_time, rate))
num_added += m
num_rejected += n
num_excluded += p
if _clip_count >= _CLIP_COUNT_LIMIT:
break
night += _ONE_NIGHT
if _clip_count >= _CLIP_COUNT_LIMIT:
break
archive.close()
num_clips = num_added + num_rejected + num_excluded
elapsed_time = time.time() - processing_start_time
rate = num_clips / elapsed_time
print((
'Processed a total of {} clips in {:.1f} seconds, an average of '
'{:.1f} clips per second.').format(num_clips, elapsed_time, rate))
print(
'Added a total of {} clips, rejected {}, excluded {}.'.format(
num_added, num_rejected, num_excluded))
def _get_detectors():
detectors = archive.instance.get_processors_of_type('Detector')
detectors = dict((d.name, d) for d in detectors)
for name, aliases in _DETECTOR_NAME_ALIASES.items():
detector = detectors[name]
for alias in aliases:
detectors[alias] = detector
return detectors
def _get_annotation_infos():
infos = AnnotationInfo.objects.all()
return dict((i.name, i) for i in infos)
def _add_clips_aux(clips, night, detectors, annotation_infos):
global _clip_count
annotation_info = \
_get_annotation_info('Classification', annotation_infos)
num_added = 0
num_rejected = 0
num_excluded = 0
for c in clips:
_clip_count += 1
if _clip_count % _PAUSE_CHECK_PERIOD == 0:
_pause_if_indicated()
if not _include_clip(c):
num_excluded += 1
continue
file_path = c.file_path
if _CLIP_FILES_AVAILABLE and not (os.path.exists(file_path)):
print(
'Could not find clip file "{}". Clip will be ignored.'.format(
file_path))
num_rejected += 1
continue
try:
channel = _get_clip_recording_channel(c)
except Exception:
print((
'Could not get recording channel for clip "{}". '
'Clip will be ignored.').format(file_path))
num_rejected += 1
continue
try:
detector = _get_detector(c, detectors)
except ValueError:
print((
'Could not get detector "{}" for clip "{}". '
'Clip will be ignored.').format(c.detector_name, file_path))
num_rejected += 1
continue
# The code between here and the return statement used to be a
# single database transaction.
# with transaction.atomic():
recording = channel.recording
station = recording.station
mic_output = channel.mic_output
sample_rate = recording.sample_rate
if _CLIP_FILES_AVAILABLE:
try:
length = audio_file_utils.get_wave_file_info(file_path).length
except Exception as e:
print((
'Could not read audio file info for clip "{}". '
'Error message was: {}. '
'Clip will be ignored.').format(file_path, str(e)))
num_rejected += 1
continue
else:
length = c.duration * sample_rate
start_time = c.start_time
span = (length - 1) / sample_rate
end_time = start_time + datetime.timedelta(seconds=span)
creation_time = time_utils.get_utc_now()
clip = Clip(
station=station,
mic_output=mic_output,
recording_channel=channel,
start_index=None,
length=length,
sample_rate=sample_rate,
start_time=start_time,
end_time=end_time,
date=night,
creation_time=creation_time,
creating_processor=detector)
if _SHOW_CLIPS:
print('Clip', _clip_count, clip)
clip.save()
if _CLIP_FILES_AVAILABLE and _COPY_CLIP_FILES:
try:
_copy_clip_audio_file(file_path, clip)
except Exception as e:
print((
'Copy failed for clip file "{}". '
'Error message was: {}. '
'Clip will be ignored.').format(file_path, str(e)))
num_rejected += 1
continue
if c.clip_class_name is not None:
# TODO: When this script becomes an importer, add the
# creating job to the following.
model_utils.annotate_clip(
clip, annotation_info, c.clip_class_name)
num_added += 1
return (num_added, num_rejected, num_excluded)
def _pause_if_indicated():
if _pause_file_exists():
print('pausing...')
while True:
time.sleep(1)
if not _pause_file_exists():
print('resuming...')
break
def _pause_file_exists():
return os.path.exists(_PAUSE_FILE_PATH)
def _include_clip(clip):
return True
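# NOTE: the unconditional return above disables the filtering logic
# below; remove it to re-enable the probabilistic exclusion of
# non-call clips.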
name = clip.clip_class_name
if name is None or name == 'Outside':
return False
elif name.startswith('Call'):
return True
else:
return random.random() <= _NON_CALL_CLIP_INCLUSION_PROBABILITY
def _get_clip_recording_channel(clip):
station, _, recorder_input = _get_recording_channel_info(clip.station.name)
# In an ideal world, we would just use `RecordingChannel.objects.get`
# here to get the unique channel that contains the clip. Unfortunately,
# however, the actual and purported sample rates of a recording tend to
# differ, and when the actual sample rate is higher than the purported
# one, the time intervals associated with consecutive recordings
# overlap. In such situations a given clip may have come from either
# the end of one recording or the beginning of the next.
channel = RecordingChannel.objects.filter(
recording__station=station,
recording__recorder=recorder_input.device,
recording__start_time__lte=clip.start_time,
recording__end_time__gt=clip.start_time,
channel_num=recorder_input.channel_num
).order_by('recording__start_time').first()
if channel is None:
raise Exception()
return channel
def _get_detector(clip, detectors):
# TODO: Should manual clips be created by a particular user?
if clip.detector_name == 'Manual':
return None
else:
try:
return detectors[clip.detector_name]
except KeyError:
raise ValueError(
'Unrecognized detector "{}".'.format(clip.detector_name))
def _get_annotation_info(name, annotation_infos):
try:
return annotation_infos[name]
except KeyError:
raise ValueError('Unrecognized annotation "{}".'.format(name))
def _copy_clip_audio_file(file_path, clip):
# TODO: Would it be significantly faster to copy files via the OS
# rather than reading their contents and then writing them?
with open(file_path, 'rb') as file_:
contents = file_.read()
clip_file_path = clip_manager.instance.get_audio_file_path(clip)
os_utils.create_parent_directory(clip_file_path)
with open(clip_file_path, 'wb') as file_:
file_.write(contents)
# print('Wrote file "{}" for clip {}.'.format(clip_file_path, clip.id))
if __name__ == '__main__':
_main()
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
client.go | package pulse
import (
"fmt"
"net"
"os"
"path"
"sync"
"github.com/jfreymuth/pulse/proto"
)
// The Client is the connection to the pulseaudio server. An application typically only uses a single client.
type Client struct {
conn net.Conn
c *proto.Client
mu sync.Mutex
playback map[uint32]*PlaybackStream
record map[uint32]*RecordStream
server string
props proto.PropList
}
// NewClient connects to the server.
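//
// A minimal usage sketch (the option value is illustrative):
//
//	c, err := pulse.NewClient(pulse.ClientApplicationName("my app"))
//	if err != nil {
//		// handle err
//	}
//	defer c.Close()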
func NewClient(opts ...ClientOption) (*Client, error) {
c := &Client{
props: proto.PropList{
"media.name": proto.PropListString("go audio"),
"application.name": proto.PropListString(path.Base(os.Args[0])),
"application.icon_name": proto.PropListString("audio-x-generic"),
"application.process.id": proto.PropListString(fmt.Sprintf("%d", os.Getpid())),
"application.process.binary": proto.PropListString(os.Args[0]),
"window.x11.display": proto.PropListString(os.Getenv("DISPLAY")),
},
}
for _, opt := range opts {
opt(c)
}
var err error
c.c, c.conn, err = proto.Connect(c.server)
if err != nil {
return nil, err
}
err = c.c.Request(&proto.SetClientName{Props: c.props}, &proto.SetClientNameReply{})
if err != nil {
c.conn.Close()
return nil, err
}
c.playback = make(map[uint32]*PlaybackStream)
c.record = make(map[uint32]*RecordStream)
c.c.Callback = func(msg interface{}) {
switch msg := msg.(type) {
case *proto.Request:
c.mu.Lock()
stream, ok := c.playback[msg.StreamIndex]
c.mu.Unlock()
if ok {
stream.request <- int(msg.Length)
}
case *proto.DataPacket:
c.mu.Lock()
stream, ok := c.record[msg.StreamIndex]
c.mu.Unlock()
if ok {
stream.write(msg.Data)
}
case *proto.Started:
c.mu.Lock()
stream, ok := c.playback[msg.StreamIndex]
c.mu.Unlock()
if ok && stream.state == running && !stream.underflow {
stream.started <- true
}
case *proto.Underflow:
c.mu.Lock()
stream, ok := c.playback[msg.StreamIndex]
c.mu.Unlock()
if ok {
if stream.state == running {
stream.underflow = true
}
}
case *proto.ConnectionClosed:
c.mu.Lock()
for _, p := range c.playback {
close(p.request)
p.err = ErrConnectionClosed
p.state = serverLost
}
for _, r := range c.record {
r.err = ErrConnectionClosed
r.state = serverLost
}
c.playback = make(map[uint32]*PlaybackStream)
c.record = make(map[uint32]*RecordStream)
c.mu.Unlock()
c.conn.Close()
default:
//fmt.Printf("%#v\n", msg)
}
}
return c, nil
}
// Close closes the client. Calling methods on a closed client may panic.
func (c *Client) Close() {
c.conn.Close()
}
// A ClientOption supplies configuration when creating the client.
type ClientOption func(*Client)
// ClientApplicationName sets the application name.
// This will e.g. be displayed by a volume control application to identify the application.
// It should be human-readable and localized.
func ClientApplicationName(name string) ClientOption {
return func(c *Client) { c.props["application.name"] = proto.PropListString(name) }
}
// ClientApplicationIconName sets the application icon using an xdg icon name.
// This will e.g. be displayed by a volume control application to identify the application.
func ClientApplicationIconName(name string) ClientOption {
return func(c *Client) { c.props["application.icon_name"] = proto.PropListString(name) }
}
// ClientServerString will override the default server strings.
// Server strings are used to connect to the server. For the server string format see
// https://www.freedesktop.org/wiki/Software/PulseAudio/Documentation/User/ServerStrings/
func ClientServerString(s string) ClientOption {
return func(c *Client) { c.server = s }
}
// RawRequest can be used to send arbitrary requests.
//
// req should be one of the request types defined by the proto package.
//
// rpl must be a pointer to the correct reply type or nil. This function will panic if rpl has the wrong type.
//
// The returned error can be compared against errors defined by the proto package to check for specific errors.
//
// The function will always block until the server has replied, even if rpl is nil.
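//
// A minimal usage sketch (assuming the proto package's GetServerInfo and
// GetServerInfoReply types; substitute the request/reply pair you need):
//
//	var reply proto.GetServerInfoReply
//	err := c.RawRequest(&proto.GetServerInfo{}, &reply)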
func (c *Client) RawRequest(req proto.RequestArgs, rpl proto.Reply) error {
return c.c.Request(req, rpl)
}
// ErrConnectionClosed is a special error value indicating that the server closed the connection.
const ErrConnectionClosed = pulseError("pulseaudio: connection closed")
type pulseError string
func (e pulseError) Error() string { return string(e) }
| [
"\"DISPLAY\""
]
| []
| [
"DISPLAY"
]
| [] | ["DISPLAY"] | go | 1 | 0 | |
misc/test/google_test.go | //go:build Gcp || all
// +build Gcp all
package test
import (
"fmt"
"os"
"path"
"testing"
"time"
"github.com/pulumi/pulumi/pkg/v3/testing/integration"
"github.com/stretchr/testify/assert"
)
func TestAccGcpGoFunctions(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-go-functions"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["function"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpGoFunctionsRaw(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-go-functions-raw"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["function"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpGoGke(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-go-gke"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["url"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello Kubernetes bootcamp!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpGoInstance(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-go-instance"),
})
integration.ProgramTest(t, &test)
}
func TestAccGcpGoWebserver(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-go-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["instanceIP"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello, World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpJsWebserver(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-js-webserver"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["instanceIP"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello, World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpPyFunctions(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-py-functions"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["fxn_url"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Space Needle, Seattle, WA")
})
},
// TODO[pulumi/examples#859]: Currently this examples leads to a no-op preview diff of:
// -- gcp:storage:BucketObject eta_demo_object delete original
// +- gcp:storage:BucketObject eta_demo_object replace [diff: ~name]
// ++ gcp:storage:BucketObject eta_demo_object create replacement [diff: ~name]
// ~ gcp:cloudfunctions:Function eta_demo_function update [diff: ~sourceArchiveObject]
AllowEmptyPreviewChanges: true,
AllowEmptyUpdateChanges: true,
})
integration.ProgramTest(t, &test)
}
func TestAccGcpPyServerlessRaw(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-py-serverless-raw"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["go_endpoint"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
assertHTTPResult(t, stack.Outputs["python_endpoint"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpPyInstanceNginx(t *testing.T) {
t.Skip("Skip due to frequent failures: `35.239.87.214:80: connect: connection refused`")
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-py-instance-nginx"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["external_ip"].(string)
maxWait := time.Minute * 10
assertHTTPResultWithRetry(t, endpoint, nil, maxWait, func(body string) bool {
return assert.Contains(t, body, "Test Page for the Nginx HTTP Server on Fedora")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpTsFunctions(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-ts-functions"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["url"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Greetings from Google Cloud Functions!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpTsServerlessRaw(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-ts-serverless-raw"),
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["goEndpoint"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
assertHTTPResult(t, stack.Outputs["pythonEndpoint"].(string), nil, func(body string) bool {
return assert.Contains(t, body, "Hello World!")
})
},
})
integration.ProgramTest(t, &test)
}
func TestAccGcpTsCloudRun(t *testing.T) {
test := getGoogleBase(t).
With(integration.ProgramTestOptions{
Dir: path.Join(getCwd(t), "..", "..", "gcp-ts-cloudrun"),
RunUpdateTest: false,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
endpoint := stack.Outputs["rubyUrl"].(string)
assertHTTPResult(t, endpoint, nil, func(body string) bool {
return assert.Contains(t, body, "Hello Pulumi!")
})
},
// TODO[pulumi/examples#859]: Currently this examples leads to a no-op preview diff of:
// ~ gcp:cloudrun:Service ruby update [diff: ~template]
AllowEmptyPreviewChanges: true,
AllowEmptyUpdateChanges: true,
})
integration.ProgramTest(t, &test)
}
func getGoogleProject() string {
project := os.Getenv("GOOGLE_PROJECT")
if project == "" {
project = "pulumi-ci-gcp-provider"
fmt.Println("Defaulting GOOGLE_PROJECT to 'pulumi-ci-gcp-provider'. You can override using the GOOGLE_PROJECT variable")
}
return project
}
func getGoogleZone() string {
zone := os.Getenv("GOOGLE_ZONE")
if zone == "" {
zone = "us-central1-a"
fmt.Println("Defaulting GOOGLE_ZONE to 'us-central1-a'. You can override using the GOOGLE_ZONE variable")
}
return zone
}
func getGkeVersion() string {
gkeEngineVersion := os.Getenv("GKE_ENGINE_VERSION")
if gkeEngineVersion == "" {
gkeEngineVersion = "1.13.7-gke.24"
fmt.Println("Defaulting GKE_ENGINE_VERSION to '1.13.7-gke.24'. You can override using the GKE_ENGINE_VERSION variable")
}
return gkeEngineVersion
}
func getGoogleBase(t *testing.T) integration.ProgramTestOptions {
googleZone := getGoogleZone()
googleProject := getGoogleProject()
base := getBaseOptions(t)
gkeBase := base.With(integration.ProgramTestOptions{
Config: map[string]string{
"gcp:project": googleProject,
"gcp:zone": googleZone,
},
})
return gkeBase
}
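// To target a different project or zone, set the corresponding environment
// variables before running the tests (values below are illustrative), e.g.:
//
//	GOOGLE_PROJECT=my-project GOOGLE_ZONE=us-west1-a go test -tags Gcp ./...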
| [
"\"GOOGLE_PROJECT\"",
"\"GOOGLE_ZONE\"",
"\"GKE_ENGINE_VERSION\""
]
| []
| [
"GOOGLE_PROJECT",
"GKE_ENGINE_VERSION",
"GOOGLE_ZONE"
]
| [] | ["GOOGLE_PROJECT", "GKE_ENGINE_VERSION", "GOOGLE_ZONE"] | go | 3 | 0 | |
vendor/github.com/openshift/origin/pkg/oc/admin/image/verify-signature.go | package image
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/containers/image/docker/policyconfiguration"
"github.com/containers/image/docker/reference"
"github.com/containers/image/signature"
sigtypes "github.com/containers/image/types"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kclientcmd "k8s.io/client-go/tools/clientcmd"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"github.com/openshift/origin/pkg/cmd/util/clientcmd"
imageapi "github.com/openshift/origin/pkg/image/apis/image"
imageclient "github.com/openshift/origin/pkg/image/generated/internalclientset/typed/image/internalversion"
)
var (
verifyImageSignatureLongDesc = templates.LongDesc(`
Verifies the image signature of an image imported to internal registry using the local public GPG key.
This command verifies if the image identity contained in the image signature can be trusted
by using the public GPG key to verify the signature itself and matching the provided expected identity
with the identity (pull spec) of the given image.
By default, this command will use the public GPG keyring located in "$GNUPGHOME/pubring.gpg".
By default, this command will not save the result of the verification back to the image object; to do so,
users have to specify the "--save" flag. Note that to modify the image signature verification status,
users must have permission to edit the image object (usually an "image-auditor" role).
Note that using the "--save" flag on an already verified image together with an invalid GPG
key or an invalid expected identity will cause the saved verification status to be removed
and the image to become "unverified".
If this command is run outside the cluster, users have to specify the "--registry-url" parameter
with the public URL of the image registry.
To remove all verifications, users can use the "--remove-all" flag.
`)
verifyImageSignatureExample = templates.Examples(`
# Verify the image signature and identity using the local GPG keychain
%[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \
--expected-identity=registry.local:5000/foo/bar:v1
# Verify the image signature and identity using the local GPG keychain and save the status
%[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \
--expected-identity=registry.local:5000/foo/bar:v1 --save
# Verify the image signature and identity via exposed registry route
%[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \
--expected-identity=registry.local:5000/foo/bar:v1 \
--registry-url=docker-registry.foo.com
# Remove all signature verifications from the image
%[1]s sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 --remove-all
`)
)
type VerifyImageSignatureOptions struct {
InputImage string
ExpectedIdentity string
PublicKeyFilename string
PublicKey []byte
Save bool
RemoveAll bool
CurrentUser string
CurrentUserToken string
RegistryURL string
Insecure bool
ImageClient imageclient.ImageInterface
clientConfig kclientcmd.ClientConfig
Out io.Writer
ErrOut io.Writer
}
const (
VerifyRecommendedName = "verify-image-signature"
)
func NewCmdVerifyImageSignature(name, fullName string, f *clientcmd.Factory, out, errOut io.Writer) *cobra.Command {
opts := &VerifyImageSignatureOptions{
ErrOut: errOut,
Out: out,
clientConfig: f.OpenShiftClientConfig(),
// TODO: This improves the error message users get when containers/image is not able
// to locate the pubring.gpg file (which is the default).
// This should be improved/fixed in containers/image.
PublicKeyFilename: filepath.Join(os.Getenv("GNUPGHOME"), "pubring.gpg"),
}
cmd := &cobra.Command{
Use: fmt.Sprintf("%s IMAGE --expected-identity=EXPECTED_IDENTITY [--save]", VerifyRecommendedName),
Short: "Verify the image identity contained in the image signature",
Long: verifyImageSignatureLongDesc,
Example: fmt.Sprintf(verifyImageSignatureExample, fullName),
Run: func(cmd *cobra.Command, args []string) {
kcmdutil.CheckErr(opts.Validate())
kcmdutil.CheckErr(opts.Complete(f, cmd, args, out))
kcmdutil.CheckErr(opts.Run())
},
}
cmd.Flags().StringVar(&opts.ExpectedIdentity, "expected-identity", opts.ExpectedIdentity, "An expected image docker reference to verify (required).")
cmd.Flags().BoolVar(&opts.Save, "save", opts.Save, "If true, the result of the verification will be saved to an image object.")
cmd.Flags().BoolVar(&opts.RemoveAll, "remove-all", opts.RemoveAll, "If set, all signature verifications will be removed from the given image.")
cmd.Flags().StringVar(&opts.PublicKeyFilename, "public-key", opts.PublicKeyFilename, fmt.Sprintf("A path to a public GPG key to be used for verification. (defaults to %q)", opts.PublicKeyFilename))
cmd.Flags().StringVar(&opts.RegistryURL, "registry-url", opts.RegistryURL, "The address to use when contacting the registry, instead of using the internal cluster address. This is useful if you can't resolve or reach the internal registry address.")
cmd.Flags().BoolVar(&opts.Insecure, "insecure", opts.Insecure, "If set, use the insecure protocol for registry communication.")
return cmd
}
func (o *VerifyImageSignatureOptions) Validate() error {
if !o.RemoveAll {
if len(o.ExpectedIdentity) == 0 {
return errors.New("the --expected-identity is required")
}
if _, err := imageapi.ParseDockerImageReference(o.ExpectedIdentity); err != nil {
return errors.New("the --expected-identity must be valid image reference")
}
}
if o.RemoveAll && len(o.ExpectedIdentity) > 0 {
return errors.New("the --expected-identity cannot be used when removing all verifications")
}
return nil
}
func (o *VerifyImageSignatureOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []string, out io.Writer) error {
if len(args) != 1 {
return kcmdutil.UsageError(cmd, "exactly one image must be specified")
}
o.InputImage = args[0]
var err error
if len(o.PublicKeyFilename) > 0 {
if o.PublicKey, err = ioutil.ReadFile(o.PublicKeyFilename); err != nil {
return fmt.Errorf("unable to read --public-key: %v", err)
}
}
imageClient, err := f.OpenshiftInternalImageClient()
if err != nil {
return err
}
o.ImageClient = imageClient.Image()
userClient, err := f.OpenshiftInternalUserClient()
if err != nil {
return err
}
// We need the current user name so we can record it into a verification condition and
// we need a bearer token so we can fetch the manifest from the registry.
// TODO: Add support for external registries (currently only the integrated registry will work).
if me, err := userClient.User().Users().Get("~", metav1.GetOptions{}); err != nil {
return err
} else {
o.CurrentUser = me.Name
if config, err := o.clientConfig.ClientConfig(); err != nil {
return err
} else {
if o.CurrentUserToken = config.BearerToken; len(o.CurrentUserToken) == 0 {
return fmt.Errorf("no token is currently in use for this session")
}
}
}
return nil
}
func (o VerifyImageSignatureOptions) Run() error {
img, err := o.ImageClient.Images().Get(o.InputImage, metav1.GetOptions{})
if err != nil {
return err
}
if len(img.Signatures) == 0 {
return fmt.Errorf("%s does not have any signature", img.Name)
}
pr, err := signature.NewPRSignedByKeyPath(signature.SBKeyTypeGPGKeys, o.PublicKeyFilename, signature.NewPRMMatchRepoDigestOrExact())
if err != nil {
return fmt.Errorf("unable to prepare verification policy requirements: %v", err)
}
policy := signature.Policy{Default: []signature.PolicyRequirement{pr}}
pc, err := signature.NewPolicyContext(&policy)
if err != nil {
return fmt.Errorf("unable to setup policy: %v", err)
}
defer pc.Destroy()
if o.RemoveAll {
img.Signatures = []imageapi.ImageSignature{}
}
for i, s := range img.Signatures {
// Verify the signature against the policy
signedBy, err := o.verifySignature(pc, img, s.Content)
if err != nil {
fmt.Fprintf(o.ErrOut, "error verifying signature %s for image %s (verification status will be removed): %v\n", img.Signatures[i].Name, o.InputImage, err)
img.Signatures[i] = imageapi.ImageSignature{}
continue
}
fmt.Fprintf(o.Out, "image %q identity is now confirmed (signed by GPG key %q)\n", o.InputImage, signedBy)
now := metav1.Now()
newConditions := []imageapi.SignatureCondition{
{
Type: imageapi.SignatureTrusted,
Status: kapi.ConditionTrue,
LastProbeTime: now,
LastTransitionTime: now,
Reason: "manually verified",
Message: fmt.Sprintf("verified by user %q", o.CurrentUser),
},
// TODO: This should not be needed (need to relax validation).
{
Type: imageapi.SignatureForImage,
Status: kapi.ConditionTrue,
LastProbeTime: now,
LastTransitionTime: now,
},
}
img.Signatures[i].Conditions = newConditions
img.Signatures[i].IssuedBy = &imageapi.SignatureIssuer{}
// TODO: This should not be just a key id but a human-readable identity.
img.Signatures[i].IssuedBy.CommonName = signedBy
}
if o.Save || o.RemoveAll {
_, err := o.ImageClient.Images().Update(img)
return err
}
return nil
}
// getImageManifest fetches the manifest for provided image from the integrated registry.
func (o *VerifyImageSignatureOptions) getImageManifest(img *imageapi.Image) ([]byte, error) {
parsed, err := imageapi.ParseDockerImageReference(img.DockerImageReference)
if err != nil {
return nil, err
}
registryURL := parsed.RegistryURL()
if len(o.RegistryURL) > 0 {
registryURL = &url.URL{Host: o.RegistryURL, Scheme: "https"}
if o.Insecure {
registryURL.Scheme = ""
}
}
return getImageManifestByIDFromRegistry(registryURL, parsed.RepositoryName(), img.Name, o.CurrentUser, o.CurrentUserToken, o.Insecure)
}
// verifySignature takes policy, image and the image signature blob and verifies that the
// signature was signed by a trusted key, the expected identity matches the one in the
// signature message and the manifest matches as well.
// In case the image identity is confirmed, this function returns the matching GPG key in
// short form, otherwise it returns rejection reason.
func (o *VerifyImageSignatureOptions) verifySignature(pc *signature.PolicyContext, img *imageapi.Image, sigBlob []byte) (string, error) {
manifest, err := o.getImageManifest(img)
if err != nil {
return "", fmt.Errorf("failed to get image %q manifest: %v", img.Name, err)
}
allowed, err := pc.IsRunningImageAllowed(newUnparsedImage(o.ExpectedIdentity, sigBlob, manifest))
if !allowed && err == nil {
return "", errors.New("signature rejected but no error set")
}
if err != nil {
return "", fmt.Errorf("signature rejected: %v", err)
}
if untrustedInfo, err := signature.GetUntrustedSignatureInformationWithoutVerifying(sigBlob); err != nil {
// This is treated as an unverified signature. It really shouldn’t happen anyway.
return "", fmt.Errorf("error getting signing key identity: %v", err)
} else {
return untrustedInfo.UntrustedShortKeyIdentifier, nil
}
}
// dummyDockerTransport is containers/image/docker.Transport, except that it only provides identity information.
var dummyDockerTransport = dockerTransport{}
type dockerTransport struct{}
func (t dockerTransport) Name() string {
return "docker"
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func (t dockerTransport) ParseReference(reference string) (sigtypes.ImageReference, error) {
return parseDockerReference(reference)
}
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
// FIXME? We could be verifying the various character set and length restrictions
// from docker/distribution/reference.regexp.go, but other than that there
// are few semantically invalid strings.
return nil
}
// dummyDockerReference is containers/image/docker.Reference, except that only provides identity information.
type dummyDockerReference struct{ ref reference.Named }
// parseDockerReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
func parseDockerReference(refString string) (sigtypes.ImageReference, error) {
if !strings.HasPrefix(refString, "//") {
return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
}
ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
if err != nil {
return nil, err
}
ref = reference.TagNameOnly(ref)
if reference.IsNameOnly(ref) {
return nil, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// The docker/distribution API does not really support that (we can’t ask for an image with a specific
// tag and digest), so fail. This MAY be accepted in the future.
// (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
// the tag or the digest first?)
_, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical)
if isTagged && isDigested {
return nil, fmt.Errorf("Docker references with both a tag and digest are currently not supported")
}
return dummyDockerReference{
ref: ref,
}, nil
}
func (ref dummyDockerReference) Transport() sigtypes.ImageTransport {
return dummyDockerTransport
}
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dummyDockerReference) StringWithinTransport() string {
return "//" + reference.FamiliarString(ref.ref)
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref dummyDockerReference) DockerReference() reference.Named {
return ref.ref
}
// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
// (i.e. various references with exactly the same semantics should return the same configuration identity)
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref dummyDockerReference) PolicyConfigurationIdentity() string {
res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
if res == "" || err != nil { // Coverage: Should never happen, parseDockerReference above should refuse values which could cause a failure.
panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
}
return res
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref dummyDockerReference) PolicyConfigurationNamespaces() []string {
return policyconfiguration.DockerReferenceNamespaces(ref.ref)
}
func (ref dummyDockerReference) NewImage(ctx *sigtypes.SystemContext) (sigtypes.Image, error) {
panic("Unimplemented")
}
func (ref dummyDockerReference) NewImageSource(ctx *sigtypes.SystemContext, requestedManifestMIMETypes []string) (sigtypes.ImageSource, error) {
panic("Unimplemented")
}
func (ref dummyDockerReference) NewImageDestination(ctx *sigtypes.SystemContext) (sigtypes.ImageDestination, error) {
panic("Unimplemented")
}
func (ref dummyDockerReference) DeleteImage(ctx *sigtypes.SystemContext) error {
panic("Unimplemented")
}
// unparsedImage implements sigtypes.UnparsedImage, to allow evaluating the signature policy
// against an image without having to make it pullable by containers/image
type unparsedImage struct {
ref sigtypes.ImageReference
manifest []byte
signature []byte
}
func newUnparsedImage(expectedIdentity string, signature, manifest []byte) sigtypes.UnparsedImage {
// We check the error in Validate()
ref, _ := parseDockerReference("//" + expectedIdentity)
return &unparsedImage{ref: ref, manifest: manifest, signature: signature}
}
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (ui *unparsedImage) Reference() sigtypes.ImageReference {
return ui.ref
}
// Close removes resources associated with an initialized UnparsedImage, if any.
func (ui *unparsedImage) Close() error {
return nil
}
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
func (ui *unparsedImage) Manifest() ([]byte, string, error) {
return ui.manifest, "", nil
}
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
func (ui *unparsedImage) Signatures(context.Context) ([][]byte, error) {
return [][]byte{ui.signature}, nil
}
| [
"\"GNUPGHOME\""
]
| []
| [
"GNUPGHOME"
]
| [] | ["GNUPGHOME"] | go | 1 | 0 | |
Lib/tkinter/__init__.py | """Wrapper functions for Tcl/Tk.
Tkinter provides classes which allow the display, positioning and
control of widgets. Toplevel widgets are Tk and Toplevel. Other
widgets are Frame, Label, Entry, Text, Canvas, Button, Radiobutton,
Checkbutton, Scale, Listbox, Scrollbar, OptionMenu, Spinbox
LabelFrame and PanedWindow.
Properties of the widgets are specified with keyword arguments.
Keyword arguments have the same name as the corresponding resource
under Tk.
Widgets are positioned with one of the geometry managers Place, Pack
or Grid. These managers can be called with methods place, pack, grid
available in every Widget.
Actions are bound to events by resources (e.g. keyword argument
command) or with the method bind.
Example (Hello, World):
import tkinter
from tkinter.constants import *
tk = tkinter.Tk()
frame = tkinter.Frame(tk, relief=RIDGE, borderwidth=2)
frame.pack(fill=BOTH,expand=1)
label = tkinter.Label(frame, text="Hello, World")
label.pack(fill=X, expand=1)
button = tkinter.Button(frame,text="Exit",command=tk.destroy)
button.pack(side=BOTTOM)
tk.mainloop()
"""
__version__ = "$Revision$"
import sys
if sys.platform == "win32":
# Attempt to configure Tcl/Tk without requiring PATH
from tkinter import _fix
import _tkinter # If this fails your Python may not be configured for Tk
TclError = _tkinter.TclError
from tkinter.constants import *
wantobjects = 1
TkVersion = float(_tkinter.TK_VERSION)
TclVersion = float(_tkinter.TCL_VERSION)
READABLE = _tkinter.READABLE
WRITABLE = _tkinter.WRITABLE
EXCEPTION = _tkinter.EXCEPTION
def _flatten(seq):
"""Internal function."""
res = ()
for item in seq:
if isinstance(item, (tuple, list)):
res = res + _flatten(item)
elif item is not None:
res = res + (item,)
return res
try: _flatten = _tkinter._flatten
except AttributeError: pass
def _cnfmerge(cnfs):
"""Internal function."""
if isinstance(cnfs, dict):
return cnfs
elif isinstance(cnfs, (type(None), str)):
return cnfs
else:
cnf = {}
for c in _flatten(cnfs):
try:
cnf.update(c)
except (AttributeError, TypeError) as msg:
print("_cnfmerge: fallback due to:", msg)
for k, v in c.items():
cnf[k] = v
return cnf
try: _cnfmerge = _tkinter._cnfmerge
except AttributeError: pass
class Event:
"""Container for the properties of an event.
Instances of this type are generated if one of the following events occurs:
KeyPress, KeyRelease - for keyboard events
ButtonPress, ButtonRelease, Motion, Enter, Leave, MouseWheel - for mouse events
Visibility, Unmap, Map, Expose, FocusIn, FocusOut, Circulate,
Colormap, Gravity, Reparent, Property, Destroy, Activate,
Deactivate - for window events.
If a callback function for one of these events is registered
using bind, bind_all, bind_class, or tag_bind, the callback is
called with an Event as first argument. It will have the
following attributes (in braces are the event types for which
the attribute is valid):
serial - serial number of event
num - mouse button pressed (ButtonPress, ButtonRelease)
focus - whether the window has the focus (Enter, Leave)
height - height of the exposed window (Configure, Expose)
width - width of the exposed window (Configure, Expose)
keycode - keycode of the pressed key (KeyPress, KeyRelease)
state - state of the event as a number (ButtonPress, ButtonRelease,
Enter, KeyPress, KeyRelease,
Leave, Motion)
state - state as a string (Visibility)
time - when the event occurred
x - x-position of the mouse
y - y-position of the mouse
x_root - x-position of the mouse on the screen
(ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
y_root - y-position of the mouse on the screen
(ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
char - pressed character (KeyPress, KeyRelease)
send_event - see X/Windows documentation
keysym - keysym of the event as a string (KeyPress, KeyRelease)
keysym_num - keysym of the event as a number (KeyPress, KeyRelease)
type - type of the event as a number
widget - widget in which the event occurred
delta - delta of wheel movement (MouseWheel)
"""
pass
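# A minimal illustration of the above (assuming some existing widget
# `some_widget`): a callback bound with bind() receives an Event
# instance as its only argument.
#
#     def on_key(event):
#         print(event.keysym, event.x, event.y)
#
#     some_widget.bind('<KeyPress>', on_key)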
_support_default_root = 1
_default_root = None
def NoDefaultRoot():
"""Inhibit setting of default root window.
Call this function to prevent the first instance of
Tk from being used as the default parent for windows created
without an explicit parent window.
"""
global _support_default_root
_support_default_root = 0
global _default_root
_default_root = None
del _default_root
def _tkerror(err):
"""Internal function."""
pass
def _exit(code='0'):
"""Internal function. Calling it will throw the exception SystemExit."""
raise SystemExit(code)
_varnum = 0
class Variable:
"""Class to define value holders for e.g. buttons.
Subclasses StringVar, IntVar, DoubleVar, BooleanVar are specializations
that constrain the type of the value returned from get()."""
_default = ""
def __init__(self, master=None, value=None, name=None):
"""Construct a variable
MASTER can be given as master widget.
VALUE is an optional value (defaults to "")
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
global _varnum
if not master:
master = _default_root
self._master = master
self._tk = master.tk
if name:
self._name = name
else:
self._name = 'PY_VAR' + repr(_varnum)
_varnum += 1
if value is not None:
self.set(value)
elif not self._tk.call("info", "exists", self._name):
self.set(self._default)
def __del__(self):
"""Unset the variable in Tcl."""
self._tk.globalunsetvar(self._name)
def __str__(self):
"""Return the name of the variable in Tcl."""
return self._name
def set(self, value):
"""Set the variable to VALUE."""
return self._tk.globalsetvar(self._name, value)
def get(self):
"""Return value of variable."""
return self._tk.globalgetvar(self._name)
def trace_variable(self, mode, callback):
"""Define a trace callback for the variable.
MODE is one of "r", "w", "u" for read, write, undefine.
CALLBACK must be a function which is called when
the variable is read, written or undefined.
Return the name of the callback.
"""
cbname = self._master._register(callback)
self._tk.call("trace", "variable", self._name, mode, cbname)
return cbname
trace = trace_variable
def trace_vdelete(self, mode, cbname):
"""Delete the trace callback for a variable.
MODE is one of "r", "w", "u" for read, write, undefine.
CBNAME is the name of the callback returned from trace_variable or trace.
"""
self._tk.call("trace", "vdelete", self._name, mode, cbname)
self._master.deletecommand(cbname)
def trace_vinfo(self):
"""Return all trace callback information."""
return map(self._tk.split, self._tk.splitlist(
self._tk.call("trace", "vinfo", self._name)))
def __eq__(self, other):
"""Comparison for equality (==).
Note: if the Variable's master matters to behavior
also compare self._master == other._master
"""
return self.__class__.__name__ == other.__class__.__name__ \
and self._name == other._name
class StringVar(Variable):
"""Value holder for strings variables."""
_default = ""
def __init__(self, master=None, value=None, name=None):
"""Construct a string variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to "")
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return value of variable as string."""
value = self._tk.globalgetvar(self._name)
if isinstance(value, str):
return value
return str(value)
class IntVar(Variable):
"""Value holder for integer variables."""
_default = 0
def __init__(self, master=None, value=None, name=None):
"""Construct an integer variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to 0)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def set(self, value):
"""Set the variable to value, converting booleans to integers."""
if isinstance(value, bool):
value = int(value)
return Variable.set(self, value)
def get(self):
"""Return the value of the variable as an integer."""
return getint(self._tk.globalgetvar(self._name))
class DoubleVar(Variable):
"""Value holder for float variables."""
_default = 0.0
def __init__(self, master=None, value=None, name=None):
"""Construct a float variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to 0.0)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return the value of the variable as a float."""
return getdouble(self._tk.globalgetvar(self._name))
class BooleanVar(Variable):
"""Value holder for boolean variables."""
_default = False
def __init__(self, master=None, value=None, name=None):
"""Construct a boolean variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to False)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return the value of the variable as a bool."""
return self._tk.getboolean(self._tk.globalgetvar(self._name))
def mainloop(n=0):
"""Run the main loop of Tcl."""
_default_root.tk.mainloop(n)
getint = int
getdouble = float
def getboolean(s):
"""Convert true and false to integer values 1 and 0."""
return _default_root.tk.getboolean(s)
# Methods defined on both toplevel and interior widgets
class Misc:
"""Internal class.
Base class which defines methods common for interior widgets."""
# XXX font command?
_tclCommands = None
def destroy(self):
"""Internal function.
Delete all Tcl commands created for
this widget in the Tcl interpreter."""
if self._tclCommands is not None:
for name in self._tclCommands:
#print '- Tkinter: deleted command', name
self.tk.deletecommand(name)
self._tclCommands = None
def deletecommand(self, name):
"""Internal function.
Delete the Tcl command provided in NAME."""
#print '- Tkinter: deleted command', name
self.tk.deletecommand(name)
try:
self._tclCommands.remove(name)
except ValueError:
pass
def tk_strictMotif(self, boolean=None):
"""Set Tcl internal variable, whether the look and feel
should adhere to Motif.
A parameter of 1 means adhere to Motif (e.g. no color
change if mouse passes over slider).
Returns the set value."""
return self.tk.getboolean(self.tk.call(
'set', 'tk_strictMotif', boolean))
def tk_bisque(self):
"""Change the color scheme to light brown as used in Tk 3.6 and before."""
self.tk.call('tk_bisque')
def tk_setPalette(self, *args, **kw):
"""Set a new color scheme for all widget elements.
A single color as argument will cause that all colors of Tk
widget elements are derived from this.
Alternatively several keyword parameters and its associated
colors can be given. The following keywords are valid:
activeBackground, foreground, selectColor,
activeForeground, highlightBackground, selectBackground,
background, highlightColor, selectForeground,
disabledForeground, insertBackground, troughColor."""
self.tk.call(('tk_setPalette',)
+ _flatten(args) + _flatten(kw.items()))
def tk_menuBar(self, *args):
"""Do not use. Needed in Tk 3.6 and earlier."""
pass # obsolete since Tk 4.0
def wait_variable(self, name='PY_VAR'):
"""Wait until the variable is modified.
A parameter of type IntVar, StringVar, DoubleVar or
BooleanVar must be given."""
self.tk.call('tkwait', 'variable', name)
waitvar = wait_variable # XXX b/w compat
def wait_window(self, window=None):
"""Wait until a WIDGET is destroyed.
If no parameter is given self is used."""
if window is None:
window = self
self.tk.call('tkwait', 'window', window._w)
def wait_visibility(self, window=None):
"""Wait until the visibility of a WIDGET changes
(e.g. it appears).
If no parameter is given self is used."""
if window is None:
window = self
self.tk.call('tkwait', 'visibility', window._w)
def setvar(self, name='PY_VAR', value='1'):
"""Set Tcl variable NAME to VALUE."""
self.tk.setvar(name, value)
def getvar(self, name='PY_VAR'):
"""Return value of Tcl variable NAME."""
return self.tk.getvar(name)
getint = int
getdouble = float
def getboolean(self, s):
"""Return a boolean value for Tcl boolean values true and false given as parameter."""
return self.tk.getboolean(s)
def focus_set(self):
"""Direct input focus to this widget.
If the application currently does not have the focus
this widget will get the focus if the application gets
the focus through the window manager."""
self.tk.call('focus', self._w)
focus = focus_set # XXX b/w compat?
def focus_force(self):
"""Direct input focus to this widget even if the
application does not have the focus. Use with
caution!"""
self.tk.call('focus', '-force', self._w)
def focus_get(self):
"""Return the widget which has currently the focus in the
application.
Use focus_displayof to allow working with several
displays. Return None if application does not have
the focus."""
name = self.tk.call('focus')
if name == 'none' or not name: return None
return self._nametowidget(name)
def focus_displayof(self):
"""Return the widget which has currently the focus on the
display where this widget is located.
Return None if the application does not have the focus."""
name = self.tk.call('focus', '-displayof', self._w)
if name == 'none' or not name: return None
return self._nametowidget(name)
def focus_lastfor(self):
"""Return the widget which would have the focus if top level
for this widget gets the focus from the window manager."""
name = self.tk.call('focus', '-lastfor', self._w)
if name == 'none' or not name: return None
return self._nametowidget(name)
def tk_focusFollowsMouse(self):
"""The widget under the mouse will automatically get the focus. Cannot
be disabled easily."""
self.tk.call('tk_focusFollowsMouse')
def tk_focusNext(self):
"""Return the next widget in the focus order which follows
widget which has currently the focus.
The focus order first goes to the next child, then to
the children of the child recursively and then to the
next sibling which is higher in the stacking order. A
widget is omitted if it has the takefocus resource set
to 0."""
name = self.tk.call('tk_focusNext', self._w)
if not name: return None
return self._nametowidget(name)
def tk_focusPrev(self):
"""Return previous widget in the focus order. See tk_focusNext for details."""
name = self.tk.call('tk_focusPrev', self._w)
if not name: return None
return self._nametowidget(name)
def after(self, ms, func=None, *args):
"""Call function once after given time.
MS specifies the time in milliseconds. FUNC gives the
function which shall be called. Additional parameters
are given as parameters to the function call. Return
identifier to cancel scheduling with after_cancel."""
if not func:
# I'd rather use time.sleep(ms*0.001)
self.tk.call('after', ms)
else:
def callit():
try:
func(*args)
finally:
try:
self.deletecommand(name)
except TclError:
pass
name = self._register(callit)
return self.tk.call('after', ms, name)
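    # Illustrative usage sketch (kept as a comment, not executed; `root` is a
    # hypothetical Tk instance):
    #
    #     def tick():
    #         print("one second passed")
    #     job = root.after(1000, tick)   # run tick() once after 1000 ms
    #     root.after_cancel(job)         # the returned identifier cancels it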
def after_idle(self, func, *args):
"""Call FUNC once if the Tcl main loop has no event to
process.
Return an identifier to cancel the scheduling with
after_cancel."""
return self.after('idle', func, *args)
def after_cancel(self, id):
"""Cancel scheduling of function identified with ID.
Identifier returned by after or after_idle must be
given as first parameter."""
try:
data = self.tk.call('after', 'info', id)
# In Tk 8.3, splitlist returns: (script, type)
# In Tk 8.4, splitlist may return (script, type) or (script,)
script = self.tk.splitlist(data)[0]
self.deletecommand(script)
except TclError:
pass
self.tk.call('after', 'cancel', id)
def bell(self, displayof=0):
"""Ring a display's bell."""
self.tk.call(('bell',) + self._displayof(displayof))
# Clipboard handling:
def clipboard_get(self, **kw):
"""Retrieve data from the clipboard on window's display.
The window keyword defaults to the root window of the Tkinter
application.
The type keyword specifies the form in which the data is
to be returned and should be an atom name such as STRING
or FILE_NAME. Type defaults to STRING.
This command is equivalent to:
selection_get(CLIPBOARD)
"""
return self.tk.call(('clipboard', 'get') + self._options(kw))
def clipboard_clear(self, **kw):
"""Clear the data in the Tk clipboard.
A widget specified for the optional displayof keyword
argument specifies the target display."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('clipboard', 'clear') + self._options(kw))
def clipboard_append(self, string, **kw):
"""Append STRING to the Tk clipboard.
A widget specified at the optional displayof keyword
argument specifies the target display. The clipboard
can be retrieved with selection_get."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('clipboard', 'append') + self._options(kw)
+ ('--', string))
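    # Illustrative usage sketch (kept as a comment, not executed; `root` is a
    # hypothetical Tk instance):
    #
    #     root.clipboard_clear()
    #     root.clipboard_append("hello")
    #     root.clipboard_get()           # -> "hello"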
# XXX grab current w/o window argument
def grab_current(self):
"""Return widget which has currently the grab in this application
or None."""
name = self.tk.call('grab', 'current', self._w)
if not name: return None
return self._nametowidget(name)
def grab_release(self):
"""Release grab for this widget if currently set."""
self.tk.call('grab', 'release', self._w)
def grab_set(self):
"""Set grab for this widget.
A grab directs all events to this and descendant
widgets in the application."""
self.tk.call('grab', 'set', self._w)
def grab_set_global(self):
"""Set global grab for this widget.
A global grab directs all events to this and
descendant widgets on the display. Use with caution -
other applications do not get events anymore."""
self.tk.call('grab', 'set', '-global', self._w)
def grab_status(self):
"""Return None, "local" or "global" if this widget has
no, a local or a global grab."""
status = self.tk.call('grab', 'status', self._w)
if status == 'none': status = None
return status
def option_add(self, pattern, value, priority = None):
"""Set a VALUE (second parameter) for an option
PATTERN (first parameter).
An optional third parameter gives the numeric priority
(defaults to 80)."""
self.tk.call('option', 'add', pattern, value, priority)
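    # Illustrative usage sketch (kept as a comment, not executed; `root` is a
    # hypothetical Tk instance):
    #
    #     root.option_add('*Font', 'Helvetica 12')   # priority defaults to 80
    #     root.option_get('font', 'Font')            # query the option database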
def option_clear(self):
"""Clear the option database.
It will be reloaded if option_add is called."""
self.tk.call('option', 'clear')
def option_get(self, name, className):
"""Return the value for an option NAME for this widget
with CLASSNAME.
Values with higher priority override lower values."""
return self.tk.call('option', 'get', self._w, name, className)
def option_readfile(self, fileName, priority = None):
"""Read file FILENAME into the option database.
An optional second parameter gives the numeric
priority."""
self.tk.call('option', 'readfile', fileName, priority)
def selection_clear(self, **kw):
"""Clear the current X selection."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('selection', 'clear') + self._options(kw))
def selection_get(self, **kw):
"""Return the contents of the current X selection.
A keyword parameter selection specifies the name of
the selection and defaults to PRIMARY. A keyword
parameter displayof specifies a widget on the display
to use."""
if 'displayof' not in kw: kw['displayof'] = self._w
return self.tk.call(('selection', 'get') + self._options(kw))
def selection_handle(self, command, **kw):
"""Specify a function COMMAND to call if the X
selection owned by this widget is queried by another
application.
This function must return the contents of the
selection. The function will be called with the
arguments OFFSET and LENGTH which allows the chunking
of very long selections. The following keyword
parameters can be provided:
selection - name of the selection (default PRIMARY),
type - type of the selection (e.g. STRING, FILE_NAME)."""
name = self._register(command)
self.tk.call(('selection', 'handle') + self._options(kw)
+ (self._w, name))
def selection_own(self, **kw):
"""Become owner of X selection.
A keyword parameter selection specifies the name of
the selection (default PRIMARY)."""
self.tk.call(('selection', 'own') +
self._options(kw) + (self._w,))
def selection_own_get(self, **kw):
"""Return owner of X selection.
The following keyword parameter can
be provided:
selection - name of the selection (default PRIMARY),
type - type of the selection (e.g. STRING, FILE_NAME)."""
if 'displayof' not in kw: kw['displayof'] = self._w
name = self.tk.call(('selection', 'own') + self._options(kw))
if not name: return None
return self._nametowidget(name)
def send(self, interp, cmd, *args):
"""Send Tcl command CMD to different interpreter INTERP to be executed."""
return self.tk.call(('send', interp, cmd) + args)
def lower(self, belowThis=None):
"""Lower this widget in the stacking order."""
self.tk.call('lower', self._w, belowThis)
def tkraise(self, aboveThis=None):
"""Raise this widget in the stacking order."""
self.tk.call('raise', self._w, aboveThis)
lift = tkraise
def colormodel(self, value=None):
"""Useless. Not implemented in Tk."""
return self.tk.call('tk', 'colormodel', self._w, value)
def winfo_atom(self, name, displayof=0):
"""Return integer which represents atom NAME."""
args = ('winfo', 'atom') + self._displayof(displayof) + (name,)
return getint(self.tk.call(args))
def winfo_atomname(self, id, displayof=0):
"""Return name of atom with identifier ID."""
args = ('winfo', 'atomname') \
+ self._displayof(displayof) + (id,)
return self.tk.call(args)
def winfo_cells(self):
"""Return number of cells in the colormap for this widget."""
return getint(
self.tk.call('winfo', 'cells', self._w))
def winfo_children(self):
"""Return a list of all widgets which are children of this widget."""
result = []
for child in self.tk.splitlist(
self.tk.call('winfo', 'children', self._w)):
try:
# Tcl sometimes returns extra windows, e.g. for
# menus; those need to be skipped
result.append(self._nametowidget(child))
except KeyError:
pass
return result
def winfo_class(self):
"""Return window class name of this widget."""
return self.tk.call('winfo', 'class', self._w)
def winfo_colormapfull(self):
"""Return true if at the last color request the colormap was full."""
return self.tk.getboolean(
self.tk.call('winfo', 'colormapfull', self._w))
def winfo_containing(self, rootX, rootY, displayof=0):
"""Return the widget which is at the root coordinates ROOTX, ROOTY."""
args = ('winfo', 'containing') \
+ self._displayof(displayof) + (rootX, rootY)
name = self.tk.call(args)
if not name: return None
return self._nametowidget(name)
def winfo_depth(self):
"""Return the number of bits per pixel."""
return getint(self.tk.call('winfo', 'depth', self._w))
def winfo_exists(self):
"""Return true if this widget exists."""
return getint(
self.tk.call('winfo', 'exists', self._w))
def winfo_fpixels(self, number):
"""Return the number of pixels for the given distance NUMBER
(e.g. "3c") as float."""
return getdouble(self.tk.call(
'winfo', 'fpixels', self._w, number))
def winfo_geometry(self):
"""Return geometry string for this widget in the form "widthxheight+X+Y"."""
return self.tk.call('winfo', 'geometry', self._w)
def winfo_height(self):
"""Return height of this widget."""
return getint(
self.tk.call('winfo', 'height', self._w))
def winfo_id(self):
"""Return identifier ID for this widget."""
return self.tk.getint(
self.tk.call('winfo', 'id', self._w))
def winfo_interps(self, displayof=0):
"""Return the name of all Tcl interpreters for this display."""
args = ('winfo', 'interps') + self._displayof(displayof)
return self.tk.splitlist(self.tk.call(args))
def winfo_ismapped(self):
"""Return true if this widget is mapped."""
return getint(
self.tk.call('winfo', 'ismapped', self._w))
def winfo_manager(self):
"""Return the window mananger name for this widget."""
return self.tk.call('winfo', 'manager', self._w)
def winfo_name(self):
"""Return the name of this widget."""
return self.tk.call('winfo', 'name', self._w)
def winfo_parent(self):
"""Return the name of the parent of this widget."""
return self.tk.call('winfo', 'parent', self._w)
def winfo_pathname(self, id, displayof=0):
"""Return the pathname of the widget given by ID."""
args = ('winfo', 'pathname') \
+ self._displayof(displayof) + (id,)
return self.tk.call(args)
def winfo_pixels(self, number):
"""Rounded integer value of winfo_fpixels."""
return getint(
self.tk.call('winfo', 'pixels', self._w, number))
def winfo_pointerx(self):
"""Return the x coordinate of the pointer on the root window."""
return getint(
self.tk.call('winfo', 'pointerx', self._w))
def winfo_pointerxy(self):
"""Return a tuple of x and y coordinates of the pointer on the root window."""
return self._getints(
self.tk.call('winfo', 'pointerxy', self._w))
def winfo_pointery(self):
"""Return the y coordinate of the pointer on the root window."""
return getint(
self.tk.call('winfo', 'pointery', self._w))
def winfo_reqheight(self):
"""Return requested height of this widget."""
return getint(
self.tk.call('winfo', 'reqheight', self._w))
def winfo_reqwidth(self):
"""Return requested width of this widget."""
return getint(
self.tk.call('winfo', 'reqwidth', self._w))
def winfo_rgb(self, color):
"""Return tuple of decimal values for red, green, blue for
COLOR in this widget."""
return self._getints(
self.tk.call('winfo', 'rgb', self._w, color))
def winfo_rootx(self):
"""Return x coordinate of upper left corner of this widget on the
root window."""
return getint(
self.tk.call('winfo', 'rootx', self._w))
def winfo_rooty(self):
"""Return y coordinate of upper left corner of this widget on the
root window."""
return getint(
self.tk.call('winfo', 'rooty', self._w))
def winfo_screen(self):
"""Return the screen name of this widget."""
return self.tk.call('winfo', 'screen', self._w)
def winfo_screencells(self):
"""Return the number of the cells in the colormap of the screen
of this widget."""
return getint(
self.tk.call('winfo', 'screencells', self._w))
def winfo_screendepth(self):
"""Return the number of bits per pixel of the root window of the
screen of this widget."""
return getint(
self.tk.call('winfo', 'screendepth', self._w))
def winfo_screenheight(self):
"""Return the number of pixels of the height of the screen of this widget
in pixel."""
return getint(
self.tk.call('winfo', 'screenheight', self._w))
def winfo_screenmmheight(self):
"""Return the number of pixels of the height of the screen of
this widget in mm."""
return getint(
self.tk.call('winfo', 'screenmmheight', self._w))
def winfo_screenmmwidth(self):
"""Return the number of pixels of the width of the screen of
this widget in mm."""
return getint(
self.tk.call('winfo', 'screenmmwidth', self._w))
def winfo_screenvisual(self):
"""Return one of the strings directcolor, grayscale, pseudocolor,
staticcolor, staticgray, or truecolor for the default
colormodel of this screen."""
return self.tk.call('winfo', 'screenvisual', self._w)
def winfo_screenwidth(self):
"""Return the number of pixels of the width of the screen of
this widget in pixel."""
return getint(
self.tk.call('winfo', 'screenwidth', self._w))
def winfo_server(self):
"""Return information of the X-Server of the screen of this widget in
the form "XmajorRminor vendor vendorVersion"."""
return self.tk.call('winfo', 'server', self._w)
def winfo_toplevel(self):
"""Return the toplevel widget of this widget."""
return self._nametowidget(self.tk.call(
'winfo', 'toplevel', self._w))
def winfo_viewable(self):
"""Return true if the widget and all its higher ancestors are mapped."""
return getint(
self.tk.call('winfo', 'viewable', self._w))
def winfo_visual(self):
"""Return one of the strings directcolor, grayscale, pseudocolor,
staticcolor, staticgray, or truecolor for the
colormodel of this widget."""
return self.tk.call('winfo', 'visual', self._w)
def winfo_visualid(self):
"""Return the X identifier for the visual for this widget."""
return self.tk.call('winfo', 'visualid', self._w)
def winfo_visualsavailable(self, includeids=0):
"""Return a list of all visuals available for the screen
of this widget.
Each item in the list consists of a visual name (see winfo_visual), a
depth and if INCLUDEIDS=1 is given also the X identifier."""
data = self.tk.split(
self.tk.call('winfo', 'visualsavailable', self._w,
includeids and 'includeids' or None))
if isinstance(data, str):
data = [self.tk.split(data)]
return map(self.__winfo_parseitem, data)
def __winfo_parseitem(self, t):
"""Internal function."""
return t[:1] + tuple(map(self.__winfo_getint, t[1:]))
def __winfo_getint(self, x):
"""Internal function."""
return int(x, 0)
def winfo_vrootheight(self):
"""Return the height of the virtual root window associated with this
widget in pixels. If there is no virtual root window return the
height of the screen."""
return getint(
self.tk.call('winfo', 'vrootheight', self._w))
def winfo_vrootwidth(self):
"""Return the width of the virtual root window associated with this
        widget in pixels. If there is no virtual root window return the
width of the screen."""
return getint(
self.tk.call('winfo', 'vrootwidth', self._w))
def winfo_vrootx(self):
"""Return the x offset of the virtual root relative to the root
window of the screen of this widget."""
return getint(
self.tk.call('winfo', 'vrootx', self._w))
def winfo_vrooty(self):
"""Return the y offset of the virtual root relative to the root
window of the screen of this widget."""
return getint(
self.tk.call('winfo', 'vrooty', self._w))
def winfo_width(self):
"""Return the width of this widget."""
return getint(
self.tk.call('winfo', 'width', self._w))
def winfo_x(self):
"""Return the x coordinate of the upper left corner of this widget
in the parent."""
return getint(
self.tk.call('winfo', 'x', self._w))
def winfo_y(self):
"""Return the y coordinate of the upper left corner of this widget
in the parent."""
return getint(
self.tk.call('winfo', 'y', self._w))
def update(self):
"""Enter event loop until all pending events have been processed by Tcl."""
self.tk.call('update')
def update_idletasks(self):
"""Enter event loop until all idle callbacks have been called. This
will update the display of windows but not process events caused by
the user."""
self.tk.call('update', 'idletasks')
def bindtags(self, tagList=None):
"""Set or get the list of bindtags for this widget.
With no argument return the list of all bindtags associated with
this widget. With a list of strings as argument the bindtags are
set to this list. The bindtags determine in which order events are
processed (see bind)."""
if tagList is None:
return self.tk.splitlist(
self.tk.call('bindtags', self._w))
else:
self.tk.call('bindtags', self._w, tagList)
def _bind(self, what, sequence, func, add, needcleanup=1):
"""Internal function."""
if isinstance(func, str):
self.tk.call(what + (sequence, func))
elif func:
funcid = self._register(func, self._substitute,
needcleanup)
cmd = ('%sif {"[%s %s]" == "break"} break\n'
%
(add and '+' or '',
funcid, self._subst_format_str))
self.tk.call(what + (sequence, cmd))
return funcid
elif sequence:
return self.tk.call(what + (sequence,))
else:
return self.tk.splitlist(self.tk.call(what))
def bind(self, sequence=None, func=None, add=None):
"""Bind to this widget at event SEQUENCE a call to function FUNC.
SEQUENCE is a string of concatenated event
patterns. An event pattern is of the form
<MODIFIER-MODIFIER-TYPE-DETAIL> where MODIFIER is one
of Control, Mod2, M2, Shift, Mod3, M3, Lock, Mod4, M4,
        Button1, B1, Mod5, M5, Button2, B2, Meta, M, Button3,
        B3, Alt, Button4, B4, Double, Button5, B5, Triple,
        Mod1, M1. TYPE is one of Activate, Enter, Map,
        ButtonPress, Button, Expose, Motion, ButtonRelease,
        FocusIn, MouseWheel, Circulate, FocusOut, Property,
        Colormap, Gravity, Reparent, Configure, KeyPress, Key,
        Unmap, Deactivate, KeyRelease, Visibility, Destroy,
Leave and DETAIL is the button number for ButtonPress,
ButtonRelease and DETAIL is the Keysym for KeyPress and
KeyRelease. Examples are
<Control-Button-1> for pressing Control and mouse button 1 or
<Alt-A> for pressing A and the Alt key (KeyPress can be omitted).
An event pattern can also be a virtual event of the form
<<AString>> where AString can be arbitrary. This
event can be generated by event_generate.
If events are concatenated they must appear shortly
after each other.
FUNC will be called if the event sequence occurs with an
instance of Event as argument. If the return value of FUNC is
"break" no further bound function is invoked.
An additional boolean parameter ADD specifies whether FUNC will
be called additionally to the other bound function or whether
it will replace the previous function.
Bind will return an identifier to allow deletion of the bound function with
unbind without memory leak.
If FUNC or SEQUENCE is omitted the bound function or list
of bound events are returned."""
return self._bind(('bind', self._w), sequence, func, add)
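    # Illustrative usage sketch (kept as a comment, not executed; `w` is a
    # hypothetical widget):
    #
    #     def on_click(event):
    #         print("clicked at", event.x, event.y)
    #     funcid = w.bind("<Button-1>", on_click)   # bind mouse button 1
    #     w.unbind("<Button-1>", funcid)            # remove the binding again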
def unbind(self, sequence, funcid=None):
"""Unbind for this widget for event SEQUENCE the
function identified with FUNCID."""
self.tk.call('bind', self._w, sequence, '')
if funcid:
self.deletecommand(funcid)
def bind_all(self, sequence=None, func=None, add=None):
"""Bind to all widgets at an event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will
be called additionally to the other bound function or whether
it will replace the previous function. See bind for the return value."""
return self._bind(('bind', 'all'), sequence, func, add, 0)
def unbind_all(self, sequence):
"""Unbind for all widgets for event SEQUENCE all functions."""
        self.tk.call('bind', 'all', sequence, '')
def bind_class(self, className, sequence=None, func=None, add=None):
"""Bind to widgets with bindtag CLASSNAME at event
SEQUENCE a call of function FUNC. An additional
boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or
whether it will replace the previous function. See bind for
the return value."""
return self._bind(('bind', className), sequence, func, add, 0)
def unbind_class(self, className, sequence):
"""Unbind for a all widgets with bindtag CLASSNAME for event SEQUENCE
all functions."""
        self.tk.call('bind', className, sequence, '')
def mainloop(self, n=0):
"""Call the mainloop of Tk."""
self.tk.mainloop(n)
def quit(self):
"""Quit the Tcl interpreter. All widgets will be destroyed."""
self.tk.quit()
def _getints(self, string):
"""Internal function."""
if string:
return tuple(map(getint, self.tk.splitlist(string)))
def _getdoubles(self, string):
"""Internal function."""
if string:
return tuple(map(getdouble, self.tk.splitlist(string)))
def _getboolean(self, string):
"""Internal function."""
if string:
return self.tk.getboolean(string)
def _displayof(self, displayof):
"""Internal function."""
if displayof:
return ('-displayof', displayof)
if displayof is None:
return ('-displayof', self._w)
return ()
def _options(self, cnf, kw = None):
"""Internal function."""
if kw:
cnf = _cnfmerge((cnf, kw))
else:
cnf = _cnfmerge(cnf)
res = ()
for k, v in cnf.items():
if v is not None:
if k[-1] == '_': k = k[:-1]
if hasattr(v, '__call__'):
v = self._register(v)
elif isinstance(v, (tuple, list)):
nv = []
for item in v:
if isinstance(item, int):
nv.append(str(item))
elif isinstance(item, str):
nv.append(('{%s}' if ' ' in item else '%s') % item)
else:
break
else:
v = ' '.join(nv)
res = res + ('-'+k, v)
return res
def nametowidget(self, name):
"""Return the Tkinter instance of a widget identified by
its Tcl name NAME."""
name = str(name).split('.')
w = self
if not name[0]:
w = w._root()
name = name[1:]
for n in name:
if not n:
break
w = w.children[n]
return w
_nametowidget = nametowidget
def _register(self, func, subst=None, needcleanup=1):
"""Return a newly created Tcl function. If this
function is called, the Python function FUNC will
be executed. An optional function SUBST can
be given which will be executed before FUNC."""
f = CallWrapper(func, subst, self).__call__
name = repr(id(f))
try:
func = func.__func__
except AttributeError:
pass
try:
name = name + func.__name__
except AttributeError:
pass
self.tk.createcommand(name, f)
if needcleanup:
if self._tclCommands is None:
self._tclCommands = []
self._tclCommands.append(name)
return name
register = _register
def _root(self):
"""Internal function."""
w = self
while w.master: w = w.master
return w
_subst_format = ('%#', '%b', '%f', '%h', '%k',
'%s', '%t', '%w', '%x', '%y',
'%A', '%E', '%K', '%N', '%W', '%T', '%X', '%Y', '%D')
_subst_format_str = " ".join(_subst_format)
def _substitute(self, *args):
"""Internal function."""
if len(args) != len(self._subst_format): return args
getboolean = self.tk.getboolean
getint = int
def getint_event(s):
"""Tk changed behavior in 8.4.2, returning "??" rather more often."""
try:
return int(s)
except ValueError:
return s
nsign, b, f, h, k, s, t, w, x, y, A, E, K, N, W, T, X, Y, D = args
# Missing: (a, c, d, m, o, v, B, R)
e = Event()
        # serial field: valid for all events
# number of button: ButtonPress and ButtonRelease events only
# height field: Configure, ConfigureRequest, Create,
# ResizeRequest, and Expose events only
# keycode field: KeyPress and KeyRelease events only
# time field: "valid for events that contain a time field"
# width field: Configure, ConfigureRequest, Create, ResizeRequest,
# and Expose events only
# x field: "valid for events that contain a x field"
# y field: "valid for events that contain a y field"
# keysym as decimal: KeyPress and KeyRelease events only
# x_root, y_root fields: ButtonPress, ButtonRelease, KeyPress,
        # KeyRelease, and Motion events
e.serial = getint(nsign)
e.num = getint_event(b)
try: e.focus = getboolean(f)
except TclError: pass
e.height = getint_event(h)
e.keycode = getint_event(k)
e.state = getint_event(s)
e.time = getint_event(t)
e.width = getint_event(w)
e.x = getint_event(x)
e.y = getint_event(y)
e.char = A
try: e.send_event = getboolean(E)
except TclError: pass
e.keysym = K
e.keysym_num = getint_event(N)
e.type = T
try:
e.widget = self._nametowidget(W)
except KeyError:
e.widget = W
e.x_root = getint_event(X)
e.y_root = getint_event(Y)
try:
e.delta = getint(D)
except ValueError:
e.delta = 0
return (e,)
def _report_exception(self):
"""Internal function."""
import sys
exc, val, tb = sys.exc_info()
root = self._root()
root.report_callback_exception(exc, val, tb)
def _configure(self, cmd, cnf, kw):
"""Internal function."""
if kw:
cnf = _cnfmerge((cnf, kw))
elif cnf:
cnf = _cnfmerge(cnf)
if cnf is None:
cnf = {}
for x in self.tk.split(
self.tk.call(_flatten((self._w, cmd)))):
cnf[x[0][1:]] = (x[0][1:],) + x[1:]
return cnf
if isinstance(cnf, str):
x = self.tk.split(
self.tk.call(_flatten((self._w, cmd, '-'+cnf))))
return (x[0][1:],) + x[1:]
self.tk.call(_flatten((self._w, cmd)) + self._options(cnf))
# These used to be defined in Widget:
def configure(self, cnf=None, **kw):
"""Configure resources of a widget.
The values for resources are specified as keyword
arguments. To get an overview about
the allowed keyword arguments call the method keys.
"""
return self._configure('configure', cnf, kw)
config = configure
def cget(self, key):
"""Return the resource value for a KEY given as string."""
return self.tk.call(self._w, 'cget', '-' + key)
__getitem__ = cget
def __setitem__(self, key, value):
self.configure({key: value})
def keys(self):
"""Return a list of all resource names of this widget."""
return map(lambda x: x[0][1:],
self.tk.split(self.tk.call(self._w, 'configure')))
def __str__(self):
"""Return the window path name of this widget."""
return self._w
# Pack methods that apply to the master
_noarg_ = ['_noarg_']
def pack_propagate(self, flag=_noarg_):
"""Set or get the status for propagation of geometry information.
A boolean argument specifies whether the geometry information
of the slaves will determine the size of this widget. If no argument
is given the current setting will be returned.
"""
if flag is Misc._noarg_:
return self._getboolean(self.tk.call(
'pack', 'propagate', self._w))
else:
self.tk.call('pack', 'propagate', self._w, flag)
propagate = pack_propagate
def pack_slaves(self):
"""Return a list of all slaves of this widget
in its packing order."""
return map(self._nametowidget,
self.tk.splitlist(
self.tk.call('pack', 'slaves', self._w)))
slaves = pack_slaves
# Place method that applies to the master
def place_slaves(self):
"""Return a list of all slaves of this widget
in its packing order."""
return map(self._nametowidget,
self.tk.splitlist(
self.tk.call(
'place', 'slaves', self._w)))
# Grid methods that apply to the master
def grid_bbox(self, column=None, row=None, col2=None, row2=None):
"""Return a tuple of integer coordinates for the bounding
box of this widget controlled by the geometry manager grid.
If COLUMN, ROW is given the bounding box applies from
the cell with row and column 0 to the specified
cell. If COL2 and ROW2 are given the bounding box
starts at that cell.
The returned integers specify the offset of the upper left
corner in the master widget and the width and height.
"""
args = ('grid', 'bbox', self._w)
if column is not None and row is not None:
args = args + (column, row)
if col2 is not None and row2 is not None:
args = args + (col2, row2)
return self._getints(self.tk.call(*args)) or None
bbox = grid_bbox
def _grid_configure(self, command, index, cnf, kw):
"""Internal function."""
if isinstance(cnf, str) and not kw:
if cnf[-1:] == '_':
cnf = cnf[:-1]
if cnf[:1] != '-':
cnf = '-'+cnf
options = (cnf,)
else:
options = self._options(cnf, kw)
if not options:
res = self.tk.call('grid',
command, self._w, index)
words = self.tk.splitlist(res)
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if not value:
value = None
elif '.' in value:
value = getdouble(value)
else:
value = getint(value)
dict[key] = value
return dict
res = self.tk.call(
('grid', command, self._w, index)
+ options)
if len(options) == 1:
if not res: return None
# In Tk 7.5, -width can be a float
if '.' in res: return getdouble(res)
return getint(res)
def grid_columnconfigure(self, index, cnf={}, **kw):
"""Configure column INDEX of a grid.
Valid resources are minsize (minimum size of the column),
        weight (how much extra space is distributed to this column)
        and pad (how much additional space to add to the column)."""
return self._grid_configure('columnconfigure', index, cnf, kw)
columnconfigure = grid_columnconfigure
def grid_location(self, x, y):
"""Return a tuple of column and row which identify the cell
at which the pixel at position X and Y inside the master
widget is located."""
return self._getints(
self.tk.call(
'grid', 'location', self._w, x, y)) or None
def grid_propagate(self, flag=_noarg_):
"""Set or get the status for propagation of geometry information.
A boolean argument specifies whether the geometry information
of the slaves will determine the size of this widget. If no argument
is given, the current setting will be returned.
"""
if flag is Misc._noarg_:
return self._getboolean(self.tk.call(
'grid', 'propagate', self._w))
else:
self.tk.call('grid', 'propagate', self._w, flag)
def grid_rowconfigure(self, index, cnf={}, **kw):
"""Configure row INDEX of a grid.
Valid resources are minsize (minimum size of the row),
        weight (how much extra space is distributed to this row)
        and pad (how much additional space to add to the row)."""
return self._grid_configure('rowconfigure', index, cnf, kw)
rowconfigure = grid_rowconfigure
def grid_size(self):
"""Return a tuple of the number of column and rows in the grid."""
return self._getints(
self.tk.call('grid', 'size', self._w)) or None
size = grid_size
def grid_slaves(self, row=None, column=None):
"""Return a list of all slaves of this widget
in its packing order."""
args = ()
if row is not None:
args = args + ('-row', row)
if column is not None:
args = args + ('-column', column)
return map(self._nametowidget,
self.tk.splitlist(self.tk.call(
('grid', 'slaves', self._w) + args)))
# Support for the "event" command, new in Tk 4.2.
# By Case Roole.
def event_add(self, virtual, *sequences):
"""Bind a virtual event VIRTUAL (of the form <<Name>>)
to an event SEQUENCE such that the virtual event is triggered
whenever SEQUENCE occurs."""
args = ('event', 'add', virtual) + sequences
self.tk.call(args)
def event_delete(self, virtual, *sequences):
"""Unbind a virtual event VIRTUAL from SEQUENCE."""
args = ('event', 'delete', virtual) + sequences
self.tk.call(args)
def event_generate(self, sequence, **kw):
"""Generate an event SEQUENCE. Additional
keyword arguments specify parameter of the event
(e.g. x, y, rootx, rooty)."""
args = ('event', 'generate', self._w, sequence)
for k, v in kw.items():
args = args + ('-%s' % k, str(v))
self.tk.call(args)
def event_info(self, virtual=None):
"""Return a list of all virtual events or the information
about the SEQUENCE bound to the virtual event VIRTUAL."""
return self.tk.splitlist(
self.tk.call('event', 'info', virtual))
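    # Illustrative usage sketch (kept as a comment, not executed; `w` is a
    # hypothetical widget):
    #
    #     w.event_add('<<Save>>', '<Control-s>')          # define a virtual event
    #     w.bind('<<Save>>', lambda e: print("saving"))   # react to it
    #     w.event_generate('<<Save>>')                    # trigger it from code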
# Image related commands
def image_names(self):
"""Return a list of all existing image names."""
return self.tk.call('image', 'names')
def image_types(self):
"""Return a list of all available image types (e.g. phote bitmap)."""
return self.tk.call('image', 'types')
class CallWrapper:
"""Internal class. Stores function to call when some user
defined Tcl function is called e.g. after an event occurred."""
def __init__(self, func, subst, widget):
"""Store FUNC, SUBST and WIDGET as members."""
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
"""Apply first function SUBST to arguments, than FUNC."""
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except SystemExit as msg:
raise SystemExit(msg)
except:
self.widget._report_exception()
class Wm:
"""Provides functions for the communication with the window manager."""
def wm_aspect(self,
minNumer=None, minDenom=None,
maxNumer=None, maxDenom=None):
"""Instruct the window manager to set the aspect ratio (width/height)
of this widget to be between MINNUMER/MINDENOM and MAXNUMER/MAXDENOM. Return a tuple
of the actual values if no argument is given."""
return self._getints(
self.tk.call('wm', 'aspect', self._w,
minNumer, minDenom,
maxNumer, maxDenom))
aspect = wm_aspect
def wm_attributes(self, *args):
"""This subcommand returns or sets platform specific attributes
The first form returns a list of the platform specific flags and
their values. The second form returns the value for the specific
option. The third form sets one or more of the values. The values
are as follows:
On Windows, -disabled gets or sets whether the window is in a
disabled state. -toolwindow gets or sets the style of the window
to toolwindow (as defined in the MSDN). -topmost gets or sets
whether this is a topmost window (displays above all other
windows).
On Macintosh, XXXXX
On Unix, there are currently no special attribute values.
"""
args = ('wm', 'attributes', self._w) + args
return self.tk.call(args)
attributes=wm_attributes
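    # Illustrative usage sketch (kept as a comment, not executed; `top` is a
    # hypothetical Tk or Toplevel window; which flags exist depends on the
    # platform):
    #
    #     top.wm_attributes()                  # all flags and their values
    #     top.wm_attributes('-topmost')        # query a single flag
    #     top.wm_attributes('-topmost', 1)     # keep the window above others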
def wm_client(self, name=None):
"""Store NAME in WM_CLIENT_MACHINE property of this widget. Return
current value."""
return self.tk.call('wm', 'client', self._w, name)
client = wm_client
def wm_colormapwindows(self, *wlist):
"""Store list of window names (WLIST) into WM_COLORMAPWINDOWS property
of this widget. This list contains windows whose colormaps differ from their
parents. Return current list of widgets if WLIST is empty."""
if len(wlist) > 1:
wlist = (wlist,) # Tk needs a list of windows here
args = ('wm', 'colormapwindows', self._w) + wlist
return map(self._nametowidget, self.tk.call(args))
colormapwindows = wm_colormapwindows
def wm_command(self, value=None):
"""Store VALUE in WM_COMMAND property. It is the command
which shall be used to invoke the application. Return current
command if VALUE is None."""
return self.tk.call('wm', 'command', self._w, value)
command = wm_command
def wm_deiconify(self):
"""Deiconify this widget. If it was never mapped it will not be mapped.
On Windows it will raise this widget and give it the focus."""
return self.tk.call('wm', 'deiconify', self._w)
deiconify = wm_deiconify
def wm_focusmodel(self, model=None):
"""Set focus model to MODEL. "active" means that this widget will claim
the focus itself, "passive" means that the window manager shall give
the focus. Return current focus model if MODEL is None."""
return self.tk.call('wm', 'focusmodel', self._w, model)
focusmodel = wm_focusmodel
def wm_frame(self):
"""Return identifier for decorative frame of this widget if present."""
return self.tk.call('wm', 'frame', self._w)
frame = wm_frame
def wm_geometry(self, newGeometry=None):
"""Set geometry to NEWGEOMETRY of the form =widthxheight+x+y. Return
current value if None is given."""
return self.tk.call('wm', 'geometry', self._w, newGeometry)
geometry = wm_geometry
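    # Illustrative usage sketch (kept as a comment, not executed; `top` is a
    # hypothetical Tk or Toplevel window):
    #
    #     top.wm_geometry('400x300+100+50')    # 400x300 pixels at x=100, y=50
    #     top.wm_geometry()                    # -> current geometry string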
def wm_grid(self,
baseWidth=None, baseHeight=None,
widthInc=None, heightInc=None):
"""Instruct the window manager that this widget shall only be
resized on grid boundaries. WIDTHINC and HEIGHTINC are the width and
height of a grid unit in pixels. BASEWIDTH and BASEHEIGHT are the
number of grid units requested in Tk_GeometryRequest."""
return self._getints(self.tk.call(
'wm', 'grid', self._w,
baseWidth, baseHeight, widthInc, heightInc))
grid = wm_grid
def wm_group(self, pathName=None):
"""Set the group leader widgets for related widgets to PATHNAME. Return
the group leader of this widget if None is given."""
return self.tk.call('wm', 'group', self._w, pathName)
group = wm_group
def wm_iconbitmap(self, bitmap=None, default=None):
"""Set bitmap for the iconified widget to BITMAP. Return
the bitmap if None is given.
Under Windows, the DEFAULT parameter can be used to set the icon
        for the widget and any descendants that don't have an icon set
explicitly. DEFAULT can be the relative path to a .ico file
(example: root.iconbitmap(default='myicon.ico') ). See Tk
documentation for more information."""
if default:
return self.tk.call('wm', 'iconbitmap', self._w, '-default', default)
else:
return self.tk.call('wm', 'iconbitmap', self._w, bitmap)
iconbitmap = wm_iconbitmap
def wm_iconify(self):
"""Display widget as icon."""
return self.tk.call('wm', 'iconify', self._w)
iconify = wm_iconify
def wm_iconmask(self, bitmap=None):
"""Set mask for the icon bitmap of this widget. Return the
mask if None is given."""
return self.tk.call('wm', 'iconmask', self._w, bitmap)
iconmask = wm_iconmask
def wm_iconname(self, newName=None):
"""Set the name of the icon for this widget. Return the name if
None is given."""
return self.tk.call('wm', 'iconname', self._w, newName)
iconname = wm_iconname
def wm_iconposition(self, x=None, y=None):
"""Set the position of the icon of this widget to X and Y. Return
        a tuple of the current values of X and Y if None is given."""
return self._getints(self.tk.call(
'wm', 'iconposition', self._w, x, y))
iconposition = wm_iconposition
def wm_iconwindow(self, pathName=None):
"""Set widget PATHNAME to be displayed instead of icon. Return the current
value if None is given."""
return self.tk.call('wm', 'iconwindow', self._w, pathName)
iconwindow = wm_iconwindow
def wm_maxsize(self, width=None, height=None):
"""Set max WIDTH and HEIGHT for this widget. If the window is gridded
the values are given in grid units. Return the current values if None
is given."""
return self._getints(self.tk.call(
'wm', 'maxsize', self._w, width, height))
maxsize = wm_maxsize
def wm_minsize(self, width=None, height=None):
"""Set min WIDTH and HEIGHT for this widget. If the window is gridded
the values are given in grid units. Return the current values if None
is given."""
return self._getints(self.tk.call(
'wm', 'minsize', self._w, width, height))
minsize = wm_minsize
def wm_overrideredirect(self, boolean=None):
"""Instruct the window manager to ignore this widget
if BOOLEAN is given with 1. Return the current value if None
is given."""
return self._getboolean(self.tk.call(
'wm', 'overrideredirect', self._w, boolean))
overrideredirect = wm_overrideredirect
def wm_positionfrom(self, who=None):
"""Instruct the window manager that the position of this widget shall
be defined by the user if WHO is "user", and by its own policy if WHO is
"program"."""
return self.tk.call('wm', 'positionfrom', self._w, who)
positionfrom = wm_positionfrom
def wm_protocol(self, name=None, func=None):
"""Bind function FUNC to command NAME for this widget.
Return the function bound to NAME if None is given. NAME could be
e.g. "WM_SAVE_YOURSELF" or "WM_DELETE_WINDOW"."""
if hasattr(func, '__call__'):
command = self._register(func)
else:
command = func
return self.tk.call(
'wm', 'protocol', self._w, name, command)
protocol = wm_protocol
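    # Illustrative usage sketch (kept as a comment, not executed; `top` is a
    # hypothetical Tk or Toplevel window):
    #
    #     def on_close():
    #         print("window closing")
    #         top.destroy()
    #     top.wm_protocol("WM_DELETE_WINDOW", on_close)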
def wm_resizable(self, width=None, height=None):
"""Instruct the window manager whether this width can be resized
in WIDTH or HEIGHT. Both values are boolean values."""
return self.tk.call('wm', 'resizable', self._w, width, height)
resizable = wm_resizable
def wm_sizefrom(self, who=None):
"""Instruct the window manager that the size of this widget shall
be defined by the user if WHO is "user", and by its own policy if WHO is
"program"."""
return self.tk.call('wm', 'sizefrom', self._w, who)
sizefrom = wm_sizefrom
def wm_state(self, newstate=None):
"""Query or set the state of this widget as one of normal, icon,
iconic (see wm_iconwindow), withdrawn, or zoomed (Windows only)."""
return self.tk.call('wm', 'state', self._w, newstate)
state = wm_state
def wm_title(self, string=None):
"""Set the title of this widget."""
return self.tk.call('wm', 'title', self._w, string)
title = wm_title
def wm_transient(self, master=None):
"""Instruct the window manager that this widget is transient
with regard to widget MASTER."""
return self.tk.call('wm', 'transient', self._w, master)
transient = wm_transient
def wm_withdraw(self):
"""Withdraw this widget from the screen such that it is unmapped
and forgotten by the window manager. Re-draw it with wm_deiconify."""
return self.tk.call('wm', 'withdraw', self._w)
withdraw = wm_withdraw
class Tk(Misc, Wm):
"""Toplevel widget of Tk which represents mostly the main window
    of an application. It has an associated Tcl interpreter."""
_w = '.'
def __init__(self, screenName=None, baseName=None, className='Tk',
useTk=1, sync=0, use=None):
"""Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will
be created. BASENAME will be used for the identification of the profile file (see
readprofile).
It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME
is the name of the widget class."""
self.master = None
self.children = {}
self._tkloaded = 0
# to avoid recursions in the getattr code in case of failure, we
# ensure that self.tk is always _something_.
self.tk = None
if baseName is None:
import sys, os
baseName = os.path.basename(sys.argv[0])
baseName, ext = os.path.splitext(baseName)
if ext not in ('.py', '.pyc', '.pyo'):
baseName = baseName + ext
interactive = 0
self.tk = _tkinter.create(screenName, baseName, className, interactive, wantobjects, useTk, sync, use)
if useTk:
self._loadtk()
self.readprofile(baseName, className)
def loadtk(self):
if not self._tkloaded:
self.tk.loadtk()
self._loadtk()
def _loadtk(self):
self._tkloaded = 1
global _default_root
# Version sanity checks
tk_version = self.tk.getvar('tk_version')
if tk_version != _tkinter.TK_VERSION:
raise RuntimeError("tk.h version (%s) doesn't match libtk.a version (%s)"
% (_tkinter.TK_VERSION, tk_version))
# Under unknown circumstances, tcl_version gets coerced to float
tcl_version = str(self.tk.getvar('tcl_version'))
if tcl_version != _tkinter.TCL_VERSION:
raise RuntimeError("tcl.h version (%s) doesn't match libtcl.a version (%s)" \
% (_tkinter.TCL_VERSION, tcl_version))
if TkVersion < 4.0:
raise RuntimeError("Tk 4.0 or higher is required; found Tk %s"
% str(TkVersion))
# Create and register the tkerror and exit commands
        # We need to inline parts of _register here; _register
        # would register differently-named commands.
if self._tclCommands is None:
self._tclCommands = []
self.tk.createcommand('tkerror', _tkerror)
self.tk.createcommand('exit', _exit)
self._tclCommands.append('tkerror')
self._tclCommands.append('exit')
if _support_default_root and not _default_root:
_default_root = self
self.protocol("WM_DELETE_WINDOW", self.destroy)
def destroy(self):
"""Destroy this and all descendants widgets. This will
end the application of this Tcl interpreter."""
for c in list(self.children.values()): c.destroy()
self.tk.call('destroy', self._w)
Misc.destroy(self)
global _default_root
if _support_default_root and _default_root is self:
_default_root = None
def readprofile(self, baseName, className):
"""Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into
the Tcl Interpreter and calls exec on the contents of BASENAME.py and
CLASSNAME.py if such a file exists in the home directory."""
import os
if 'HOME' in os.environ: home = os.environ['HOME']
else: home = os.curdir
class_tcl = os.path.join(home, '.%s.tcl' % className)
class_py = os.path.join(home, '.%s.py' % className)
base_tcl = os.path.join(home, '.%s.tcl' % baseName)
base_py = os.path.join(home, '.%s.py' % baseName)
dir = {'self': self}
exec('from tkinter import *', dir)
if os.path.isfile(class_tcl):
self.tk.call('source', class_tcl)
if os.path.isfile(class_py):
exec(open(class_py).read(), dir)
if os.path.isfile(base_tcl):
self.tk.call('source', base_tcl)
if os.path.isfile(base_py):
exec(open(base_py).read(), dir)
def report_callback_exception(self, exc, val, tb):
"""Internal function. It reports exception on sys.stderr."""
import traceback, sys
sys.stderr.write("Exception in Tkinter callback\n")
sys.last_type = exc
sys.last_value = val
sys.last_traceback = tb
traceback.print_exception(exc, val, tb)
def __getattr__(self, attr):
"Delegate attribute access to the interpreter object"
return getattr(self.tk, attr)
# Ideally, the classes Pack, Place and Grid disappear, the
# pack/place/grid methods are defined on the Widget class, and
# everybody uses w.pack_whatever(...) instead of Pack.whatever(w,
# ...), with pack(), place() and grid() being short for
# pack_configure(), place_configure() and grid_columnconfigure(), and
# forget() being short for pack_forget(). As a practical matter, I'm
# afraid that there is too much code out there that may be using the
# Pack, Place or Grid class, so I leave them intact -- but only as
# backwards compatibility features. Also note that those methods that
# take a master as argument (e.g. pack_propagate) have been moved to
# the Misc class (which now incorporates all methods common between
# toplevel and interior widgets). Again, for compatibility, these are
# copied into the Pack, Place or Grid class.
def Tcl(screenName=None, baseName=None, className='Tk', useTk=0):
return Tk(screenName, baseName, className, useTk)
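# Illustrative usage sketch (kept as a comment, not executed): Tcl() gives
# access to a Tcl interpreter without loading the Tk GUI parts.
#
#     tcl = Tcl()
#     tcl.eval('expr {1 + 1}')   # -> '2'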
class Pack:
"""Geometry manager Pack.
Base class to use the methods pack_* in every widget."""
def pack_configure(self, cnf={}, **kw):
"""Pack a widget in the parent widget. Use as options:
after=widget - pack it after you have packed widget
anchor=NSEW (or subset) - position widget according to
given direction
before=widget - pack it before you will pack widget
expand=bool - expand widget if parent size grows
fill=NONE or X or Y or BOTH - fill widget if widget grows
in=master - use master to contain this widget
in_=master - see 'in' option description
ipadx=amount - add internal padding in x direction
ipady=amount - add internal padding in y direction
padx=amount - add padding in x direction
pady=amount - add padding in y direction
side=TOP or BOTTOM or LEFT or RIGHT - where to add this widget.
"""
self.tk.call(
('pack', 'configure', self._w)
+ self._options(cnf, kw))
pack = configure = config = pack_configure
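    # Illustrative usage sketch (kept as a comment, not executed; `root` is a
    # hypothetical Tk instance):
    #
    #     header = Label(root, text="header")
    #     header.pack(side=TOP, fill=X)
    #     body = Frame(root)
    #     body.pack(expand=1, fill=BOTH, padx=4, pady=4)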
def pack_forget(self):
"""Unmap this widget and do not use it for the packing order."""
self.tk.call('pack', 'forget', self._w)
forget = pack_forget
def pack_info(self):
"""Return information about the packing options
for this widget."""
words = self.tk.splitlist(
self.tk.call('pack', 'info', self._w))
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if value[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
info = pack_info
propagate = pack_propagate = Misc.pack_propagate
slaves = pack_slaves = Misc.pack_slaves
class Place:
"""Geometry manager Place.
Base class to use the methods place_* in every widget."""
def place_configure(self, cnf={}, **kw):
"""Place a widget in the parent widget. Use as options:
in=master - master relative to which the widget is placed
in_=master - see 'in' option description
x=amount - locate anchor of this widget at position x of master
y=amount - locate anchor of this widget at position y of master
relx=amount - locate anchor of this widget between 0.0 and 1.0
relative to width of master (1.0 is right edge)
rely=amount - locate anchor of this widget between 0.0 and 1.0
relative to height of master (1.0 is bottom edge)
anchor=NSEW (or subset) - position anchor according to given direction
        width=amount - width of this widget in pixels
        height=amount - height of this widget in pixels
relwidth=amount - width of this widget between 0.0 and 1.0
relative to width of master (1.0 is the same width
as the master)
relheight=amount - height of this widget between 0.0 and 1.0
relative to height of master (1.0 is the same
height as the master)
bordermode="inside" or "outside" - whether to take border width of
master widget into account
"""
self.tk.call(
('place', 'configure', self._w)
+ self._options(cnf, kw))
place = configure = config = place_configure
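    # Illustrative usage sketch (kept as a comment, not executed; `root` is a
    # hypothetical Tk instance):
    #
    #     btn = Button(root, text="centered")
    #     btn.place(relx=0.5, rely=0.5, anchor=CENTER)   # center in the master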
def place_forget(self):
"""Unmap this widget."""
self.tk.call('place', 'forget', self._w)
forget = place_forget
def place_info(self):
"""Return information about the placing options
for this widget."""
words = self.tk.splitlist(
self.tk.call('place', 'info', self._w))
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if value[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
info = place_info
slaves = place_slaves = Misc.place_slaves
class Grid:
"""Geometry manager Grid.
Base class to use the methods grid_* in every widget."""
# Thanks to Masazumi Yoshikawa ([email protected])
def grid_configure(self, cnf={}, **kw):
"""Position a widget in the parent widget in a grid. Use as options:
column=number - use cell identified with given column (starting with 0)
columnspan=number - this widget will span several columns
in=master - use master to contain this widget
in_=master - see 'in' option description
ipadx=amount - add internal padding in x direction
ipady=amount - add internal padding in y direction
padx=amount - add padding in x direction
pady=amount - add padding in y direction
row=number - use cell identified with given row (starting with 0)
rowspan=number - this widget will span several rows
sticky=NSEW - if cell is larger on which sides will this
widget stick to the cell boundary
"""
self.tk.call(
('grid', 'configure', self._w)
+ self._options(cnf, kw))
grid = configure = config = grid_configure
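    # Illustrative usage sketch (kept as a comment, not executed; `root` is a
    # hypothetical Tk instance):
    #
    #     Label(root, text="Name:").grid(row=0, column=0, sticky=E)
    #     Entry(root).grid(row=0, column=1, sticky=EW)
    #     root.grid_columnconfigure(1, weight=1)   # column 1 absorbs extra space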
bbox = grid_bbox = Misc.grid_bbox
columnconfigure = grid_columnconfigure = Misc.grid_columnconfigure
def grid_forget(self):
"""Unmap this widget."""
self.tk.call('grid', 'forget', self._w)
forget = grid_forget
def grid_remove(self):
"""Unmap this widget but remember the grid options."""
self.tk.call('grid', 'remove', self._w)
def grid_info(self):
"""Return information about the options
for positioning this widget in a grid."""
words = self.tk.splitlist(
self.tk.call('grid', 'info', self._w))
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if value[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
info = grid_info
location = grid_location = Misc.grid_location
propagate = grid_propagate = Misc.grid_propagate
rowconfigure = grid_rowconfigure = Misc.grid_rowconfigure
size = grid_size = Misc.grid_size
slaves = grid_slaves = Misc.grid_slaves
class BaseWidget(Misc):
"""Internal class."""
def _setup(self, master, cnf):
"""Internal function. Sets up information about children."""
if _support_default_root:
global _default_root
if not master:
if not _default_root:
_default_root = Tk()
master = _default_root
self.master = master
self.tk = master.tk
name = None
if 'name' in cnf:
name = cnf['name']
del cnf['name']
if not name:
name = repr(id(self))
self._name = name
if master._w=='.':
self._w = '.' + name
else:
self._w = master._w + '.' + name
self.children = {}
if self._name in self.master.children:
self.master.children[self._name].destroy()
self.master.children[self._name] = self
def __init__(self, master, widgetName, cnf={}, kw={}, extra=()):
"""Construct a widget with the parent widget MASTER, a name WIDGETNAME
and appropriate options."""
if kw:
cnf = _cnfmerge((cnf, kw))
self.widgetName = widgetName
BaseWidget._setup(self, master, cnf)
if self._tclCommands is None:
self._tclCommands = []
classes = [(k, v) for k, v in cnf.items() if isinstance(k, type)]
for k, v in classes:
del cnf[k]
self.tk.call(
(widgetName, self._w) + extra + self._options(cnf))
for k, v in classes:
k.configure(self, v)
def destroy(self):
"""Destroy this and all descendants widgets."""
for c in list(self.children.values()): c.destroy()
self.tk.call('destroy', self._w)
if self._name in self.master.children:
del self.master.children[self._name]
Misc.destroy(self)
def _do(self, name, args=()):
# XXX Obsolete -- better use self.tk.call directly!
return self.tk.call((self._w, name) + args)
class Widget(BaseWidget, Pack, Place, Grid):
"""Internal class.
Base class for a widget which can be positioned with the geometry managers
Pack, Place or Grid."""
pass
class Toplevel(BaseWidget, Wm):
"""Toplevel widget, e.g. for dialogs."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a toplevel widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, class,
colormap, container, cursor, height, highlightbackground,
highlightcolor, highlightthickness, menu, relief, screen, takefocus,
use, visual, width."""
if kw:
cnf = _cnfmerge((cnf, kw))
extra = ()
for wmkey in ['screen', 'class_', 'class', 'visual',
'colormap']:
if wmkey in cnf:
val = cnf[wmkey]
# TBD: a hack needed because some keys
# are not valid as keyword arguments
if wmkey[-1] == '_': opt = '-'+wmkey[:-1]
else: opt = '-'+wmkey
extra = extra + (opt, val)
del cnf[wmkey]
BaseWidget.__init__(self, master, 'toplevel', cnf, {}, extra)
root = self._root()
self.iconname(root.iconname())
self.title(root.title())
self.protocol("WM_DELETE_WINDOW", self.destroy)
class Button(Widget):
"""Button widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a button widget with the parent MASTER.
STANDARD OPTIONS
activebackground, activeforeground, anchor,
background, bitmap, borderwidth, cursor,
disabledforeground, font, foreground
highlightbackground, highlightcolor,
highlightthickness, image, justify,
padx, pady, relief, repeatdelay,
repeatinterval, takefocus, text,
textvariable, underline, wraplength
WIDGET-SPECIFIC OPTIONS
command, compound, default, height,
overrelief, state, width
"""
Widget.__init__(self, master, 'button', cnf, kw)
def tkButtonEnter(self, *dummy):
self.tk.call('tkButtonEnter', self._w)
def tkButtonLeave(self, *dummy):
self.tk.call('tkButtonLeave', self._w)
def tkButtonDown(self, *dummy):
self.tk.call('tkButtonDown', self._w)
def tkButtonUp(self, *dummy):
self.tk.call('tkButtonUp', self._w)
def tkButtonInvoke(self, *dummy):
self.tk.call('tkButtonInvoke', self._w)
def flash(self):
"""Flash the button.
This is accomplished by redisplaying
the button several times, alternating between active and
normal colors. At the end of the flash the button is left
in the same normal/active state as when the command was
invoked. This command is ignored if the button's state is
disabled.
"""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Invoke the command associated with the button.
The return value is the return value from the command,
or an empty string if there is no command associated with
the button. This command is ignored if the button's state
is disabled.
"""
return self.tk.call(self._w, 'invoke')
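    # Illustrative usage sketch (kept as a comment, not executed; `root` is a
    # hypothetical Tk instance):
    #
    #     def say_hi():
    #         print("hi")
    #     b = Button(root, text="Hello", command=say_hi)
    #     b.pack()
    #     b.invoke()    # calls say_hi() as if the button had been pressed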
# Indices:
# XXX I don't like these -- take them away
def AtEnd():
return 'end'
def AtInsert(*args):
s = 'insert'
for a in args:
if a: s = s + (' ' + a)
return s
def AtSelFirst():
return 'sel.first'
def AtSelLast():
return 'sel.last'
def At(x, y=None):
if y is None:
return '@%r' % (x,)
else:
return '@%r,%r' % (x, y)
class Canvas(Widget):
"""Canvas widget to display graphical elements like lines or text."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a canvas widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, closeenough,
confine, cursor, height, highlightbackground, highlightcolor,
highlightthickness, insertbackground, insertborderwidth,
insertofftime, insertontime, insertwidth, offset, relief,
scrollregion, selectbackground, selectborderwidth, selectforeground,
state, takefocus, width, xscrollcommand, xscrollincrement,
yscrollcommand, yscrollincrement."""
Widget.__init__(self, master, 'canvas', cnf, kw)
def addtag(self, *args):
"""Internal function."""
self.tk.call((self._w, 'addtag') + args)
def addtag_above(self, newtag, tagOrId):
"""Add tag NEWTAG to all items above TAGORID."""
self.addtag(newtag, 'above', tagOrId)
def addtag_all(self, newtag):
"""Add tag NEWTAG to all items."""
self.addtag(newtag, 'all')
def addtag_below(self, newtag, tagOrId):
"""Add tag NEWTAG to all items below TAGORID."""
self.addtag(newtag, 'below', tagOrId)
def addtag_closest(self, newtag, x, y, halo=None, start=None):
"""Add tag NEWTAG to item which is closest to pixel at X, Y.
If several match take the top-most.
All items closer than HALO are considered overlapping (all are
        closest). If START is specified the next below this tag is taken."""
self.addtag(newtag, 'closest', x, y, halo, start)
def addtag_enclosed(self, newtag, x1, y1, x2, y2):
"""Add tag NEWTAG to all items in the rectangle defined
by X1,Y1,X2,Y2."""
self.addtag(newtag, 'enclosed', x1, y1, x2, y2)
def addtag_overlapping(self, newtag, x1, y1, x2, y2):
"""Add tag NEWTAG to all items which overlap the rectangle
defined by X1,Y1,X2,Y2."""
self.addtag(newtag, 'overlapping', x1, y1, x2, y2)
def addtag_withtag(self, newtag, tagOrId):
"""Add tag NEWTAG to all items with TAGORID."""
self.addtag(newtag, 'withtag', tagOrId)
def bbox(self, *args):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
which encloses all items with tags specified as arguments."""
return self._getints(
self.tk.call((self._w, 'bbox') + args)) or None
def tag_unbind(self, tagOrId, sequence, funcid=None):
"""Unbind for all items with TAGORID for event SEQUENCE the
function identified with FUNCID."""
self.tk.call(self._w, 'bind', tagOrId, sequence, '')
if funcid:
self.deletecommand(funcid)
def tag_bind(self, tagOrId, sequence=None, func=None, add=None):
"""Bind to all items with TAGORID at event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or whether it will
replace the previous function. See bind for the return value."""
return self._bind((self._w, 'bind', tagOrId),
sequence, func, add)
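# Illustrative sketch (not part of the original module): reacting to clicks on
# every canvas item carrying the tag 'box'; assumes a Canvas named canvas and a
# handler on_click(event) already exist.
#
#   canvas.tag_bind('box', '<Button-1>', on_click)
#   canvas.tag_unbind('box', '<Button-1>')   # remove the binding again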
def canvasx(self, screenx, gridspacing=None):
"""Return the canvas x coordinate of pixel position SCREENX rounded
to nearest multiple of GRIDSPACING units."""
return getdouble(self.tk.call(
self._w, 'canvasx', screenx, gridspacing))
def canvasy(self, screeny, gridspacing=None):
"""Return the canvas y coordinate of pixel position SCREENY rounded
to nearest multiple of GRIDSPACING units."""
return getdouble(self.tk.call(
self._w, 'canvasy', screeny, gridspacing))
def coords(self, *args):
"""Return a list of coordinates for the item given in ARGS."""
# XXX Should use _flatten on args
return map(getdouble,
self.tk.splitlist(
self.tk.call((self._w, 'coords') + args)))
def _create(self, itemType, args, kw): # Args: (val, val, ..., cnf={})
"""Internal function."""
args = _flatten(args)
cnf = args[-1]
if isinstance(cnf, (dict, tuple)):
args = args[:-1]
else:
cnf = {}
return getint(self.tk.call(
self._w, 'create', itemType,
*(args + self._options(cnf, kw))))
def create_arc(self, *args, **kw):
"""Create arc shaped region with coordinates x1,y1,x2,y2."""
return self._create('arc', args, kw)
def create_bitmap(self, *args, **kw):
"""Create bitmap with coordinates x1,y1."""
return self._create('bitmap', args, kw)
def create_image(self, *args, **kw):
"""Create image item with coordinates x1,y1."""
return self._create('image', args, kw)
def create_line(self, *args, **kw):
"""Create line with coordinates x1,y1,...,xn,yn."""
return self._create('line', args, kw)
def create_oval(self, *args, **kw):
"""Create oval with coordinates x1,y1,x2,y2."""
return self._create('oval', args, kw)
def create_polygon(self, *args, **kw):
"""Create polygon with coordinates x1,y1,...,xn,yn."""
return self._create('polygon', args, kw)
def create_rectangle(self, *args, **kw):
"""Create rectangle with coordinates x1,y1,x2,y2."""
return self._create('rectangle', args, kw)
def create_text(self, *args, **kw):
"""Create text with coordinates x1,y1."""
return self._create('text', args, kw)
def create_window(self, *args, **kw):
"""Create window with coordinates x1,y1,x2,y2."""
return self._create('window', args, kw)
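# Illustrative sketch (not part of the original module): creating a few canvas
# items; assumes a Tk root already exists.
#
#   canvas = Canvas(root, width=200, height=100)
#   canvas.pack()
#   canvas.create_line(0, 0, 200, 100, fill='blue')
#   canvas.create_rectangle(10, 10, 60, 40, tags='box')
#   canvas.create_text(100, 50, text='hello')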
def dchars(self, *args):
"""Delete characters of text items identified by tag or id in ARGS (possibly
several times) from FIRST to LAST character (including)."""
self.tk.call((self._w, 'dchars') + args)
def delete(self, *args):
"""Delete items identified by all tag or ids contained in ARGS."""
self.tk.call((self._w, 'delete') + args)
def dtag(self, *args):
"""Delete tag or id given as last arguments in ARGS from items
identified by first argument in ARGS."""
self.tk.call((self._w, 'dtag') + args)
def find(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'find') + args)) or ()
def find_above(self, tagOrId):
"""Return items above TAGORID."""
return self.find('above', tagOrId)
def find_all(self):
"""Return all items."""
return self.find('all')
def find_below(self, tagOrId):
"""Return all items below TAGORID."""
return self.find('below', tagOrId)
def find_closest(self, x, y, halo=None, start=None):
"""Return item which is closest to pixel at X, Y.
If several match take the top-most.
All items closer than HALO are considered overlapping (all are
closest). If START is specified the next item below this tag is taken."""
return self.find('closest', x, y, halo, start)
def find_enclosed(self, x1, y1, x2, y2):
"""Return all items in rectangle defined
by X1,Y1,X2,Y2."""
return self.find('enclosed', x1, y1, x2, y2)
def find_overlapping(self, x1, y1, x2, y2):
"""Return all items which overlap the rectangle
defined by X1,Y1,X2,Y2."""
return self.find('overlapping', x1, y1, x2, y2)
def find_withtag(self, tagOrId):
"""Return all items with TAGORID."""
return self.find('withtag', tagOrId)
def focus(self, *args):
"""Set focus to the first item specified in ARGS."""
return self.tk.call((self._w, 'focus') + args)
def gettags(self, *args):
"""Return tags associated with the first item specified in ARGS."""
return self.tk.splitlist(
self.tk.call((self._w, 'gettags') + args))
def icursor(self, *args):
"""Set cursor at position POS in the item identified by TAGORID.
In ARGS TAGORID must be first."""
self.tk.call((self._w, 'icursor') + args)
def index(self, *args):
"""Return position of cursor as integer in item specified in ARGS."""
return getint(self.tk.call((self._w, 'index') + args))
def insert(self, *args):
"""Insert TEXT in item TAGORID at position POS. ARGS must
be TAGORID POS TEXT."""
self.tk.call((self._w, 'insert') + args)
def itemcget(self, tagOrId, option):
"""Return the resource value for an OPTION for item TAGORID."""
return self.tk.call(
(self._w, 'itemcget') + (tagOrId, '-'+option))
def itemconfigure(self, tagOrId, cnf=None, **kw):
"""Configure resources of an item TAGORID.
The values for resources are specified as keyword
arguments. To get an overview about
the allowed keyword arguments call the method without arguments.
"""
return self._configure(('itemconfigure', tagOrId), cnf, kw)
itemconfig = itemconfigure
# lower, tkraise/lift hide Misc.lower, Misc.tkraise/lift,
# so the preferred name for them is tag_lower, tag_raise
# (similar to tag_bind, and similar to the Text widget);
# unfortunately can't delete the old ones yet (maybe in 1.6)
def tag_lower(self, *args):
"""Lower an item TAGORID given in ARGS
(optional below another item)."""
self.tk.call((self._w, 'lower') + args)
lower = tag_lower
def move(self, *args):
"""Move an item TAGORID given in ARGS."""
self.tk.call((self._w, 'move') + args)
def postscript(self, cnf={}, **kw):
"""Print the contents of the canvas to a postscript
file. Valid options: colormap, colormode, file, fontmap,
height, pageanchor, pageheight, pagewidth, pagex, pagey,
rotate, width, x, y."""
return self.tk.call((self._w, 'postscript') +
self._options(cnf, kw))
def tag_raise(self, *args):
"""Raise an item TAGORID given in ARGS
(optional above another item)."""
self.tk.call((self._w, 'raise') + args)
lift = tkraise = tag_raise
def scale(self, *args):
"""Scale item TAGORID with XORIGIN, YORIGIN, XSCALE, YSCALE."""
self.tk.call((self._w, 'scale') + args)
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y, gain=10):
"""Adjust the view of the canvas to GAIN times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y, gain)
def select_adjust(self, tagOrId, index):
"""Adjust the end of the selection near the cursor of an item TAGORID to INDEX."""
self.tk.call(self._w, 'select', 'adjust', tagOrId, index)
def select_clear(self):
"""Clear the selection if it is in this widget."""
self.tk.call(self._w, 'select', 'clear')
def select_from(self, tagOrId, index):
"""Set the fixed end of a selection in item TAGORID to INDEX."""
self.tk.call(self._w, 'select', 'from', tagOrId, index)
def select_item(self):
"""Return the item which has the selection."""
return self.tk.call(self._w, 'select', 'item') or None
def select_to(self, tagOrId, index):
"""Set the variable end of a selection in item TAGORID to INDEX."""
self.tk.call(self._w, 'select', 'to', tagOrId, index)
def type(self, tagOrId):
"""Return the type of the item TAGORID."""
return self.tk.call(self._w, 'type', tagOrId) or None
def xview(self, *args):
"""Query and change horizontal position of the view."""
if not args:
return self._getdoubles(self.tk.call(self._w, 'xview'))
self.tk.call((self._w, 'xview') + args)
def xview_moveto(self, fraction):
"""Adjusts the view in the window so that FRACTION of the
total width of the canvas is off-screen to the left."""
self.tk.call(self._w, 'xview', 'moveto', fraction)
def xview_scroll(self, number, what):
"""Shift the x-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
self.tk.call(self._w, 'xview', 'scroll', number, what)
def yview(self, *args):
"""Query and change vertical position of the view."""
if not args:
return self._getdoubles(self.tk.call(self._w, 'yview'))
self.tk.call((self._w, 'yview') + args)
def yview_moveto(self, fraction):
"""Adjusts the view in the window so that FRACTION of the
total height of the canvas is off-screen to the top."""
self.tk.call(self._w, 'yview', 'moveto', fraction)
def yview_scroll(self, number, what):
"""Shift the y-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
self.tk.call(self._w, 'yview', 'scroll', number, what)
class Checkbutton(Widget):
"""Checkbutton widget which is either in on- or off-state."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a checkbutton widget with the parent MASTER.
Valid resource names: activebackground, activeforeground, anchor,
background, bd, bg, bitmap, borderwidth, command, cursor,
disabledforeground, fg, font, foreground, height,
highlightbackground, highlightcolor, highlightthickness, image,
indicatoron, justify, offvalue, onvalue, padx, pady, relief,
selectcolor, selectimage, state, takefocus, text, textvariable,
underline, variable, width, wraplength."""
Widget.__init__(self, master, 'checkbutton', cnf, kw)
def deselect(self):
"""Put the button in off-state."""
self.tk.call(self._w, 'deselect')
def flash(self):
"""Flash the button."""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Toggle the button and invoke a command if given as resource."""
return self.tk.call(self._w, 'invoke')
def select(self):
"""Put the button in on-state."""
self.tk.call(self._w, 'select')
def toggle(self):
"""Toggle the button."""
self.tk.call(self._w, 'toggle')
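# Illustrative sketch (not part of the original module): a checkbutton backed by
# an IntVar holding its on/off state; assumes a Tk root already exists.
#
#   state = IntVar(root)
#   check = Checkbutton(root, text='enabled', variable=state, onvalue=1, offvalue=0)
#   check.pack()
#   check.select()
#   state.get()    # -> 1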
class Entry(Widget):
"""Entry widget which allows displaying simple text."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct an entry widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, cursor,
exportselection, fg, font, foreground, highlightbackground,
highlightcolor, highlightthickness, insertbackground,
insertborderwidth, insertofftime, insertontime, insertwidth,
invalidcommand, invcmd, justify, relief, selectbackground,
selectborderwidth, selectforeground, show, state, takefocus,
textvariable, validate, validatecommand, vcmd, width,
xscrollcommand."""
Widget.__init__(self, master, 'entry', cnf, kw)
def delete(self, first, last=None):
"""Delete text from FIRST to LAST (not included)."""
self.tk.call(self._w, 'delete', first, last)
def get(self):
"""Return the text."""
return self.tk.call(self._w, 'get')
def icursor(self, index):
"""Insert cursor at INDEX."""
self.tk.call(self._w, 'icursor', index)
def index(self, index):
"""Return position of cursor."""
return getint(self.tk.call(
self._w, 'index', index))
def insert(self, index, string):
"""Insert STRING at INDEX."""
self.tk.call(self._w, 'insert', index, string)
def scan_mark(self, x):
"""Remember the current X coordinate."""
self.tk.call(self._w, 'scan', 'mark', x)
def scan_dragto(self, x):
"""Adjust the view of the entry to 10 times the
difference between X and the coordinate given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x)
def selection_adjust(self, index):
"""Adjust the end of the selection near the cursor to INDEX."""
self.tk.call(self._w, 'selection', 'adjust', index)
select_adjust = selection_adjust
def selection_clear(self):
"""Clear the selection if it is in this widget."""
self.tk.call(self._w, 'selection', 'clear')
select_clear = selection_clear
def selection_from(self, index):
"""Set the fixed end of a selection to INDEX."""
self.tk.call(self._w, 'selection', 'from', index)
select_from = selection_from
def selection_present(self):
"""Return whether the widget has the selection."""
return self.tk.getboolean(
self.tk.call(self._w, 'selection', 'present'))
select_present = selection_present
def selection_range(self, start, end):
"""Set the selection from START to END (not included)."""
self.tk.call(self._w, 'selection', 'range', start, end)
select_range = selection_range
def selection_to(self, index):
"""Set the variable end of a selection to INDEX."""
self.tk.call(self._w, 'selection', 'to', index)
select_to = selection_to
def xview(self, index):
"""Query and change horizontal position of the view."""
self.tk.call(self._w, 'xview', index)
def xview_moveto(self, fraction):
"""Adjust the view in the window so that FRACTION of the
total width of the entry is off-screen to the left."""
self.tk.call(self._w, 'xview', 'moveto', fraction)
def xview_scroll(self, number, what):
"""Shift the x-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
self.tk.call(self._w, 'xview', 'scroll', number, what)
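# Illustrative sketch (not part of the original module): a password-style entry
# that masks its contents with '*'; assumes a Tk root already exists.
#
#   entry = Entry(root, show='*')
#   entry.pack()
#   entry.insert(0, 'secret')
#   entry.get()    # -> 'secret'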
class Frame(Widget):
"""Frame widget which may contain other widgets and can have a 3D border."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a frame widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, class,
colormap, container, cursor, height, highlightbackground,
highlightcolor, highlightthickness, relief, takefocus, visual, width."""
cnf = _cnfmerge((cnf, kw))
extra = ()
if 'class_' in cnf:
extra = ('-class', cnf['class_'])
del cnf['class_']
elif 'class' in cnf:
extra = ('-class', cnf['class'])
del cnf['class']
Widget.__init__(self, master, 'frame', cnf, {}, extra)
class Label(Widget):
"""Label widget which can display text and bitmaps."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a label widget with the parent MASTER.
STANDARD OPTIONS
activebackground, activeforeground, anchor,
background, bitmap, borderwidth, cursor,
disabledforeground, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, image, justify,
padx, pady, relief, takefocus, text,
textvariable, underline, wraplength
WIDGET-SPECIFIC OPTIONS
height, state, width
"""
Widget.__init__(self, master, 'label', cnf, kw)
class Listbox(Widget):
"""Listbox widget which can display a list of strings."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a listbox widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, cursor,
exportselection, fg, font, foreground, height, highlightbackground,
highlightcolor, highlightthickness, relief, selectbackground,
selectborderwidth, selectforeground, selectmode, setgrid, takefocus,
width, xscrollcommand, yscrollcommand, listvariable."""
Widget.__init__(self, master, 'listbox', cnf, kw)
def activate(self, index):
"""Activate item identified by INDEX."""
self.tk.call(self._w, 'activate', index)
def bbox(self, *args):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
which encloses the item identified by index in ARGS."""
return self._getints(
self.tk.call((self._w, 'bbox') + args)) or None
def curselection(self):
"""Return list of indices of currently selected item."""
# XXX Ought to apply self._getints()...
return self.tk.splitlist(self.tk.call(
self._w, 'curselection'))
def delete(self, first, last=None):
"""Delete items from FIRST to LAST (not included)."""
self.tk.call(self._w, 'delete', first, last)
def get(self, first, last=None):
"""Get list of items from FIRST to LAST (not included)."""
if last:
return self.tk.splitlist(self.tk.call(
self._w, 'get', first, last))
else:
return self.tk.call(self._w, 'get', first)
def index(self, index):
"""Return index of item identified with INDEX."""
i = self.tk.call(self._w, 'index', index)
if i == 'none': return None
return getint(i)
def insert(self, index, *elements):
"""Insert ELEMENTS at INDEX."""
self.tk.call((self._w, 'insert', index) + elements)
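# Illustrative sketch (not part of the original module): populating a listbox
# and reading back the current selection; assumes a Tk root already exists.
#
#   listbox = Listbox(root, selectmode=SINGLE)
#   listbox.pack()
#   for name in ('red', 'green', 'blue'):
#       listbox.insert(END, name)
#   listbox.selection_set(0)
#   listbox.curselection()    # -> indices of the selected items, as strings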
def nearest(self, y):
"""Get index of item which is nearest to y coordinate Y."""
return getint(self.tk.call(
self._w, 'nearest', y))
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y):
"""Adjust the view of the listbox to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y)
def see(self, index):
"""Scroll such that INDEX is visible."""
self.tk.call(self._w, 'see', index)
def selection_anchor(self, index):
"""Set the fixed end of the selection to INDEX."""
self.tk.call(self._w, 'selection', 'anchor', index)
select_anchor = selection_anchor
def selection_clear(self, first, last=None):
"""Clear the selection from FIRST to LAST (not included)."""
self.tk.call(self._w,
'selection', 'clear', first, last)
select_clear = selection_clear
def selection_includes(self, index):
"""Return 1 if INDEX is part of the selection."""
return self.tk.getboolean(self.tk.call(
self._w, 'selection', 'includes', index))
select_includes = selection_includes
def selection_set(self, first, last=None):
"""Set the selection from FIRST to LAST (not included) without
changing the currently selected elements."""
self.tk.call(self._w, 'selection', 'set', first, last)
select_set = selection_set
def size(self):
"""Return the number of elements in the listbox."""
return getint(self.tk.call(self._w, 'size'))
def xview(self, *what):
"""Query and change horizontal position of the view."""
if not what:
return self._getdoubles(self.tk.call(self._w, 'xview'))
self.tk.call((self._w, 'xview') + what)
def xview_moveto(self, fraction):
"""Adjust the view in the window so that FRACTION of the
total width of the listbox is off-screen to the left."""
self.tk.call(self._w, 'xview', 'moveto', fraction)
def xview_scroll(self, number, what):
"""Shift the x-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
self.tk.call(self._w, 'xview', 'scroll', number, what)
def yview(self, *what):
"""Query and change vertical position of the view."""
if not what:
return self._getdoubles(self.tk.call(self._w, 'yview'))
self.tk.call((self._w, 'yview') + what)
def yview_moveto(self, fraction):
"""Adjust the view in the window so that FRACTION of the
total height of the listbox is off-screen to the top."""
self.tk.call(self._w, 'yview', 'moveto', fraction)
def yview_scroll(self, number, what):
"""Shift the y-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
self.tk.call(self._w, 'yview', 'scroll', number, what)
def itemcget(self, index, option):
"""Return the resource value for an ITEM and an OPTION."""
return self.tk.call(
(self._w, 'itemcget') + (index, '-'+option))
def itemconfigure(self, index, cnf=None, **kw):
"""Configure resources of an ITEM.
The values for resources are specified as keyword arguments.
To get an overview about the allowed keyword arguments
call the method without arguments.
Valid resource names: background, bg, foreground, fg,
selectbackground, selectforeground."""
return self._configure(('itemconfigure', index), cnf, kw)
itemconfig = itemconfigure
class Menu(Widget):
"""Menu widget which allows displaying menu bars, pull-down menus and pop-up menus."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct menu widget with the parent MASTER.
Valid resource names: activebackground, activeborderwidth,
activeforeground, background, bd, bg, borderwidth, cursor,
disabledforeground, fg, font, foreground, postcommand, relief,
selectcolor, takefocus, tearoff, tearoffcommand, title, type."""
Widget.__init__(self, master, 'menu', cnf, kw)
def tk_bindForTraversal(self):
pass # obsolete since Tk 4.0
def tk_mbPost(self):
self.tk.call('tk_mbPost', self._w)
def tk_mbUnpost(self):
self.tk.call('tk_mbUnpost')
def tk_traverseToMenu(self, char):
self.tk.call('tk_traverseToMenu', self._w, char)
def tk_traverseWithinMenu(self, char):
self.tk.call('tk_traverseWithinMenu', self._w, char)
def tk_getMenuButtons(self):
return self.tk.call('tk_getMenuButtons', self._w)
def tk_nextMenu(self, count):
self.tk.call('tk_nextMenu', count)
def tk_nextMenuEntry(self, count):
self.tk.call('tk_nextMenuEntry', count)
def tk_invokeMenu(self):
self.tk.call('tk_invokeMenu', self._w)
def tk_firstMenu(self):
self.tk.call('tk_firstMenu', self._w)
def tk_mbButtonDown(self):
self.tk.call('tk_mbButtonDown', self._w)
def tk_popup(self, x, y, entry=""):
"""Post the menu at position X,Y with entry ENTRY."""
self.tk.call('tk_popup', self._w, x, y, entry)
def activate(self, index):
"""Activate entry at INDEX."""
self.tk.call(self._w, 'activate', index)
def add(self, itemType, cnf={}, **kw):
"""Internal function."""
self.tk.call((self._w, 'add', itemType) +
self._options(cnf, kw))
def add_cascade(self, cnf={}, **kw):
"""Add hierarchical menu item."""
self.add('cascade', cnf or kw)
def add_checkbutton(self, cnf={}, **kw):
"""Add checkbutton menu item."""
self.add('checkbutton', cnf or kw)
def add_command(self, cnf={}, **kw):
"""Add command menu item."""
self.add('command', cnf or kw)
def add_radiobutton(self, cnf={}, **kw):
"""Add radiobutton menu item."""
self.add('radiobutton', cnf or kw)
def add_separator(self, cnf={}, **kw):
"""Add separator."""
self.add('separator', cnf or kw)
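# Illustrative sketch (not part of the original module): assembling a simple
# File menu on a menubar; assumes a Tk root and a callback open_file already exist.
#
#   menubar = Menu(root)
#   filemenu = Menu(menubar, tearoff=0)
#   filemenu.add_command(label='Open...', command=open_file)
#   filemenu.add_separator()
#   filemenu.add_command(label='Quit', command=root.destroy)
#   menubar.add_cascade(label='File', menu=filemenu)
#   root.config(menu=menubar)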
def insert(self, index, itemType, cnf={}, **kw):
"""Internal function."""
self.tk.call((self._w, 'insert', index, itemType) +
self._options(cnf, kw))
def insert_cascade(self, index, cnf={}, **kw):
"""Add hierarchical menu item at INDEX."""
self.insert(index, 'cascade', cnf or kw)
def insert_checkbutton(self, index, cnf={}, **kw):
"""Add checkbutton menu item at INDEX."""
self.insert(index, 'checkbutton', cnf or kw)
def insert_command(self, index, cnf={}, **kw):
"""Add command menu item at INDEX."""
self.insert(index, 'command', cnf or kw)
def insert_radiobutton(self, index, cnf={}, **kw):
"""Add radiobutton menu item at INDEX."""
self.insert(index, 'radiobutton', cnf or kw)
def insert_separator(self, index, cnf={}, **kw):
"""Add separator at INDEX."""
self.insert(index, 'separator', cnf or kw)
def delete(self, index1, index2=None):
"""Delete menu items between INDEX1 and INDEX2 (included)."""
if index2 is None:
index2 = index1
num_index1, num_index2 = self.index(index1), self.index(index2)
if (num_index1 is None) or (num_index2 is None):
num_index1, num_index2 = 0, -1
for i in range(num_index1, num_index2 + 1):
if 'command' in self.entryconfig(i):
c = str(self.entrycget(i, 'command'))
if c:
self.deletecommand(c)
self.tk.call(self._w, 'delete', index1, index2)
def entrycget(self, index, option):
"""Return the resource value of a menu item for OPTION at INDEX."""
return self.tk.call(self._w, 'entrycget', index, '-' + option)
def entryconfigure(self, index, cnf=None, **kw):
"""Configure a menu item at INDEX."""
return self._configure(('entryconfigure', index), cnf, kw)
entryconfig = entryconfigure
def index(self, index):
"""Return the index of a menu item identified by INDEX."""
i = self.tk.call(self._w, 'index', index)
if i == 'none': return None
return getint(i)
def invoke(self, index):
"""Invoke a menu item identified by INDEX and execute
the associated command."""
return self.tk.call(self._w, 'invoke', index)
def post(self, x, y):
"""Display a menu at position X,Y."""
self.tk.call(self._w, 'post', x, y)
def type(self, index):
"""Return the type of the menu item at INDEX."""
return self.tk.call(self._w, 'type', index)
def unpost(self):
"""Unmap a menu."""
self.tk.call(self._w, 'unpost')
def yposition(self, index):
"""Return the y-position of the topmost pixel of the menu item at INDEX."""
return getint(self.tk.call(
self._w, 'yposition', index))
class Menubutton(Widget):
"""Menubutton widget, obsolete since Tk8.0."""
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'menubutton', cnf, kw)
class Message(Widget):
"""Message widget to display multiline text. Obsolete since Label does it too."""
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'message', cnf, kw)
class Radiobutton(Widget):
"""Radiobutton widget which shows only one of several buttons in on-state."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a radiobutton widget with the parent MASTER.
Valid resource names: activebackground, activeforeground, anchor,
background, bd, bg, bitmap, borderwidth, command, cursor,
disabledforeground, fg, font, foreground, height,
highlightbackground, highlightcolor, highlightthickness, image,
indicatoron, justify, padx, pady, relief, selectcolor, selectimage,
state, takefocus, text, textvariable, underline, value, variable,
width, wraplength."""
Widget.__init__(self, master, 'radiobutton', cnf, kw)
def deselect(self):
"""Put the button in off-state."""
self.tk.call(self._w, 'deselect')
def flash(self):
"""Flash the button."""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Toggle the button and invoke a command if given as resource."""
return self.tk.call(self._w, 'invoke')
def select(self):
"""Put the button in on-state."""
self.tk.call(self._w, 'select')
class Scale(Widget):
"""Scale widget which can display a numerical scale."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a scale widget with the parent MASTER.
Valid resource names: activebackground, background, bigincrement, bd,
bg, borderwidth, command, cursor, digits, fg, font, foreground, from,
highlightbackground, highlightcolor, highlightthickness, label,
length, orient, relief, repeatdelay, repeatinterval, resolution,
showvalue, sliderlength, sliderrelief, state, takefocus,
tickinterval, to, troughcolor, variable, width."""
Widget.__init__(self, master, 'scale', cnf, kw)
def get(self):
"""Get the current value as integer or float."""
value = self.tk.call(self._w, 'get')
try:
return getint(value)
except ValueError:
return getdouble(value)
def set(self, value):
"""Set the value to VALUE."""
self.tk.call(self._w, 'set', value)
def coords(self, value=None):
"""Return a tuple (X,Y) of the point along the centerline of the
trough that corresponds to VALUE or the current value if None is
given."""
return self._getints(self.tk.call(self._w, 'coords', value))
def identify(self, x, y):
"""Return where the point X,Y lies. Valid return values are "slider",
"trough1" and "trough2"."""
return self.tk.call(self._w, 'identify', x, y)
class Scrollbar(Widget):
"""Scrollbar widget which displays a slider at a certain position."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a scrollbar widget with the parent MASTER.
Valid resource names: activebackground, activerelief,
background, bd, bg, borderwidth, command, cursor,
elementborderwidth, highlightbackground,
highlightcolor, highlightthickness, jump, orient,
relief, repeatdelay, repeatinterval, takefocus,
troughcolor, width."""
Widget.__init__(self, master, 'scrollbar', cnf, kw)
def activate(self, index):
"""Display the element at INDEX with activebackground and activerelief.
INDEX can be "arrow1","slider" or "arrow2"."""
self.tk.call(self._w, 'activate', index)
def delta(self, deltax, deltay):
"""Return the fractional change of the scrollbar setting if it
would be moved by DELTAX or DELTAY pixels."""
return getdouble(
self.tk.call(self._w, 'delta', deltax, deltay))
def fraction(self, x, y):
"""Return the fractional value which corresponds to a slider
position of X,Y."""
return getdouble(self.tk.call(self._w, 'fraction', x, y))
def identify(self, x, y):
"""Return the element under position X,Y as one of
"arrow1","slider","arrow2" or ""."""
return self.tk.call(self._w, 'identify', x, y)
def get(self):
"""Return the current fractional values (upper and lower end)
of the slider position."""
return self._getdoubles(self.tk.call(self._w, 'get'))
def set(self, *args):
"""Set the fractional values of the slider position (upper and
lower ends as value between 0 and 1)."""
self.tk.call((self._w, 'set') + args)
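# Illustrative sketch (not part of the original module): wiring a scrollbar to a
# listbox -- the scrollbar's command drives yview, and the widget reports its
# position back through yscrollcommand; assumes a Tk root already exists.
#
#   scrollbar = Scrollbar(root, orient=VERTICAL)
#   listbox = Listbox(root, yscrollcommand=scrollbar.set)
#   scrollbar.config(command=listbox.yview)
#   scrollbar.pack(side=RIGHT, fill=Y)
#   listbox.pack(side=LEFT, fill=BOTH, expand=1)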
class Text(Widget):
"""Text widget which can display text in various forms."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a text widget with the parent MASTER.
STANDARD OPTIONS
background, borderwidth, cursor,
exportselection, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, insertbackground,
insertborderwidth, insertofftime,
insertontime, insertwidth, padx, pady,
relief, selectbackground,
selectborderwidth, selectforeground,
setgrid, takefocus,
xscrollcommand, yscrollcommand,
WIDGET-SPECIFIC OPTIONS
autoseparators, height, maxundo,
spacing1, spacing2, spacing3,
state, tabs, undo, width, wrap,
"""
Widget.__init__(self, master, 'text', cnf, kw)
def bbox(self, *args):
"""Return a tuple of (x,y,width,height) which gives the bounding
box of the visible part of the character at the index in ARGS."""
return self._getints(
self.tk.call((self._w, 'bbox') + args)) or None
def tk_textSelectTo(self, index):
self.tk.call('tk_textSelectTo', self._w, index)
def tk_textBackspace(self):
self.tk.call('tk_textBackspace', self._w)
def tk_textIndexCloser(self, a, b, c):
self.tk.call('tk_textIndexCloser', self._w, a, b, c)
def tk_textResetAnchor(self, index):
self.tk.call('tk_textResetAnchor', self._w, index)
def compare(self, index1, op, index2):
"""Return whether between index INDEX1 and index INDEX2 the
relation OP is satisfied. OP is one of <, <=, ==, >=, >, or !=."""
return self.tk.getboolean(self.tk.call(
self._w, 'compare', index1, op, index2))
def debug(self, boolean=None):
"""Turn on the internal consistency checks of the B-Tree inside the text
widget according to BOOLEAN."""
return self.tk.getboolean(self.tk.call(
self._w, 'debug', boolean))
def delete(self, index1, index2=None):
"""Delete the characters between INDEX1 and INDEX2 (not included)."""
self.tk.call(self._w, 'delete', index1, index2)
def dlineinfo(self, index):
"""Return tuple (x,y,width,height,baseline) giving the bounding box
and baseline position of the visible part of the line containing
the character at INDEX."""
return self._getints(self.tk.call(self._w, 'dlineinfo', index))
def dump(self, index1, index2=None, command=None, **kw):
"""Return the contents of the widget between index1 and index2.
The type of contents returned is filtered based on the keyword
parameters; if 'all', 'image', 'mark', 'tag', 'text', or 'window' are
given and true, then the corresponding items are returned. The result
is a list of triples of the form (key, value, index). If none of the
keywords are true then 'all' is used by default.
If the 'command' argument is given, it is called once for each element
of the list of triples, with the values of each triple serving as the
arguments to the function. In this case the list is not returned."""
args = []
func_name = None
result = None
if not command:
# Never call the dump command without the -command flag, since the
# output could involve Tcl quoting and would be a pain to parse
# right. Instead just set the command to build a list of triples
# as if we had done the parsing.
result = []
def append_triple(key, value, index, result=result):
result.append((key, value, index))
command = append_triple
try:
if not isinstance(command, str):
func_name = command = self._register(command)
args += ["-command", command]
for key in kw:
if kw[key]: args.append("-" + key)
args.append(index1)
if index2:
args.append(index2)
self.tk.call(self._w, "dump", *args)
return result
finally:
if func_name:
self.deletecommand(func_name)
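# Illustrative sketch (not part of the original module): dumping the textual
# contents of a Text widget as (key, value, index) triples; assumes a Tk root
# already exists.
#
#   text = Text(root)
#   text.insert('1.0', 'hello world')
#   text.dump('1.0', END, text=True)
#   # -> [('text', 'hello world\n', '1.0')]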
## new in tk8.4
def edit(self, *args):
"""Internal method
This method controls the undo mechanism and
the modified flag. The exact behavior of the
command depends on the option argument that
follows the edit argument. The following forms
of the command are currently supported:
edit_modified, edit_redo, edit_reset, edit_separator
and edit_undo
"""
return self.tk.call(self._w, 'edit', *args)
def edit_modified(self, arg=None):
"""Get or Set the modified flag
If arg is not specified, returns the modified
flag of the widget. The insert, delete, edit undo and
edit redo commands or the user can set or clear the
modified flag. If arg is specified, sets the
modified flag of the widget to arg.
"""
return self.edit("modified", arg)
def edit_redo(self):
"""Redo the last undone edit
When the undo option is true, reapplies the last
undone edits provided no other edits were done since
then. Generates an error when the redo stack is empty.
Does nothing when the undo option is false.
"""
return self.edit("redo")
def edit_reset(self):
"""Clears the undo and redo stacks
"""
return self.edit("reset")
def edit_separator(self):
"""Inserts a separator (boundary) on the undo stack.
Does nothing when the undo option is false
"""
return self.edit("separator")
def edit_undo(self):
"""Undoes the last edit action
If the undo option is true. An edit action is defined
as all the insert and delete commands that are recorded
on the undo stack in between two separators. Generates
an error when the undo stack is empty. Does nothing
when the undo option is false
"""
return self.edit("undo")
def get(self, index1, index2=None):
"""Return the text from INDEX1 to INDEX2 (not included)."""
return self.tk.call(self._w, 'get', index1, index2)
# (Image commands are new in 8.0)
def image_cget(self, index, option):
"""Return the value of OPTION of an embedded image at INDEX."""
if option[:1] != "-":
option = "-" + option
if option[-1:] == "_":
option = option[:-1]
return self.tk.call(self._w, "image", "cget", index, option)
def image_configure(self, index, cnf=None, **kw):
"""Configure an embedded image at INDEX."""
return self._configure(('image', 'configure', index), cnf, kw)
def image_create(self, index, cnf={}, **kw):
"""Create an embedded image at INDEX."""
return self.tk.call(
self._w, "image", "create", index,
*self._options(cnf, kw))
def image_names(self):
"""Return all names of embedded images in this widget."""
return self.tk.call(self._w, "image", "names")
def index(self, index):
"""Return the index in the form line.char for INDEX."""
return str(self.tk.call(self._w, 'index', index))
def insert(self, index, chars, *args):
"""Insert CHARS before the characters at INDEX. An additional
tag can be given in ARGS. Additional CHARS and tags can follow in ARGS."""
self.tk.call((self._w, 'insert', index, chars) + args)
def mark_gravity(self, markName, direction=None):
"""Change the gravity of a mark MARKNAME to DIRECTION (LEFT or RIGHT).
Return the current value if None is given for DIRECTION."""
return self.tk.call(
(self._w, 'mark', 'gravity', markName, direction))
def mark_names(self):
"""Return all mark names."""
return self.tk.splitlist(self.tk.call(
self._w, 'mark', 'names'))
def mark_set(self, markName, index):
"""Set mark MARKNAME before the character at INDEX."""
self.tk.call(self._w, 'mark', 'set', markName, index)
def mark_unset(self, *markNames):
"""Delete all marks in MARKNAMES."""
self.tk.call((self._w, 'mark', 'unset') + markNames)
def mark_next(self, index):
"""Return the name of the next mark after INDEX."""
return self.tk.call(self._w, 'mark', 'next', index) or None
def mark_previous(self, index):
"""Return the name of the previous mark before INDEX."""
return self.tk.call(self._w, 'mark', 'previous', index) or None
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y):
"""Adjust the view of the text to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y)
def search(self, pattern, index, stopindex=None,
forwards=None, backwards=None, exact=None,
regexp=None, nocase=None, count=None, elide=None):
"""Search PATTERN beginning from INDEX until STOPINDEX.
Return the index of the first character of a match or an
empty string."""
args = [self._w, 'search']
if forwards: args.append('-forwards')
if backwards: args.append('-backwards')
if exact: args.append('-exact')
if regexp: args.append('-regexp')
if nocase: args.append('-nocase')
if elide: args.append('-elide')
if count: args.append('-count'); args.append(count)
if pattern and pattern[0] == '-': args.append('--')
args.append(pattern)
args.append(index)
if stopindex: args.append(stopindex)
return str(self.tk.call(tuple(args)))
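# Illustrative sketch (not part of the original module): a case-insensitive
# regexp search that reports the match length through an IntVar; assumes a Text
# widget named text containing 'hello world' already exists.
#
#   count = IntVar()
#   pos = text.search(r'WOR.D', '1.0', stopindex=END, regexp=True, nocase=True, count=count)
#   # pos is '1.6' for that text; count.get() gives the length of the match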
def see(self, index):
"""Scroll such that the character at INDEX is visible."""
self.tk.call(self._w, 'see', index)
def tag_add(self, tagName, index1, *args):
"""Add tag TAGNAME to all characters between INDEX1 and index2 in ARGS.
Additional pairs of indices may follow in ARGS."""
self.tk.call(
(self._w, 'tag', 'add', tagName, index1) + args)
def tag_unbind(self, tagName, sequence, funcid=None):
"""Unbind for all characters with TAGNAME for event SEQUENCE the
function identified with FUNCID."""
self.tk.call(self._w, 'tag', 'bind', tagName, sequence, '')
if funcid:
self.deletecommand(funcid)
def tag_bind(self, tagName, sequence, func, add=None):
"""Bind to all characters with TAGNAME at event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or whether it will
replace the previous function. See bind for the return value."""
return self._bind((self._w, 'tag', 'bind', tagName),
sequence, func, add)
def tag_cget(self, tagName, option):
"""Return the value of OPTION for tag TAGNAME."""
if option[:1] != '-':
option = '-' + option
if option[-1:] == '_':
option = option[:-1]
return self.tk.call(self._w, 'tag', 'cget', tagName, option)
def tag_configure(self, tagName, cnf=None, **kw):
"""Configure a tag TAGNAME."""
return self._configure(('tag', 'configure', tagName), cnf, kw)
tag_config = tag_configure
def tag_delete(self, *tagNames):
"""Delete all tags in TAGNAMES."""
self.tk.call((self._w, 'tag', 'delete') + tagNames)
def tag_lower(self, tagName, belowThis=None):
"""Change the priority of tag TAGNAME such that it is lower
than the priority of BELOWTHIS."""
self.tk.call(self._w, 'tag', 'lower', tagName, belowThis)
def tag_names(self, index=None):
"""Return a list of all tag names."""
return self.tk.splitlist(
self.tk.call(self._w, 'tag', 'names', index))
def tag_nextrange(self, tagName, index1, index2=None):
"""Return a list of start and end index for the first sequence of
characters between INDEX1 and INDEX2 which all have tag TAGNAME.
The text is searched forward from INDEX1."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'nextrange', tagName, index1, index2))
def tag_prevrange(self, tagName, index1, index2=None):
"""Return a list of start and end index for the first sequence of
characters between INDEX1 and INDEX2 which all have tag TAGNAME.
The text is searched backwards from INDEX1."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'prevrange', tagName, index1, index2))
def tag_raise(self, tagName, aboveThis=None):
"""Change the priority of tag TAGNAME such that it is higher
than the priority of ABOVETHIS."""
self.tk.call(
self._w, 'tag', 'raise', tagName, aboveThis)
def tag_ranges(self, tagName):
"""Return a list of ranges of text which have tag TAGNAME."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'ranges', tagName))
def tag_remove(self, tagName, index1, index2=None):
"""Remove tag TAGNAME from all characters between INDEX1 and INDEX2."""
self.tk.call(
self._w, 'tag', 'remove', tagName, index1, index2)
def window_cget(self, index, option):
"""Return the value of OPTION of an embedded window at INDEX."""
if option[:1] != '-':
option = '-' + option
if option[-1:] == '_':
option = option[:-1]
return self.tk.call(self._w, 'window', 'cget', index, option)
def window_configure(self, index, cnf=None, **kw):
"""Configure an embedded window at INDEX."""
return self._configure(('window', 'configure', index), cnf, kw)
window_config = window_configure
def window_create(self, index, cnf={}, **kw):
"""Create a window at INDEX."""
self.tk.call(
(self._w, 'window', 'create', index)
+ self._options(cnf, kw))
def window_names(self):
"""Return all names of embedded windows in this widget."""
return self.tk.splitlist(
self.tk.call(self._w, 'window', 'names'))
def xview(self, *what):
"""Query and change horizontal position of the view."""
if not what:
return self._getdoubles(self.tk.call(self._w, 'xview'))
self.tk.call((self._w, 'xview') + what)
def xview_moveto(self, fraction):
"""Adjusts the view in the window so that FRACTION of the
total width of the canvas is off-screen to the left."""
self.tk.call(self._w, 'xview', 'moveto', fraction)
def xview_scroll(self, number, what):
"""Shift the x-view according to NUMBER which is measured
in "units" or "pages" (WHAT)."""
self.tk.call(self._w, 'xview', 'scroll', number, what)
def yview(self, *what):
"""Query and change vertical position of the view."""
if not what:
return self._getdoubles(self.tk.call(self._w, 'yview'))
self.tk.call((self._w, 'yview') + what)
def yview_moveto(self, fraction):
"""Adjusts the view in the window so that FRACTION of the
total height of the canvas is off-screen to the top."""
self.tk.call(self._w, 'yview', 'moveto', fraction)
def yview_scroll(self, number, what):
"""Shift the y-view according to NUMBER which is measured
in "units" or "pages" (WHAT)."""
self.tk.call(self._w, 'yview', 'scroll', number, what)
def yview_pickplace(self, *what):
"""Obsolete function, use see."""
self.tk.call((self._w, 'yview', '-pickplace') + what)
class _setit:
"""Internal class. It wraps the command in the widget OptionMenu."""
def __init__(self, var, value, callback=None):
self.__value = value
self.__var = var
self.__callback = callback
def __call__(self, *args):
self.__var.set(self.__value)
if self.__callback:
self.__callback(self.__value, *args)
class OptionMenu(Menubutton):
"""OptionMenu which allows the user to select a value from a menu."""
def __init__(self, master, variable, value, *values, **kwargs):
"""Construct an optionmenu widget with the parent MASTER, with
the resource textvariable set to VARIABLE, the initially selected
value VALUE, the other menu values VALUES and an additional
keyword argument command."""
kw = {"borderwidth": 2, "textvariable": variable,
"indicatoron": 1, "relief": RAISED, "anchor": "c",
"highlightthickness": 2}
Widget.__init__(self, master, "menubutton", kw)
self.widgetName = 'tk_optionMenu'
menu = self.__menu = Menu(self, name="menu", tearoff=0)
self.menuname = menu._w
# 'command' is the only supported keyword
callback = kwargs.get('command')
if 'command' in kwargs:
del kwargs['command']
if kwargs:
raise TclError('unknown option -'+kwargs.keys()[0])
menu.add_command(label=value,
command=_setit(variable, value, callback))
for v in values:
menu.add_command(label=v,
command=_setit(variable, v, callback))
self["menu"] = menu
def __getitem__(self, name):
if name == 'menu':
return self.__menu
return Widget.__getitem__(self, name)
def destroy(self):
"""Destroy this widget and the associated menu."""
Menubutton.destroy(self)
self.__menu = None
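# Illustrative sketch (not part of the original module): an OptionMenu bound to
# a StringVar, with an optional callback fired on selection; assumes a Tk root
# and a callback chosen(value) already exist.
#
#   var = StringVar(root)
#   var.set('red')
#   option = OptionMenu(root, var, 'red', 'green', 'blue', command=chosen)
#   option.pack()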
class Image:
"""Base class for images."""
_last_id = 0
def __init__(self, imgtype, name=None, cnf={}, master=None, **kw):
self.name = None
if not master:
master = _default_root
if not master:
raise RuntimeError('Too early to create image')
self.tk = master.tk
if not name:
Image._last_id += 1
name = "pyimage%r" % (Image._last_id,) # tk itself would use image<x>
# The following is needed for systems where id(x)
# can return a negative number, such as Linux/m68k:
if name[0] == '-': name = '_' + name[1:]
if kw and cnf: cnf = _cnfmerge((cnf, kw))
elif kw: cnf = kw
options = ()
for k, v in cnf.items():
if hasattr(v, '__call__'):
v = self._register(v)
options = options + ('-'+k, v)
self.tk.call(('image', 'create', imgtype, name,) + options)
self.name = name
def __str__(self): return self.name
def __del__(self):
if self.name:
try:
self.tk.call('image', 'delete', self.name)
except TclError:
# May happen if the root was destroyed
pass
def __setitem__(self, key, value):
self.tk.call(self.name, 'configure', '-'+key, value)
def __getitem__(self, key):
return self.tk.call(self.name, 'configure', '-'+key)
def configure(self, **kw):
"""Configure the image."""
res = ()
for k, v in _cnfmerge(kw).items():
if v is not None:
if k[-1] == '_': k = k[:-1]
if hasattr(v, '__call__'):
v = self._register(v)
res = res + ('-'+k, v)
self.tk.call((self.name, 'config') + res)
config = configure
def height(self):
"""Return the height of the image."""
return getint(
self.tk.call('image', 'height', self.name))
def type(self):
"""Return the type of the image, e.g. "photo" or "bitmap"."""
return self.tk.call('image', 'type', self.name)
def width(self):
"""Return the width of the image."""
return getint(
self.tk.call('image', 'width', self.name))
class PhotoImage(Image):
"""Image which can display colored images in GIF, PPM/PGM format."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create an image with NAME.
Valid resource names: data, format, file, gamma, height, palette,
width."""
Image.__init__(self, 'photo', name, cnf, master, **kw)
def blank(self):
"""Display a transparent image."""
self.tk.call(self.name, 'blank')
def cget(self, option):
"""Return the value of OPTION."""
return self.tk.call(self.name, 'cget', '-' + option)
# XXX config
def __getitem__(self, key):
return self.tk.call(self.name, 'cget', '-' + key)
# XXX copy -from, -to, ...?
def copy(self):
"""Return a new PhotoImage with the same image as this widget."""
destImage = PhotoImage()
self.tk.call(destImage, 'copy', self.name)
return destImage
def zoom(self,x,y=''):
"""Return a new PhotoImage with the same image as this widget
but zoom it with X and Y."""
destImage = PhotoImage()
if y=='': y=x
self.tk.call(destImage, 'copy', self.name, '-zoom',x,y)
return destImage
def subsample(self,x,y=''):
"""Return a new PhotoImage based on the same image as this widget
but use only every Xth or Yth pixel."""
destImage = PhotoImage()
if y=='': y=x
self.tk.call(destImage, 'copy', self.name, '-subsample',x,y)
return destImage
def get(self, x, y):
"""Return the color (red, green, blue) of the pixel at X,Y."""
return self.tk.call(self.name, 'get', x, y)
def put(self, data, to=None):
"""Put row formatted colors to image starting from
position TO, e.g. image.put("{red green} {blue yellow}", to=(4,6))"""
args = (self.name, 'put', data)
if to:
if to[0] == '-to':
to = to[1:]
args = args + ('-to',) + tuple(to)
self.tk.call(args)
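# Illustrative sketch (not part of the original module): painting pixels into a
# blank PhotoImage and reading one back; assumes a Tk root already exists.
#
#   img = PhotoImage(width=4, height=4)
#   img.put('{red red} {blue blue}', to=(0, 0))
#   img.get(0, 0)    # -> the color at (0, 0), as reported by Tk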
# XXX read
def write(self, filename, format=None, from_coords=None):
"""Write image to file FILENAME in FORMAT starting from
position FROM_COORDS."""
args = (self.name, 'write', filename)
if format:
args = args + ('-format', format)
if from_coords:
args = args + ('-from',) + tuple(from_coords)
self.tk.call(args)
class BitmapImage(Image):
"""Image which can display a bitmap."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create a bitmap with NAME.
Valid resource names: background, data, file, foreground, maskdata, maskfile."""
Image.__init__(self, 'bitmap', name, cnf, master, **kw)
def image_names(): return _default_root.tk.call('image', 'names')
def image_types(): return _default_root.tk.call('image', 'types')
class Spinbox(Widget):
"""spinbox widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a spinbox widget with the parent MASTER.
STANDARD OPTIONS
activebackground, background, borderwidth,
cursor, exportselection, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, insertbackground,
insertborderwidth, insertofftime,
insertontime, insertwidth, justify, relief,
repeatdelay, repeatinterval,
selectbackground, selectborderwidth,
selectforeground, takefocus, textvariable,
xscrollcommand.
WIDGET-SPECIFIC OPTIONS
buttonbackground, buttoncursor,
buttondownrelief, buttonuprelief,
command, disabledbackground,
disabledforeground, format, from,
invalidcommand, increment,
readonlybackground, state, to,
validate, validatecommand, values,
width, wrap,
"""
Widget.__init__(self, master, 'spinbox', cnf, kw)
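# Illustrative sketch (not part of the original module): a numeric spinbox; the
# trailing underscore in from_ avoids the reserved word 'from'. Assumes a Tk
# root already exists.
#
#   spin = Spinbox(root, from_=0, to=10, increment=1, wrap=True)
#   spin.pack()
#   spin.get()    # -> the current contents, as a string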
def bbox(self, index):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a
rectangle which encloses the character given by index.
The first two elements of the list give the x and y
coordinates of the upper-left corner of the screen
area covered by the character (in pixels relative
to the widget) and the last two elements give the
width and height of the character, in pixels. The
bounding box may refer to a region outside the
visible area of the window.
"""
return self.tk.call(self._w, 'bbox', index)
def delete(self, first, last=None):
"""Delete one or more elements of the spinbox.
First is the index of the first character to delete,
and last is the index of the character just after
the last one to delete. If last isn't specified it
defaults to first+1, i.e. a single character is
deleted. This command returns an empty string.
"""
return self.tk.call(self._w, 'delete', first, last)
def get(self):
"""Returns the spinbox's string"""
return self.tk.call(self._w, 'get')
def icursor(self, index):
"""Alter the position of the insertion cursor.
The insertion cursor will be displayed just before
the character given by index. Returns an empty string
"""
return self.tk.call(self._w, 'icursor', index)
def identify(self, x, y):
"""Returns the name of the widget at position x, y
Return value is one of: none, buttondown, buttonup, entry
"""
return self.tk.call(self._w, 'identify', x, y)
def index(self, index):
"""Returns the numerical index corresponding to index
"""
return self.tk.call(self._w, 'index', index)
def insert(self, index, s):
"""Insert string s at index
Returns an empty string.
"""
return self.tk.call(self._w, 'insert', index, s)
def invoke(self, element):
"""Causes the specified element to be invoked
The element could be buttondown or buttonup
triggering the action associated with it.
"""
return self.tk.call(self._w, 'invoke', element)
def scan(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'scan') + args)) or ()
def scan_mark(self, x):
"""Records x and the current view in the spinbox window;
used in conjunction with later scan dragto commands.
Typically this command is associated with a mouse button
press in the widget. It returns an empty string.
"""
return self.scan("mark", x)
def scan_dragto(self, x):
"""Compute the difference between the given x argument
and the x argument to the last scan mark command
It then adjusts the view left or right by 10 times the
difference in x-coordinates. This command is typically
associated with mouse motion events in the widget, to
produce the effect of dragging the spinbox at high speed
through the window. The return value is an empty string.
"""
return self.scan("dragto", x)
def selection(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'selection') + args)) or ()
def selection_adjust(self, index):
"""Locate the end of the selection nearest to the character
given by index,
then adjust that end of the selection to be at index
(i.e. including but not going beyond index). The other
end of the selection is made the anchor point for future
select to commands. If the selection isn't currently in
the spinbox, then a new selection is created to include
the characters between index and the most recent selection
anchor point, inclusive. Returns an empty string.
"""
return self.selection("adjust", index)
def selection_clear(self):
"""Clear the selection
If the selection isn't in this widget then the
command has no effect. Returns an empty string.
"""
return self.selection("clear")
def selection_element(self, element=None):
"""Sets or gets the currently selected element.
If a spinbutton element is specified, it will be
displayed depressed
"""
return self.selection("element", element)
###########################################################################
class LabelFrame(Widget):
"""labelframe widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a labelframe widget with the parent MASTER.
STANDARD OPTIONS
borderwidth, cursor, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, padx, pady, relief,
takefocus, text
WIDGET-SPECIFIC OPTIONS
background, class, colormap, container,
height, labelanchor, labelwidget,
visual, width
"""
Widget.__init__(self, master, 'labelframe', cnf, kw)
########################################################################
class PanedWindow(Widget):
"""panedwindow widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a panedwindow widget with the parent MASTER.
STANDARD OPTIONS
background, borderwidth, cursor, height,
orient, relief, width
WIDGET-SPECIFIC OPTIONS
handlepad, handlesize, opaqueresize,
sashcursor, sashpad, sashrelief,
sashwidth, showhandle,
"""
Widget.__init__(self, master, 'panedwindow', cnf, kw)
def add(self, child, **kw):
"""Add a child widget to the panedwindow in a new pane.
The child argument is the name of the child widget
followed by pairs of arguments that specify how to
manage the windows. The possible options and values
are the ones accepted by the paneconfigure method.
"""
self.tk.call((self._w, 'add', child) + self._options(kw))
def remove(self, child):
"""Remove the pane containing child from the panedwindow
All geometry management options for child will be forgotten.
"""
self.tk.call(self._w, 'forget', child)
forget=remove
def identify(self, x, y):
"""Identify the panedwindow component at point x, y
If the point is over a sash or a sash handle, the result
is a two element list containing the index of the sash or
handle, and a word indicating whether it is over a sash
or a handle, such as {0 sash} or {2 handle}. If the point
is over any other part of the panedwindow, the result is
an empty list.
"""
return self.tk.call(self._w, 'identify', x, y)
def proxy(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'proxy') + args)) or ()
def proxy_coord(self):
"""Return the x and y pair of the most recent proxy location
"""
return self.proxy("coord")
def proxy_forget(self):
"""Remove the proxy from the display.
"""
return self.proxy("forget")
def proxy_place(self, x, y):
"""Place the proxy at the given x and y coordinates.
"""
return self.proxy("place", x, y)
def sash(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'sash') + args)) or ()
def sash_coord(self, index):
"""Return the current x and y pair for the sash given by index.
Index must be an integer between 0 and 1 less than the
number of panes in the panedwindow. The coordinates given are
those of the top left corner of the region containing the sash.
(The related "sash dragto index x y" subcommand computes the
difference between the given coordinates and the coordinates
given to the last sash coord command for the given sash, then
moves that sash by the computed difference; its return value is
the empty string.)
"""
return self.sash("coord", index)
def sash_mark(self, index):
"""Records x and y for the sash given by index;
Used in conjunction with later dragto commands to move the sash.
"""
return self.sash("mark", index)
def sash_place(self, index, x, y):
"""Place the sash given by index at the given coordinates
"""
return self.sash("place", index, x, y)
def panecget(self, child, option):
"""Query a management option for window.
Option may be any value allowed by the paneconfigure subcommand
"""
return self.tk.call(
(self._w, 'panecget') + (child, '-'+option))
def paneconfigure(self, tagOrId, cnf=None, **kw):
"""Query or modify the management options for window.
If no option is specified, returns a list describing all
of the available options for pathName. If option is
specified with no value, then the command returns a list
describing the one named option (this list will be identical
to the corresponding sublist of the value returned if no
option is specified). If one or more option-value pairs are
specified, then the command modifies the given widget
option(s) to have the given value(s); in this case the
command returns an empty string. The following options
are supported:
after window
Insert the window after the window specified. window
should be the name of a window already managed by pathName.
before window
Insert the window before the window specified. window
should be the name of a window already managed by pathName.
height size
Specify a height for the window. The height will be the
outer dimension of the window including its border, if
any. If size is an empty string, or if -height is not
specified, then the height requested internally by the
window will be used initially; the height may later be
adjusted by the movement of sashes in the panedwindow.
Size may be any value accepted by Tk_GetPixels.
minsize n
Specifies that the size of the window cannot be made
less than n. This constraint only affects the size of
the widget in the paned dimension -- the x dimension
for horizontal panedwindows, the y dimension for
vertical panedwindows. May be any value accepted by
Tk_GetPixels.
padx n
Specifies a non-negative value indicating how much
extra space to leave on each side of the window in
the X-direction. The value may have any of the forms
accepted by Tk_GetPixels.
pady n
Specifies a non-negative value indicating how much
extra space to leave on each side of the window in
the Y-direction. The value may have any of the forms
accepted by Tk_GetPixels.
sticky style
If a window's pane is larger than the requested
dimensions of the window, this option may be used
to position (or stretch) the window within its pane.
Style is a string that contains zero or more of the
            characters n, s, e or w. The string can optionally
            contain spaces or commas, but they are ignored. Each
letter refers to a side (north, south, east, or west)
that the window will "stick" to. If both n and s
(or e and w) are specified, the window will be
stretched to fill the entire height (or width) of
its cavity.
width size
Specify a width for the window. The width will be
the outer dimension of the window including its
border, if any. If size is an empty string, or
if -width is not specified, then the width requested
internally by the window will be used initially; the
width may later be adjusted by the movement of sashes
in the panedwindow. Size may be any value accepted by
Tk_GetPixels.
"""
if cnf is None and not kw:
cnf = {}
for x in self.tk.split(
self.tk.call(self._w,
'paneconfigure', tagOrId)):
cnf[x[0][1:]] = (x[0][1:],) + x[1:]
return cnf
if isinstance(cnf, str) and not kw:
x = self.tk.split(self.tk.call(
self._w, 'paneconfigure', tagOrId, '-'+cnf))
return (x[0][1:],) + x[1:]
self.tk.call((self._w, 'paneconfigure', tagOrId) +
self._options(cnf, kw))
paneconfig = paneconfigure
def panes(self):
"""Returns an ordered list of the child panes."""
return self.tk.call(self._w, 'panes')
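# Illustrative sketch (assumed names, not part of this module): the pane
# management options documented in paneconfigure above are typically used
# along the following lines:
#
#     pw = PanedWindow(root, orient=HORIZONTAL)
#     left, right = Label(pw, text='left'), Label(pw, text='right')
#     pw.add(left)
#     pw.add(right)
#     pw.paneconfigure(left, minsize=100, padx=4, sticky='nsew')
#     print(pw.panes())        # ordered list of the child pane path names
#     pw.pack(fill=BOTH, expand=True)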
######################################################################
# Extensions:
class Studbutton(Button):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'studbutton', cnf, kw)
self.bind('<Any-Enter>', self.tkButtonEnter)
self.bind('<Any-Leave>', self.tkButtonLeave)
self.bind('<1>', self.tkButtonDown)
self.bind('<ButtonRelease-1>', self.tkButtonUp)
class Tributton(Button):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'tributton', cnf, kw)
self.bind('<Any-Enter>', self.tkButtonEnter)
self.bind('<Any-Leave>', self.tkButtonLeave)
self.bind('<1>', self.tkButtonDown)
self.bind('<ButtonRelease-1>', self.tkButtonUp)
self['fg'] = self['bg']
self['activebackground'] = self['bg']
######################################################################
# Test:
def _test():
root = Tk()
text = "This is Tcl/Tk version %s" % TclVersion
if TclVersion >= 8.1:
text += "\nThis should be a cedilla: \xe7"
label = Label(root, text=text)
label.pack()
test = Button(root, text="Click me!",
command=lambda root=root: root.test.configure(
text="[%s]" % root.test['text']))
test.pack()
root.test = test
quit = Button(root, text="QUIT", command=root.destroy)
quit.pack()
# The following three commands are needed so the window pops
# up on top on Windows...
root.iconify()
root.update()
root.deiconify()
root.mainloop()
if __name__ == '__main__':
_test()
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 |
pricecollectr/pricecollectr/wsgi.py | """
WSGI config for pricecollectr project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pricecollectr.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
creds.py | import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
class cred():
    BOT_TOKEN = os.getenv("BOT_TOKEN")  # From BotFather
    API_ID = os.getenv("API_ID")  # Get this value from my.telegram.org! Please do not steal
    API_HASH = os.getenv("API_HASH")  # Get this value from my.telegram.org! Please do not steal
    OWNER_ID = os.getenv("OWNER_ID")  # Owner of the bot
    DB_URL = os.getenv("DB_URL")  # From Firebase database
    DB_SECRET = os.getenv("DB_SECRET")  # From Firebase database
    DB_MAIL = os.getenv("DB_MAIL")  # From Firebase database
    #### From Truecaller and Eyecon app request headers, respectively ####
    T_AUTH = os.getenv("T_AUTH")  # Truecaller auth id CA
    E_AUTH = os.getenv("E_AUTH")  # Eyecon auth id
    E_AUTH_V = os.getenv("E_AUTH_V")  # Eyecon auth_v
    E_AUTH_C = os.getenv("E_AUTH_C")  # Eyecon auth_c
| [] | [] | ["T_AUTH", "OWNER_ID", "DB_SECRET", "DB_MAIL", "E_AUTH", "BOT_TOKEN", "E_AUTH_V", "E_AUTH_C", "API_ID", "API_HASH", "DB_URL"] | [] | ["T_AUTH", "OWNER_ID", "DB_SECRET", "DB_MAIL", "E_AUTH", "BOT_TOKEN", "E_AUTH_V", "E_AUTH_C", "API_ID", "API_HASH", "DB_URL"] | python | 11 | 0 |