filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129) | variableargcount (float64, 0-0) | sentence (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
flagconfig.go | // Package flagconfig provides a flag to specify a config file which
// will in turn be used to read unspecified flag values.
package flagconfig
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
)
var configFile = flag.String(
"c", defaultConfig(), "Config file to read flags from.")
func Usage() {
Parse()
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
flag.VisitAll(func(f *flag.Flag) {
fmt.Fprintf(os.Stderr, " -%s=%s: %s\n", f.Name, f.Value.String(), f.Usage)
})
}
func defaultConfig() string {
home := os.Getenv("HOME")
basename := filepath.Base(os.Args[0])
path := filepath.Join(home, ".config", basename, "config")
_, err := os.Open(path)
if err == nil {
return path
}
path = filepath.Join("/", "etc", "conf.d", basename)
_, err = os.Open(path)
if err == nil {
return path
}
return ""
}
func contains(list []*flag.Flag, f *flag.Flag) bool {
for _, i := range list {
if i == f {
return true
}
}
return false
}
func readConfig() map[string]string {
bytes, err := ioutil.ReadFile(*configFile)
if err != nil {
log.Fatalf("Failed to read config file %s: %s", *configFile, err)
}
lines := strings.Split(string(bytes), "\n")
result := make(map[string]string, len(lines))
for _, line := range lines {
trimmed := strings.TrimSpace(line)
if trimmed == "" || trimmed[0] == '#' {
continue
}
parts := strings.Split(line, "=")
if len(parts) != 2 {
log.Fatalf("Invalid config line: %s", line)
}
result[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
}
return result
}
func Parse() {
ParseSet(flag.CommandLine)
}
func ParseSet(set *flag.FlagSet) {
if *configFile == "" {
return
}
config := readConfig()
explicit := make([]*flag.Flag, 0)
all := make([]*flag.Flag, 0)
set.Visit(func(f *flag.Flag) {
explicit = append(explicit, f)
})
set.VisitAll(func(f *flag.Flag) {
all = append(all, f)
if !contains(explicit, f) {
val := config[f.Name]
if val != "" {
err := f.Value.Set(val)
if err != nil {
log.Fatalf("Failed to set flag %s with value %s", f.Name, val)
}
}
}
})
Outer:
for name, val := range config {
for _, f := range all {
if f.Name == name {
continue Outer
}
}
log.Fatalf("Unknown flag %s=%s in config file.", name, val)
}
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
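The flagconfig row above shows the whole pattern: any flag left at its default is filled in from a simple `name = value` config file (lines starting with `#` are comments), located via the `-c` flag, `~/.config/<binary>/config`, or `/etc/conf.d/<binary>`. A minimal usage sketch in Go; the import path is a placeholder, since the row does not record the real module path:

```go
package main

import (
	"flag"
	"fmt"

	"example.com/flagconfig" // placeholder import path for the package above
)

var (
	addr    = flag.String("addr", ":8080", "Address to listen on.")
	verbose = flag.Bool("verbose", false, "Enable verbose logging.")
)

func main() {
	// Parse the command line first so -c and any explicit flags are known,
	// then let flagconfig fill in whatever was left at its default from the
	// config file ("name = value" lines, '#' for comments).
	flag.Parse()
	flagconfig.Parse()

	fmt.Println("addr:", *addr, "verbose:", *verbose)
}
```

Flags passed explicitly on the command line keep their values, because ParseSet records them via set.Visit before applying the config file, and unknown keys in the file are rejected with log.Fatalf.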
samples/daal/java/hadoop/sources/LinearRegressionQR.java | /* file: LinearRegressionQR.java */
/*******************************************************************************
* Copyright 2017-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/*
// Content:
// Java sample of multiple linear regression in the distributed processing
// mode.
//
// The program trains the multiple linear regression model on a training
// data set with a QR decomposition-based method and computes regression for
// the test data.
////////////////////////////////////////////////////////////////////////////////
*/
package DAAL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import java.net.URI;
import com.intel.daal.data_management.data.*;
import com.intel.daal.data_management.data_source.*;
import com.intel.daal.services.*;
/* Implement Tool to be able to pass -libjars on start */
public class LinearRegressionQR extends Configured implements Tool {
public static void main(String[] args) {
int res = -1;
try {
res = ToolRunner.run(new Configuration(), new LinearRegressionQR(), args);
} catch (Exception e) {
ErrorHandling.printThrowable(e);
}
System.exit(res);
}
@Override
public int run(String[] args) throws Exception {
Configuration conf = this.getConf();
/* Put shared libraries into the distributed cache */
DistributedCache.createSymlink(conf);
DistributedCache.addCacheFile(new URI("/Hadoop/Libraries/" + System.getenv("LIBJAVAAPI")), conf);
DistributedCache.addCacheFile(new URI("/Hadoop/Libraries/" + System.getenv("LIBTBB")), conf);
DistributedCache.addCacheFile(new URI("/Hadoop/Libraries/" + System.getenv("LIBTBBMALLOC")), conf);
Job job = new Job(conf, "Linear regression with the QR decomposition-based method Job");
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
job.setMapperClass(LinearRegressionQRStep1TrainingMapper.class);
job.setReducerClass(LinearRegressionQRStep2TrainingReducerAndPrediction.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(WriteableData.class);
job.setJarByClass(LinearRegressionQR.class);
return job.waitForCompletion(true) ? 0 : 1;
}
}
| ["\"LIBJAVAAPI\"", "\"LIBTBB\"", "\"LIBTBBMALLOC\""] | [] | ["LIBTBB", "LIBTBBMALLOC", "LIBJAVAAPI"] | [] | ["LIBTBB", "LIBTBBMALLOC", "LIBJAVAAPI"] | java | 3 | 0 | |
util.go | package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"time"
"github.com/streadway/amqp"
)
func LogOnError(err error) {
if err != nil {
fmt.Printf("ERROR - %s\n", err)
}
}
func PanicOnError(err error) {
if err != nil {
panic(err)
}
}
func setupChannel() (*amqp.Connection, *amqp.Channel, error) {
url := os.Getenv("AMQP_URL")
conn, err := amqp.Dial(url)
if err != nil {
LogOnError(err)
return nil, nil, err
}
channel, err := conn.Channel()
if err != nil {
LogOnError(err)
return nil, nil, err
}
err = channel.Qos(1, 0, false)
if err != nil {
LogOnError(err)
return nil, nil, err
}
log.Printf("setup channel success!")
return conn, channel, nil
}
func cloneToPublishMsg(msg *amqp.Delivery) *amqp.Publishing {
newMsg := amqp.Publishing{
Headers: msg.Headers,
ContentType: msg.ContentType,
ContentEncoding: msg.ContentEncoding,
DeliveryMode: msg.DeliveryMode,
Priority: msg.Priority,
CorrelationId: msg.CorrelationId,
ReplyTo: msg.ReplyTo,
Expiration: msg.Expiration,
MessageId: msg.MessageId,
Timestamp: msg.Timestamp,
Type: msg.Type,
UserId: msg.UserId,
AppId: msg.AppId,
Body: msg.Body,
}
return &newMsg
}
func newHttpClient(maxIdleConns, maxIdleConnsPerHost, idleConnTimeout int) *http.Client {
tr := &http.Transport{
MaxIdleConns: maxIdleConns,
MaxIdleConnsPerHost: maxIdleConnsPerHost,
IdleConnTimeout: time.Duration(idleConnTimeout) * time.Second,
}
client := &http.Client{
Transport: tr,
}
return client
}
func notifyUrl(client *http.Client, url string, body []byte) int {
req, err := http.NewRequest("POST", url, bytes.NewReader(body))
if err != nil {
log.Printf("notify url create req fail: %s", err)
return 0
}
req.Header.Set("Content-Type", "application/json")
response, err := client.Do(req)
if err != nil {
log.Printf("notify url %s fail: %s", url, err)
return 0
}
defer response.Body.Close()
io.Copy(ioutil.Discard, response.Body)
return response.StatusCode
}
| ["\"AMQP_URL\""] | [] | ["AMQP_URL"] | [] | ["AMQP_URL"] | go | 1 | 0 | |
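The util.go row keeps its AMQP, HTTP, and message-cloning helpers separate; a rough sketch of how they might be wired together, written as if it lived in the same main package so it can reuse those helpers and their imports. The queue name, retry routing key, and webhook URL are invented for illustration:

```go
// consumeAndNotify drains a queue and forwards each payload to a webhook,
// republishing a copy to a retry queue when the webhook does not return 2xx.
func consumeAndNotify() {
	conn, channel, err := setupChannel() // dials os.Getenv("AMQP_URL"), prefetch 1
	PanicOnError(err)
	defer conn.Close()

	deliveries, err := channel.Consume(
		"notifications", // hypothetical queue, assumed to exist
		"",              // consumer tag (server-assigned)
		false,           // autoAck: we ack manually below
		false, false, false, nil,
	)
	PanicOnError(err)

	client := newHttpClient(10, 10, 30)
	for msg := range deliveries {
		status := notifyUrl(client, "https://example.com/hook", msg.Body)
		if status < 200 || status >= 300 {
			// Copy the delivery into an amqp.Publishing and park it on a retry queue.
			LogOnError(channel.Publish("", "notifications.retry", false, false, *cloneToPublishMsg(&msg)))
		}
		LogOnError(msg.Ack(false))
	}
}
```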
buildpack/stage.py | #!/usr/bin/env python3
import logging
import os
import shutil
import sys
from buildpack import databroker, util
from buildpack.core import java, mxbuild, nginx, runtime
from buildpack.infrastructure import database
from buildpack.telemetry import (
appdynamics,
datadog,
dynatrace,
logs,
metering,
mx_java_agent,
newrelic,
telegraf,
)
BUILDPACK_DIR = os.path.abspath(
os.path.dirname(
os.path.dirname(os.path.join(os.path.dirname(__file__), ".."))
)
)
BUILD_DIR = os.path.abspath(sys.argv[1])
CACHE_DIR = os.path.abspath(os.path.join(sys.argv[2], "bust"))
DOT_LOCAL_LOCATION = os.path.abspath(os.path.join(BUILD_DIR, ".local"))
if len(sys.argv) >= 5:
DEPS_DIR = os.path.abspath(sys.argv[3])
DEPS_IDX = os.path.abspath(sys.argv[4])
if len(sys.argv) >= 6 and sys.argv[5] != "":
PROFILE_DIR = os.path.abspath(sys.argv[5])
else:
PROFILE_DIR = os.path.abspath(os.path.join(BUILD_DIR, ".profile.d"))
SUPPORTED_STACKS = [
"cflinuxfs3",
None,
] # None is allowed, but not supported
def check_database_environment():
try:
database.get_config()
return True
except RuntimeError as ex:
logging.error(
"You should provide a DATABASE_URL by adding a database service "
"to this application, it can be either MySQL or Postgres "
"If this is the first push of a new app, "
"set up a database service "
"and push again afterwards: %s",
ex,
)
return False
def preflight_check(version):
if not check_database_environment():
raise ValueError("Missing database configuration")
stack = os.getenv("CF_STACK")
logging.info(
"Preflight check on Mendix version [%s] and stack [%s]...",
version,
stack,
)
if stack not in SUPPORTED_STACKS:
raise NotImplementedError(
"Stack [{}] is not supported by this buildpack".format(stack)
)
if not runtime.is_version_implemented(version):
raise NotImplementedError(
"Mendix [{}] is not supported by this buildpack".format(
version.major
)
)
if not runtime.is_version_supported(version):
logging.warning(
"Mendix [{}] is end-of-support. Please use a supported Mendix version (https://docs.mendix.com/releasenotes/studio-pro/lts-mts).".format(
version.major
)
)
elif not runtime.is_version_maintained(version):
logging.info(
"Mendix [{}.{}] is not maintained. Please use a medium- or long-term supported Mendix version to easily receive fixes (https://docs.mendix.com/releasenotes/studio-pro/lts-mts).".format(
version.major, version.minor
)
)
logging.info("Preflight check completed")
def set_up_directory_structure():
logging.debug("Creating buildpack directory structure...")
util.mkdir_p(DOT_LOCAL_LOCATION)
def set_up_launch_environment():
logging.debug("Creating buildpack launch environment...")
util.mkdir_p(PROFILE_DIR)
util.set_up_launch_environment(DEPS_DIR, PROFILE_DIR)
def copy_buildpack_resources():
shutil.copytree(
os.path.join(BUILDPACK_DIR, "buildpack"),
os.path.join(BUILD_DIR, "buildpack"),
)
shutil.copytree(
os.path.join(BUILDPACK_DIR, "lib"), os.path.join(BUILD_DIR, "lib")
)
commit_file_path = os.path.join(BUILDPACK_DIR, ".commit")
if os.path.isfile(commit_file_path):
shutil.copy(
commit_file_path,
os.path.join(BUILD_DIR, ".commit"),
)
shutil.copy(
os.path.join(BUILDPACK_DIR, "VERSION"),
os.path.join(BUILD_DIR, "VERSION"),
)
def get_mpr_file():
return util.get_mpr_file_from_dir(BUILD_DIR)
def is_source_push():
if get_mpr_file() is not None:
return True
else:
return False
if __name__ == "__main__":
logging.basicConfig(
level=util.get_buildpack_loglevel(),
stream=sys.stdout,
format="%(levelname)s: %(message)s",
)
runtime_version = runtime.get_runtime_version(BUILD_DIR)
try:
preflight_check(runtime_version)
except (ValueError, NotImplementedError) as error:
logging.error(error)
exit(1)
if is_source_push():
try:
mxbuild.build_from_source(
BUILDPACK_DIR,
BUILD_DIR,
CACHE_DIR,
DOT_LOCAL_LOCATION,
runtime_version,
runtime.get_java_version(runtime_version),
)
except RuntimeError as error:
logging.error(error)
exit(1)
set_up_directory_structure()
copy_buildpack_resources()
set_up_launch_environment()
java.stage(
BUILDPACK_DIR,
CACHE_DIR,
DOT_LOCAL_LOCATION,
runtime.get_java_version(runtime_version),
)
appdynamics.stage(BUILDPACK_DIR, DOT_LOCAL_LOCATION, CACHE_DIR)
dynatrace.stage(BUILDPACK_DIR, DOT_LOCAL_LOCATION, CACHE_DIR)
newrelic.stage(BUILDPACK_DIR, DOT_LOCAL_LOCATION, CACHE_DIR)
mx_java_agent.stage(
BUILDPACK_DIR, DOT_LOCAL_LOCATION, CACHE_DIR, runtime_version
)
telegraf.stage(
BUILDPACK_DIR, DOT_LOCAL_LOCATION, CACHE_DIR, runtime_version
)
datadog.stage(BUILDPACK_DIR, DOT_LOCAL_LOCATION, CACHE_DIR)
metering.stage(BUILDPACK_DIR, BUILD_DIR, CACHE_DIR)
database.stage(BUILDPACK_DIR, BUILD_DIR)
runtime.stage(BUILDPACK_DIR, BUILD_DIR, CACHE_DIR)
logs.stage(BUILDPACK_DIR, BUILD_DIR, CACHE_DIR)
databroker.stage(BUILDPACK_DIR, DOT_LOCAL_LOCATION, CACHE_DIR)
nginx.stage(BUILDPACK_DIR, BUILD_DIR, CACHE_DIR)
logging.info("Mendix Cloud Foundry Buildpack staging completed")
| [] | [] | ["CF_STACK"] | [] | ["CF_STACK"] | python | 1 | 0 | |
pkg/vaults/vaultcli/helpers.go | package vaultcli
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"github.com/jenkins-x/jx-helpers/v3/pkg/cmdrunner"
"github.com/jenkins-x/jx-helpers/v3/pkg/files"
"github.com/jenkins-x/jx-logging/v3/pkg/log"
"github.com/jenkins-x/jx-secret/pkg/plugins"
"github.com/jenkins-x/jx-secret/pkg/vaults"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// VerifyVaultBinary verifies the vault binary
func VerifyVaultBinary(commandRunner cmdrunner.CommandRunner, env map[string]string) (string, error) {
vaultBin := os.Getenv("VAULT_BIN")
if vaultBin == "" {
var err error
vaultBin, err = plugins.GetVaultBinary(plugins.VaultVersion)
if err != nil {
return "", errors.Wrapf(err, "failed to find version %s of the vault plugin binary", plugins.VaultVersion)
}
}
log.Logger().Infof("verifying we have vault installed")
// lets verify we can find the binary
cmd := &cmdrunner.Command{
Name: vaultBin,
Args: []string{"version"},
Env: env,
}
_, err := commandRunner(cmd)
if err != nil {
return "", errors.Wrapf(err, "failed to invoke the binary %s. Please make sure you installed 'vault' and put it on your $PATH", vaultBin)
}
return vaultBin, nil
}
// CreateVaultEnv creates the vault env vars
func CreateVaultEnv(kubeClient kubernetes.Interface) (map[string]string, error) {
addr := os.Getenv("VAULT_ADDR")
if addr == "" {
addr = "https://127.0.0.1:8200"
}
ns := os.Getenv("VAULT_NAMESPACE")
if ns == "" {
ns = vaults.DefaultVaultNamespace
}
token := os.Getenv("VAULT_TOKEN")
var err error
if token == "" {
token, err = getSecretKey(kubeClient, ns, "vault-unseal-keys", "vault-root")
if err != nil {
return nil, err
}
}
caCertFile := os.Getenv("VAULT_CACERT")
if caCertFile == "" {
tmpDir, err := ioutil.TempDir("", "jx-secret-vault-") //nolint:govet
if err != nil {
return nil, errors.Wrapf(err, "failed to create temp dir")
}
caCert, err := getSecretKey(kubeClient, ns, "vault-tls", "ca.crt")
if err != nil {
return nil, err
}
caCertFile = filepath.Join(tmpDir, "vault-ca.crt")
err = ioutil.WriteFile(caCertFile, []byte(caCert), files.DefaultFileWritePermissions)
if err != nil {
return nil, errors.Wrapf(err, "failed to save CA Cert file %s", caCertFile)
}
}
env := map[string]string{
"VAULT_ADDR": addr,
"VAULT_TOKEN": token,
"VAULT_CACERT": caCertFile,
}
return env, nil
}
func getSecretKey(kubeClient kubernetes.Interface, ns, secretName, key string) (string, error) {
secret, err := kubeClient.CoreV1().Secrets(ns).Get(context.TODO(), secretName, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
err = nil
}
if err != nil {
return "", errors.Wrapf(err, "failed to find secret %s in namespace %s", secretName, ns)
}
if secret == nil || secret.Data == nil {
return "", errors.Errorf("no data for secret %s in namespace %s", secretName, ns)
}
value := secret.Data[key]
if len(value) == 0 {
return "", errors.Errorf("no '%s' entry for secret %s in namespace %s", key, secretName, ns)
}
return string(value), nil
}
| ["\"VAULT_BIN\"", "\"VAULT_ADDR\"", "\"VAULT_NAMESPACE\"", "\"VAULT_TOKEN\"", "\"VAULT_CACERT\""] | [] | ["VAULT_TOKEN", "VAULT_BIN", "VAULT_ADDR", "VAULT_NAMESPACE", "VAULT_CACERT"] | [] | ["VAULT_TOKEN", "VAULT_BIN", "VAULT_ADDR", "VAULT_NAMESPACE", "VAULT_CACERT"] | go | 5 | 0 | |
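Because CreateVaultEnv falls back to the vault-unseal-keys and vault-tls secrets whenever VAULT_TOKEN or VAULT_CACERT are not exported, it can be exercised without a live cluster through a fake clientset. A sketch under assumptions: the vaultcli import path is inferred from the file path, and the namespace and secret values are made up:

```go
package main

import (
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/jenkins-x/jx-secret/pkg/vaults/vaultcli" // assumed import path
)

func main() {
	// Clear the env overrides so the secret-based fallbacks are used.
	os.Unsetenv("VAULT_TOKEN")
	os.Unsetenv("VAULT_CACERT")
	os.Setenv("VAULT_ADDR", "https://vault.example.com:8200")
	os.Setenv("VAULT_NAMESPACE", "vault-infra") // hypothetical namespace

	kubeClient := fake.NewSimpleClientset(
		&corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: "vault-unseal-keys", Namespace: "vault-infra"},
			Data:       map[string][]byte{"vault-root": []byte("s.dummy-root-token")},
		},
		&corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: "vault-tls", Namespace: "vault-infra"},
			Data:       map[string][]byte{"ca.crt": []byte("dummy CA certificate")},
		},
	)

	env, err := vaultcli.CreateVaultEnv(kubeClient)
	if err != nil {
		panic(err)
	}
	// VAULT_TOKEN comes from the secret; VAULT_CACERT points at a temp file on disk.
	fmt.Println(env["VAULT_ADDR"], env["VAULT_TOKEN"], env["VAULT_CACERT"])
}
```

The returned map is the process environment that VerifyVaultBinary and the other helpers hand to the vault CLI.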
test_init_final.py | # -*- coding: utf-8 -*-
################ Server V14 #####################
import os
import sys
import asyncio
import discord
import datetime
import random
import math
import logging
from discord.ext import commands
from gtts import gTTS
from github import Github, GithubException  # GithubException is caught in dbSave()
import base64
import re #์ ์ฐ
import gspread #์ ์ฐ
from oauth2client.service_account import ServiceAccountCredentials #์ ์ฐ
from io import StringIO
import urllib.request
##################### ๋ก๊น ###########################
log_stream = StringIO()
logging.basicConfig(stream=log_stream, level=logging.WARNING)
#ilsanglog = logging.getLogger('discord')
#ilsanglog.setLevel(level = logging.WARNING)
#handler = logging.StreamHandler()
#handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
#ilsanglog.addHandler(handler)
#####################################################
basicSetting = []
bossData = []
fixed_bossData = []
bossNum = 0
fixed_bossNum = 0
chkvoicechannel = 0
chkrelogin = 0
chflg = 0
LoadChk = 0
bossTime = []
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []
bossMungCnt = []
channel_info = []
channel_name = []
channel_id = []
channel_voice_name = []
channel_voice_id = []
channel_type = []
FixedBossDateData = []
indexFixedBossname = []
client = discord.Client()
access_token = os.environ["BOT_TOKEN"]
git_access_token = os.environ["GIT_TOKEN"]
git_access_repo = os.environ["GIT_REPO"]
git_access_repo_restart = os.environ["GIT_REPO_RESTART"]
g = Github(git_access_token)
repo = g.get_repo(git_access_repo)
repo_restart = g.get_repo(git_access_repo_restart)
def init():
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global voice_client1
global channel_info
global channel_name
global channel_voice_name
global channel_voice_id
global channel_id
global channel_type
global LoadChk
global indexFixedBossname
global FixedBossDateData
global endTime
global gc #์ ์ฐ
global credentials #์ ์ฐ
global regenembed
global ringembed
global command
command = []
tmp_bossData = []
tmp_fixed_bossData = []
FixedBossDateData = []
indexFixedBossname = []
f = []
fb = []
#print("test")
inidata = repo.get_contents("test_setting.ini")
file_data1 = base64.b64decode(inidata.content)
file_data1 = file_data1.decode('utf-8')
inputData = file_data1.split('\n')
command_inidata = repo.get_contents("command.ini")
file_data4 = base64.b64decode(command_inidata.content)
file_data4 = file_data4.decode('utf-8')
command_inputData = file_data4.split('\n')
boss_inidata = repo.get_contents("boss.ini")
file_data3 = base64.b64decode(boss_inidata.content)
file_data3 = file_data3.decode('utf-8')
boss_inputData = file_data3.split('\n')
fixed_inidata = repo.get_contents("fixed_boss.ini")
file_data2 = base64.b64decode(fixed_inidata.content)
file_data2 = file_data2.decode('utf-8')
fixed_inputData = file_data2.split('\n')
for i in range(len(fixed_inputData)):
FixedBossDateData.append(fixed_inputData[i])
index_fixed = 0
for value in FixedBossDateData:
if value.find('bossname') != -1:
indexFixedBossname.append(index_fixed)
index_fixed = index_fixed + 1
for i in range(inputData.count('\r')):
inputData.remove('\r')
for i in range(command_inputData.count('\r')):
command_inputData.remove('\r')
for i in range(boss_inputData.count('\r')):
boss_inputData.remove('\r')
for i in range(fixed_inputData.count('\r')):
fixed_inputData.remove('\r')
del(command_inputData[0])
del(boss_inputData[0])
del(fixed_inputData[0])
############## ๋ณดํ๋ด ์ด๊ธฐ ์ค์ ๋ฆฌ์คํธ #####################
basicSetting.append(inputData[0][11:]) #basicSetting[0] : timezone
basicSetting.append(inputData[5][15:]) #basicSetting[1] : before_alert
basicSetting.append(inputData[7][10:]) #basicSetting[2] : mungChk
basicSetting.append(inputData[6][16:]) #basicSetting[3] : before_alert1
basicSetting.append(inputData[9][14:16]) #basicSetting[4] : restarttime ์
basicSetting.append(inputData[9][17:]) #basicSetting[5] : restarttime ๋ถ
basicSetting.append(inputData[1][15:]) #basicSetting[6] : voice์ฑ๋ ID
basicSetting.append(inputData[2][14:]) #basicSetting[7] : text์ฑ๋ ID
basicSetting.append(inputData[3][16:]) #basicSetting[8] : ์ฌ๋ค๋ฆฌ ์ฑ๋ ID
basicSetting.append(inputData[8][14:]) #basicSetting[9] : !ใ ์ถ๋ ฅ ์
basicSetting.append(inputData[12][11:]) #basicSetting[10] : json ํ์ผ๋ช
basicSetting.append(inputData[4][17:]) #basicSetting[11] : ์ ์ฐ ์ฑ๋ ID
basicSetting.append(inputData[11][12:]) #basicSetting[12] : sheet ์ด๋ฆ
basicSetting.append(inputData[10][16:]) #basicSetting[13] : restart ์ฃผ๊ธฐ
basicSetting.append(inputData[13][12:]) #basicSetting[14] : ์ํธ ์ด๋ฆ
basicSetting.append(inputData[14][12:]) #basicSetting[15] : ์๋ ฅ ์
basicSetting.append(inputData[15][13:]) #basicSetting[16] : ์ถ๋ ฅ ์
############## ๋ณดํ๋ด ๋ช๋ น์ด ๋ฆฌ์คํธ #####################
for i in range(len(command_inputData)):
command.append(command_inputData[i][12:].rstrip('\r')) #command[0] ~ [23] : ๋ช๋ น์ด
for i in range(len(basicSetting)):
basicSetting[i] = basicSetting[i].strip()
if basicSetting[6] != "":
basicSetting[6] = int(basicSetting[6])
if basicSetting[7] != "":
basicSetting[7] = int(basicSetting[7])
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
if int(basicSetting[13]) == 0 :
endTime = tmp_now.replace(hour=int(basicSetting[4]), minute=int(basicSetting[5]), second = int(0))
endTime = endTime + datetime.timedelta(days=int(1000))
else :
endTime = tmp_now.replace(hour=int(basicSetting[4]), minute=int(basicSetting[5]), second = int(0))
if endTime < tmp_now :
endTime = endTime + datetime.timedelta(days=int(basicSetting[13]))
### ์ฑ๋ ๊ณ ์ ###
#basicSetting[6] = int('597781866681991198') #๋ณด์ด์ค์ฑ๋ID
#basicSetting[7] = int('597782016607649829') #ํ์คํธ์ฑ๋ID
bossNum = int(len(boss_inputData)/5)
fixed_bossNum = int(len(fixed_inputData)/6)
for i in range(bossNum):
tmp_bossData.append(boss_inputData[i*5:i*5+5])
for i in range(fixed_bossNum):
tmp_fixed_bossData.append(fixed_inputData[i*6:i*6+6])
#print (tmp_bossData)
for j in range(bossNum):
for i in range(len(tmp_bossData[j])):
tmp_bossData[j][i] = tmp_bossData[j][i].strip()
for j in range(fixed_bossNum):
for i in range(len(tmp_fixed_bossData[j])):
tmp_fixed_bossData[j][i] = tmp_fixed_bossData[j][i].strip()
############## ์ผ๋ฐ๋ณด์ค ์ ๋ณด ๋ฆฌ์คํธ #####################
for j in range(bossNum):
tmp_len = tmp_bossData[j][1].find(':')
f.append(tmp_bossData[j][0][11:]) #bossData[0] : ๋ณด์ค๋ช
f.append(tmp_bossData[j][1][10:tmp_len]) #bossData[1] : ์
f.append(tmp_bossData[j][2][13:]) #bossData[2] : ๋ฉ/๋ฏธ์๋ ฅ
f.append(tmp_bossData[j][3][20:]) #bossData[3] : ๋ถ์ ์๋ฆผ๋ฉํธ
f.append(tmp_bossData[j][4][13:]) #bossData[4] : ์ ์๋ฆผ๋ฉํธ
f.append(tmp_bossData[j][1][tmp_len+1:]) #bossData[5] : ๋ถ
f.append('') #bossData[6] : ๋ฉ์ธ์ง
bossData.append(f)
f = []
bossTime.append(datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0])))
tmp_bossTime.append(datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0])))
bossTimeString.append('99:99:99')
bossDateString.append('9999-99-99')
tmp_bossTimeString.append('99:99:99')
tmp_bossDateString.append('9999-99-99')
bossFlag.append(False)
bossFlag0.append(False)
bossMungFlag.append(False)
bossMungCnt.append(0)
tmp_fixed_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
############## ๊ณ ์ ๋ณด์ค ์ ๋ณด ๋ฆฌ์คํธ #####################
for j in range(fixed_bossNum):
tmp_fixed_len = tmp_fixed_bossData[j][1].find(':')
tmp_fixedGen_len = tmp_fixed_bossData[j][2].find(':')
fb.append(tmp_fixed_bossData[j][0][11:]) #fixed_bossData[0] : ๋ณด์ค๋ช
fb.append(tmp_fixed_bossData[j][1][11:tmp_fixed_len]) #fixed_bossData[1] : ์
fb.append(tmp_fixed_bossData[j][1][tmp_fixed_len+1:]) #fixed_bossData[2] : ๋ถ
fb.append(tmp_fixed_bossData[j][4][20:]) #fixed_bossData[3] : ๋ถ์ ์๋ฆผ๋ฉํธ
fb.append(tmp_fixed_bossData[j][5][13:]) #fixed_bossData[4] : ์ ์๋ฆผ๋ฉํธ
fb.append(tmp_fixed_bossData[j][2][12:tmp_fixedGen_len]) #fixed_bossData[5] : ์ ์ฃผ๊ธฐ-์
fb.append(tmp_fixed_bossData[j][2][tmp_fixedGen_len+1:]) #fixed_bossData[6] : ์ ์ฃผ๊ธฐ-๋ถ
fb.append(tmp_fixed_bossData[j][3][12:16]) #fixed_bossData[7] : ์์์ผ-๋
fb.append(tmp_fixed_bossData[j][3][17:19]) #fixed_bossData[8] : ์์์ผ-์
fb.append(tmp_fixed_bossData[j][3][20:22]) #fixed_bossData[9] : ์์์ผ-์ผ
fixed_bossData.append(fb)
fb = []
fixed_bossFlag.append(False)
fixed_bossFlag0.append(False)
fixed_bossTime.append(tmp_fixed_now.replace(year = int(fixed_bossData[j][7]), month = int(fixed_bossData[j][8]), day = int(fixed_bossData[j][9]), hour=int(fixed_bossData[j][1]), minute=int(fixed_bossData[j][2]), second = int(0)))
if fixed_bossTime[j] < tmp_fixed_now :
while fixed_bossTime[j] < tmp_fixed_now :
fixed_bossTime[j] = fixed_bossTime[j] + datetime.timedelta(hours=int(fixed_bossData[j][5]), minutes=int(fixed_bossData[j][6]), seconds = int(0))
################# ๋ฆฌ์ ๋ณด์ค ์๊ฐ ์ ๋ ฌ ######################
regenData = []
regenTime = []
regenbossName = []
outputTimeHour = []
outputTimeMin = []
for i in range(bossNum):
f.append(bossData[i][0])
f.append(bossData[i][1] + bossData[i][5])
regenData.append(f)
regenTime.append(bossData[i][1] + bossData[i][5])
f = []
regenTime = sorted(list(set(regenTime)))
for j in range(len(regenTime)):
for i in range(len(regenData)):
if regenTime[j] == regenData[i][1] :
f.append(regenData[i][0])
regenbossName.append(f)
outputTimeHour.append(int(regenTime[j][:2]))
outputTimeMin.append(int(regenTime[j][2:]))
f = []
regenembed = discord.Embed(
title='----- ๋ฆฌ์คํฐ ๋ณด์ค -----',
description= ' ')
for i in range(len(regenTime)):
if outputTimeMin[i] == 0 :
regenembed.add_field(name=str(outputTimeHour[i]) + '์๊ฐ', value= '```'+ ', '.join(map(str, sorted(regenbossName[i]))) + '```', inline=False)
else :
regenembed.add_field(name=str(outputTimeHour[i]) + '์๊ฐ' + str(outputTimeMin[i]) + '๋ถ', value= '```' + ','.join(map(str, sorted(regenbossName[i]))) + '```', inline=False)
##########################################################
if basicSetting[10] !="":
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive'] #์ ์ฐ
credentials = ServiceAccountCredentials.from_json_keyfile_name(basicSetting[10], scope) #์ ์ฐ
init()
channel = ''
async def task():
await client.wait_until_ready()
global channel
global endTime
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global voice_client1
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global endTime
if chflg == 1 :
if voice_client1.is_connected() == False :
voice_client1 = await client.get_channel(basicSetting[6]).connect(reconnect=True)
if voice_client1.is_connected() :
await dbLoad()
await client.get_channel(channel).send( '< ๋ค์ ์์ต๋๋ค! >', tts=False)
print("๋ช
์น๋ณต๊ตฌ์๋ฃ!")
while not client.is_closed():
############ ์๋์ก์! ############
if log_stream.getvalue().find("Awaiting") != -1:
log_stream.truncate(0)
log_stream.seek(0)
await client.get_channel(channel).send( '< ๋์ฝ์ ์์๋ฌ! ์ ๊น ๋๊ฐ๋ค ์ฌ๊ป์! >', tts=False)
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
await dbSave()
raise SystemExit
log_stream.truncate(0)
log_stream.seek(0)
##################################
now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
priv0 = now+datetime.timedelta(minutes=int(basicSetting[3]))
priv = now+datetime.timedelta(minutes=int(basicSetting[1]))
aftr = now+datetime.timedelta(minutes=int(0-int(basicSetting[2])))
if channel != '':
################ ๋ณดํ๋ด ์ฌ์์ ################
if endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') == now.strftime('%Y-%m-%d ') + now.strftime('%H:%M:%S'):
if basicSetting[2] != '0':
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
await dbSave()
await FixedBossDateSave()
#await client.get_channel(channel).send('<๊ฐ์๊ธฐ ์ธ์ฌํด๋ ๋๋ผ์ง๋ง์ธ์!>', tts=False)
print("๋ณดํ๋ด์ฌ์์!")
endTime = endTime + datetime.timedelta(days = int(basicSetting[13]))
await asyncio.sleep(2)
inidata_restart = repo_restart.get_contents("restart.txt")
file_data_restart = base64.b64decode(inidata_restart.content)
file_data_restart = file_data_restart.decode('utf-8')
inputData_restart = file_data_restart.split('\n')
if len(inputData_restart) < 3:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_0", "restart\nrestart\nrestrat\n", contents12.sha)
else:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_1", "", contents12.sha)
################ ๊ณ ์ ๋ณด์ค ํ์ธ ################
for i in range(fixed_bossNum):
################ before_alert1 ################
if fixed_bossTime[i] <= priv0 and fixed_bossTime[i] > priv:
if basicSetting[3] != '0':
if fixed_bossFlag0[i] == False:
fixed_bossFlag0[i] = True
await client.get_channel(channel).send("```" + fixed_bossData[i][0] + ' ' + basicSetting[3] + '๋ถ ์ ' + fixed_bossData[i][3] +' [' + fixed_bossTime[i].strftime('%H:%M:%S') + ']```', tts=False)
await PlaySound(voice_client1, './sound/' + fixed_bossData[i][0] + '์๋ฆผ1.mp3')
################ before_alert ################
if fixed_bossTime[i] <= priv and fixed_bossTime[i] > now:
if basicSetting[1] != '0' :
if fixed_bossFlag[i] == False:
fixed_bossFlag[i] = True
await client.get_channel(channel).send("```" + fixed_bossData[i][0] + ' ' + basicSetting[1] + '๋ถ ์ ' + fixed_bossData[i][3] +' [' + fixed_bossTime[i].strftime('%H:%M:%S') + ']```', tts=False)
await PlaySound(voice_client1, './sound/' + fixed_bossData[i][0] + '์๋ฆผ.mp3')
################ ๋ณด์ค ์ ์๊ฐ ํ์ธ ################
if fixed_bossTime[i] <= now :
fixed_bossTime[i] = fixed_bossTime[i]+datetime.timedelta(hours=int(fixed_bossData[i][5]), minutes=int(fixed_bossData[i][6]), seconds = int(0))
fixed_bossFlag0[i] = False
fixed_bossFlag[i] = False
embed = discord.Embed(
description= "```" + fixed_bossData[i][0] + fixed_bossData[i][4] + "```" ,
color=0x00ff00
)
await client.get_channel(channel).send(embed=embed, tts=False)
await PlaySound(voice_client1, './sound/' + fixed_bossData[i][0] + '์ .mp3')
################ ์ผ๋ฐ ๋ณด์ค ํ์ธ ################
for i in range(bossNum):
################ before_alert1 ################
if bossTime[i] <= priv0 and bossTime[i] > priv:
if basicSetting[3] != '0':
if bossFlag0[i] == False:
bossFlag0[i] = True
if bossData[i][6] != '' :
await client.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[3] + '๋ถ ์ ' + bossData[i][3] + " [" + bossTimeString[i] + "]" + '\n<' + bossData[i][6] + '>```', tts=False)
else :
await client.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[3] + '๋ถ ์ ' + bossData[i][3] + " [" + bossTimeString[i] + "]```", tts=False)
await PlaySound(voice_client1, './sound/' + bossData[i][0] + '์๋ฆผ1.mp3')
################ before_alert ################
if bossTime[i] <= priv and bossTime[i] > now:
if basicSetting[1] != '0' :
if bossFlag[i] == False:
bossFlag[i] = True
if bossData[i][6] != '' :
await client.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[1] + '๋ถ ์ ' + bossData[i][3] + " [" + bossTimeString[i] + "]" + '\n<' + bossData[i][6] + '>```', tts=False)
else :
await client.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[1] + '๋ถ ์ ' + bossData[i][3] + " [" + bossTimeString[i] + "]```", tts=False)
await PlaySound(voice_client1, './sound/' + bossData[i][0] + '์๋ฆผ.mp3')
################ ๋ณด์ค ์ ์๊ฐ ํ์ธ ################
if bossTime[i] <= now :
#print ('if ', bossTime[i])
bossMungFlag[i] = True
tmp_bossTime[i] = bossTime[i]
tmp_bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
tmp_bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
bossTime[i] = now+datetime.timedelta(days=365)
if bossData[i][6] != '' :
embed = discord.Embed(
description= "```" + bossData[i][0] + bossData[i][4] + '\n<' + bossData[i][6] + '>```' ,
color=0x00ff00
)
else :
embed = discord.Embed(
description= "```" + bossData[i][0] + bossData[i][4] + "```" ,
color=0x00ff00
)
await client.get_channel(channel).send(embed=embed, tts=False)
await PlaySound(voice_client1, './sound/' + bossData[i][0] + '์ .mp3')
################ ๋ณด์ค ์๋ ๋ฉ ์ฒ๋ฆฌ ################
if bossMungFlag[i] == True:
if (bossTime[i]+datetime.timedelta(days=-365)) <= aftr:
if basicSetting[2] != '0':
################ ๋ฏธ์๋ ฅ ๋ณด์ค ################
if bossData[i][2] == '0':
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_bossTime[i]+datetime.timedelta(hours=int(bossData[i][1]), minutes=int(bossData[i][5]))
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await client.get_channel(channel).send("```" + bossData[i][0] + ' ๋ฏธ์
๋ ฅ ๋์ต๋๋ค.```', tts=False)
embed = discord.Embed(
description= '```๋ค์ ' + bossData[i][0] + ' ' + bossTimeString[i] + '์๋๋ค.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
await PlaySound(voice_client1, './sound/' + bossData[i][0] + '๋ฏธ์๋ ฅ.mp3')
################ ๋ฉ ๋ณด์ค ################
else :
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_bossTime[i]+datetime.timedelta(hours=int(bossData[i][1]), minutes=int(bossData[i][5]))
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await client.get_channel(channel).send("```" + bossData[i][0] + ' ๋ฉ ์
๋๋ค.```')
embed = discord.Embed(
description= '```๋ค์ ' + bossData[i][0] + ' ' + bossTimeString[i] + '์๋๋ค.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
await PlaySound(voice_client1, './sound/' + bossData[i][0] + '๋ฉ.mp3')
await asyncio.sleep(1) # task runs every 60 seconds
#mp3 ํ์ผ ์์ฑํจ์(gTTS ์ด์ฉ, ๋จ์ฑ๋ชฉ์๋ฆฌ)
async def MakeSound(saveSTR, filename):
'''
tts = gTTS(saveSTR, lang = 'ko')
tts.save('./' + filename + '.mp3')
'''
try:
encText = urllib.parse.quote(saveSTR)
urllib.request.urlretrieve("https://clova.ai/proxy/voice/api/tts?text=" + encText + "%0A&voicefont=1&format=wav",filename + '.wav')
except Exception as e:
print (e)
tts = gTTS(saveSTR, lang = 'ko')
tts.save('./' + filename + '.wav')
pass
#mp3 ํ์ผ ์ฌ์ํจ์
async def PlaySound(voiceclient, filename):
source = discord.FFmpegPCMAudio(filename)
try:
voiceclient.play(source)
except discord.errors.ClientException:
while voiceclient.is_playing():
await asyncio.sleep(1)
while voiceclient.is_playing():
await asyncio.sleep(1)
voiceclient.stop()
source.cleanup()
#my_bot.db ์ ์ฅํ๊ธฐ
async def dbSave():
global bossData
global bossNum
global bossTime
global bossTimeString
global bossDateString
global bossMungCnt
for i in range(bossNum):
for j in range(bossNum):
if bossTimeString[i] and bossTimeString[j] != '99:99:99':
if bossTimeString[i] == bossTimeString[j] and i != j:
tmp_time1 = bossTimeString[j][:6]
tmp_time2 = (int(bossTimeString[j][6:]) + 1)%100
if tmp_time2 < 10 :
tmp_time22 = '0' + str(tmp_time2)
elif tmp_time2 == 60 :
tmp_time22 = '00'
else :
tmp_time22 = str(tmp_time2)
bossTimeString[j] = tmp_time1 + tmp_time22
datelist1 = bossTime
datelist = list(set(datelist1))
information1 = '----- ๋ณด์คํ ์ ๋ณด -----\n'
for timestring in sorted(datelist):
for i in range(bossNum):
if timestring == bossTime[i]:
if bossTimeString[i] != '99:99:99' :
if bossData[i][2] == '0' :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + bossTimeString[i] + ' @ ' + bossDateString[i] + ' (๋ฏธ์๋ ฅ ' + str(bossMungCnt[i]) + 'ํ)' + ' * ' + bossData[i][6] + '\n'
else :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + bossTimeString[i] + ' @ ' + bossDateString[i] + ' (๋ฉ ' + str(bossMungCnt[i]) + 'ํ)' + ' * ' + bossData[i][6] + '\n'
try :
contents = repo.get_contents("my_bot.db")
repo.update_file(contents.path, "bossDB", information1, contents.sha)
except GithubException as e :
print ('save error!!')
print(e.args[1]['message']) # output: This repository is empty.
errortime = datetime.datetime.now()
print (errortime)
pass
#my_bot.db ๋ถ๋ฌ์ค๊ธฐ
async def dbLoad():
global LoadChk
contents1 = repo.get_contents("my_bot.db")
file_data = base64.b64decode(contents1.content)
file_data = file_data.decode('utf-8')
beforeBossData = file_data.split('\n')
if len(beforeBossData) > 1:
for i in range(len(beforeBossData)-1):
for j in range(bossNum):
startPos = beforeBossData[i+1].find('-')
endPos = beforeBossData[i+1].find('(')
if beforeBossData[i+1][startPos+2:endPos] == bossData[j][0] :
#if beforeBossData[i+1].find(bossData[j][0]) != -1 :
tmp_mungcnt = 0
tmp_len = beforeBossData[i+1].find(':')
tmp_datelen = beforeBossData[i+1].find('@')
tmp_msglen = beforeBossData[i+1].find('*')
years1 = beforeBossData[i+1][tmp_datelen+2:tmp_datelen+6]
months1 = beforeBossData[i+1][tmp_datelen+7:tmp_datelen+9]
days1 = beforeBossData[i+1][tmp_datelen+10:tmp_datelen+12]
hours1 = beforeBossData[i+1][tmp_len+2:tmp_len+4]
minutes1 = beforeBossData[i+1][tmp_len+5:tmp_len+7]
seconds1 = beforeBossData[i+1][tmp_len+8:tmp_len+10]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(year = int(years1), month = int(months1), day = int(days1), hour=int(hours1), minute=int(minutes1), second = int(seconds1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[j][1]), minutes = int(bossData[j][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
tmp_mungcnt = tmp_mungcnt + 1
now2 = tmp_now
tmp_bossTime[j] = bossTime[j] = now2
tmp_bossTimeString[j] = bossTimeString[j] = bossTime[j].strftime('%H:%M:%S')
tmp_bossDateString[j] = bossDateString[j] = bossTime[j].strftime('%Y-%m-%d')
bossData[j][6] = beforeBossData[i+1][tmp_msglen+2:len(beforeBossData[i+1])]
if beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3] != 0 and beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] == ' ':
bossMungCnt[j] = int(beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3]) + tmp_mungcnt
elif beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] != ' ':
bossMungCnt[j] = int(beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] + beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3]) + tmp_mungcnt
else:
bossMungCnt[j] = 0
LoadChk = 0
print ("<๋ถ๋ฌ์ค๊ธฐ ์๋ฃ>")
else:
#await client.get_channel(channel).send('<๋ณด์คํ์ ์ ๋ณด๊ฐ ์์ต๋๋ค.>', tts=False)
LoadChk = 1
print ("๋ณด์คํ์ ์ ๋ณด๊ฐ ์์ต๋๋ค.")
#๊ณ ์ ๋ณด์ค ๋ ์ง์ ์ฅ
async def FixedBossDateSave():
global fixed_bossData
global fixed_bossTime
global fixed_bossNum
global FixedBossDateData
global indexFixedBossname
for value in indexFixedBossname:
for i in range(fixed_bossNum):
if FixedBossDateData[value].find(fixed_bossData[i][0]) != -1:
FixedBossDateData[value + 3] = 'startDate = '+ fixed_bossTime[i].strftime('%Y-%m-%d') + '\n'
FixedBossDateDataSTR = ""
for j in range(len(FixedBossDateData)):
pos = len(FixedBossDateData[j])
tmpSTR = FixedBossDateData[j][:pos-1] + '\r\n'
FixedBossDateDataSTR += tmpSTR
contents = repo.get_contents("fixed_boss.ini")
repo.update_file(contents.path, "bossDB", FixedBossDateDataSTR, contents.sha)
#์์ฑ์ฑ๋ ์์ฅ
async def JointheVC(VCchannel, TXchannel):
global chkvoicechannel
global voice_client1
if VCchannel is not None:
if chkvoicechannel == 0:
voice_client1 = await VCchannel.connect(reconnect=True)
if voice_client1.is_connected():
await voice_client1.disconnect()
voice_client1 = await VCchannel.connect(reconnect=True)
chkvoicechannel = 1
#await PlaySound(voice_client1, './sound/hello.mp3')
else :
await voice_client1.disconnect()
voice_client1 = await VCchannel.connect(reconnect=True)
#await PlaySound(voice_client1, './sound/hello.mp3')
else:
await TXchannel.send('์์ฑ์ฑ๋์ ๋จผ์ ๋ค์ด๊ฐ์ฃผ์ธ์.', tts=False)
#์ฌ๋ค๋ฆฌํจ์
async def LadderFunc(number, ladderlist, channelVal):
if number < len(ladderlist):
result_ladder = random.sample(ladderlist, number)
result_ladderSTR = ','.join(map(str, result_ladder))
embed = discord.Embed(
title = "----- ๋น์ฒจ! -----",
description= '```' + result_ladderSTR + '```',
color=0xff00ff
)
await channelVal.send(embed=embed, tts=False)
else:
await channelVal.send('```์ถ์ฒจ์ธ์์ด ์ด ์ธ์๊ณผ ๊ฐ๊ฑฐ๋ ๋ง์ต๋๋ค. ์ฌ์๋ ฅ ํด์ฃผ์ธ์```', tts=False)
## ๋ช์น ์์ธ์ฒ๋ฆฌ
def handle_exit():
#print("Handling")
client.loop.run_until_complete(client.logout())
for t in asyncio.Task.all_tasks(loop=client.loop):
if t.done():
#t.exception()
try:
#print ('try : ', t)
t.exception()
except asyncio.CancelledError:
#print ('cancel : ', t)
continue
continue
t.cancel()
try:
client.loop.run_until_complete(asyncio.wait_for(t, 5, loop=client.loop))
t.exception()
except asyncio.InvalidStateError:
pass
except asyncio.TimeoutError:
pass
except asyncio.CancelledError:
pass
# ๋ด์ด ๊ตฌ๋๋์์ ๋ ๋์๋๋ ์ฝ๋์๋๋ค.
@client.event
async def on_ready():
global channel
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global chkvoicechannel
global chflg
global endTime
print("Logged in as ") #ํ๋ฉด์ ๋ด์ ์์ด๋, ๋๋ค์์ด ์ถ๋ ฅ๋ฉ๋๋ค.
print(client.user.name)
print(client.user.id)
print("===========")
#await joinVoiceChannel()
all_channels = client.get_all_channels()
for channel1 in all_channels:
channel_type.append(str(channel1.type))
channel_info.append(channel1)
for i in range(len(channel_info)):
if channel_type[i] == "text":
channel_name.append(str(channel_info[i].name))
channel_id.append(str(channel_info[i].id))
for i in range(len(channel_info)):
if channel_type[i] == "voice":
channel_voice_name.append(str(channel_info[i].name))
channel_voice_id.append(str(channel_info[i].id))
await dbLoad()
if basicSetting[6] != "" and basicSetting[7] != "" :
#print ('join channel')
await JointheVC(client.get_channel(basicSetting[6]), client.get_channel(basicSetting[7]))
channel = basicSetting[7]
chflg = 1
print('< ํ์คํธ์ฑ๋ [' + client.get_channel(basicSetting[7]).name + '] ์ ์์๋ฃ>')
print('< ์์ฑ์ฑ๋ [' + client.get_channel(basicSetting[6]).name + '] ์ ์์๋ฃ>')
if basicSetting[8] != "":
print('< ์ฌ๋ค๋ฆฌ์ฑ๋ [' + client.get_channel(int(basicSetting[8])).name + '] ์ ์์๋ฃ>')
if basicSetting[11] != "":
print('< ์ ์ฐ์ฑ๋ [' + client.get_channel(int(basicSetting[11])).name + '] ์ ์์๋ฃ>')
if int(basicSetting[13]) != 0 :
print('< ๋ณดํ๋ด ์ฌ์์ ์๊ฐ ' + endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') + ' >')
print('< ๋ณดํ๋ด ์ฌ์์ ์ฃผ๊ธฐ ' + basicSetting[13] + '์ผ >')
else :
print('< ๋ณดํ๋ด ์ฌ์์ ์ค์ ์๋จ >')
# ๋์ค์ฝ๋์๋ ํ์ฌ ๋ณธ์ธ์ด ์ด๋ค ๊ฒ์์ ํ๋ ์ดํ๋์ง ๋ณด์ฌ์ฃผ๋ ๊ธฐ๋ฅ์ด ์์ต๋๋ค.
# ์ด ๊ธฐ๋ฅ์ ์ฌ์ฉํ์ฌ ๋ด์ ์ํ๋ฅผ ๊ฐ๋จํ๊ฒ ์ถ๋ ฅํด์ค ์ ์์ต๋๋ค.
await client.change_presence(status=discord.Status.dnd, activity=discord.Game(name="!๋ฉ๋ด", type=1), afk=False)
while True:
# ๋ด์ด ์๋ก์ด ๋ฉ์์ง๋ฅผ ์์ ํ์๋ ๋์๋๋ ์ฝ๋์๋๋ค.
@client.event
async def on_message(msg):
if msg.author.bot: #๋ง์ฝ ๋ฉ์์ง๋ฅผ ๋ณด๋ธ์ฌ๋์ด ๋ด์ผ ๊ฒฝ์ฐ์๋
return None #๋์ํ์ง ์๊ณ ๋ฌด์ํฉ๋๋ค.
global channel
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global bossMungFlag
global bossMungCnt
global voice_client1
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global chflg
global LoadChk
global indexFixedBossname
global FixedBossDateData
global gc #์ ์ฐ
global credentials #์ ์ฐ
global regenembed
id = msg.author.id #id๋ผ๋ ๋ณ์์๋ ๋ฉ์์ง๋ฅผ ๋ณด๋ธ์ฌ๋์ ID๋ฅผ ๋ด์ต๋๋ค.
if chflg == 0 :
channel = int(msg.channel.id) #channel์ด๋ผ๋ ๋ณ์์๋ ๋ฉ์์ง๋ฅผ ๋ฐ์ ์ฑ๋์ ID๋ฅผ ๋ด์ต๋๋ค
if basicSetting[7] == "":
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i] == 'textchannel = \r':
inputData_textCH[i] = 'textchannel = ' + str(channel) + '\r'
basicSetting[7] = channel
#print ('======', inputData_text[i])
result_textCH = '\n'.join(inputData_textCH)
#print (result_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print('< ํ์คํธ์ฑ๋ [' + client.get_channel(channel).name + '] ์ ์์๋ฃ>')
if basicSetting[6] != "":
#print ('join channel')
await JointheVC(client.get_channel(basicSetting[6]), channel)
print('< ์์ฑ์ฑ๋ [' + client.get_channel(basicSetting[6]).name + '] ์ ์์๋ฃ>')
if int(basicSetting[13]) != 0 :
print('< ๋ณดํ๋ด ์ฌ์์ ์๊ฐ ' + endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') + ' >')
print('< ๋ณดํ๋ด ์ฌ์์ ์ฃผ๊ธฐ ' + basicSetting[13] + '์ผ >')
else :
print('< ๋ณดํ๋ด ์ฌ์์ ์ค์ ์๋จ >')
chflg = 1
if client.get_channel(channel) != msg.channel:
##### ์ฌ๋ค๋ฆฌ ์ฑ๋๋ฐ๊พธ๊ธฐ
if basicSetting[8] != "":
if msg.channel.id == int(basicSetting[8]): #### ์ฌ๋ค๋ฆฌ ์ฑ๋ID ๊ฐ๋ฃ์ผ๋ฉด ๋จ
message = await msg.channel.fetch_message(msg.id)
##################################
if message.content.startswith(command[11]):
ladder = []
ladder = message.content[len(command[11])+1:].split(" ")
num_cong = int(ladder[0])
del(ladder[0])
await LadderFunc(num_cong, ladder, msg.channel)
##################################
if basicSetting[11] != "":
if msg.channel.id == int(basicSetting[11]) : #### ์ ์ฐ์ฑ๋ ์ฑ๋ID ๊ฐ๋ฃ์ผ๋ฉด ๋จ
message = await msg.channel.fetch_message(msg.id)
################ ์ ์ฐํ์ธ ################
if message.content.startswith(command[12]):
if basicSetting[10] !="" and basicSetting[12] !="" and basicSetting[14] !="" and basicSetting[15] !="" and basicSetting[16] !="" :
SearchID = message.content[len(command[12])+1:]
gc = gspread.authorize(credentials)
wks = gc.open(basicSetting[12]).worksheet(basicSetting[14])
wks.update_acell(basicSetting[15], SearchID)
result = wks.acell(basicSetting[16]).value
embed = discord.Embed(
description= '```' + SearchID + ' ๋์ด ๋ฐ์ ๋ค์ด์ผ๋ ' + result + ' ๋ค์ด์ผ ์๋๋ค.```',
color=0xff00ff
)
await msg.channel.send(embed=embed, tts=False)
else :
message = await client.get_channel(channel).fetch_message(msg.id)
################ ํ์คํธ ์ ๋ณดํ์ธ ################
if message.content == command[2]:
ch_information = []
cnt = 0
ch_information.append('')
for i in range(len(channel_name)):
if len(ch_information[cnt]) > 1000 :
ch_information.append('')
cnt += 1
ch_information[cnt] = ch_information[cnt] + '[' + channel_id[i] + '] ' + channel_name[i] + '\n'
ch_voice_information = []
cntV = 0
ch_voice_information.append('')
for i in range(len(channel_voice_name)):
if len(ch_voice_information[cntV]) > 1000 :
ch_voice_information.append('')
cntV += 1
ch_voice_information[cntV] = ch_voice_information[cntV] + '[' + channel_voice_id[i] + '] ' + channel_voice_name[i] + '\n'
'''
for i in range(len(ch_information)):
print ("--------------------------")
print (ch_information[i])
print (len(ch_information[i]))
print (len(ch_information))
for i in range(len(ch_voice_information)):
print ("+++++++++++++++++++++++++")
print (ch_voice_information[i])
print (len(ch_voice_information[i]))
print (len(ch_voice_information))
'''
if len(ch_information) == 1 and len(ch_voice_information) == 1:
embed = discord.Embed(
title = "----- ์ฑ๋ ์ ๋ณด -----",
description= '',
color=0xff00ff
)
embed.add_field(
name="< ํ์คํธ ์ฑ๋ >",
value= '```' + ch_information[0] + '```',
inline = False
)
embed.add_field(
name="< ๋ณด์ด์ค ์ฑ๋ >",
value= '```' + ch_voice_information[0] + '```',
inline = False
)
await client.get_channel(channel).send( embed=embed, tts=False)
else :
embed = discord.Embed(
title = "----- ์ฑ๋ ์ ๋ณด -----\n< ํ์คํธ ์ฑ๋ >",
description= '```' + ch_information[0] + '```',
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(ch_information)-1):
embed = discord.Embed(
title = '',
description= '```' + ch_information[i+1] + '```',
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
embed = discord.Embed(
title = "< ์์ฑ ์ฑ๋ >",
description= '```' + ch_voice_information[0] + '```',
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(ch_voice_information)-1):
embed = discord.Embed(
title = '',
description= '```' + ch_voice_information[i+1] + '```',
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ ํ์คํธ์ฑ๋์ด๋ ################
if message.content.startswith(command[3]):
tmp_sayMessage1 = message.content
for i in range(len(channel_name)):
if channel_name[i] == str(tmp_sayMessage1[len(command[3])+1:]):
channel = int(channel_id[i])
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i] == 'textchannel = ' + str(basicSetting[7]) + '\r':
inputData_textCH[i] = 'textchannel = ' + str(channel) + '\r'
basicSetting[7] = int(channel)
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
await client.get_channel(channel).send('< ' + client.get_channel(channel).name + ' ์ด๋์๋ฃ>', tts=False)
hello = message.content
##################################
for i in range(bossNum):
################ ๋ณด์ค ์ปท์ฒ๋ฆฌ ################
if message.content.startswith(bossData[i][0] +'์ปท'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'์ปท'
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
embed = discord.Embed(
description= '```๋ค์ ' + bossData[i][0] + ' ' + bossTimeString[i] + '์๋๋ค.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
################ ๋ณด์ค ๋ฉ ์ฒ๋ฆฌ ################
if message.content.startswith(bossData[i][0] +'๋ฉ'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'๋ฉ'
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
temptime = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
temptime = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
nextTime = temptime + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
bossMungCnt[i] = 0
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
if nextTime > tmp_now :
nextTime = nextTime + datetime.timedelta(days=int(-1))
if nextTime < tmp_now :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while tmp_now > nextTime :
nextTime = nextTime + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
else :
nextTime = nextTime
tmp_bossTime[i] = bossTime[i] = nextTime
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
embed = discord.Embed(
description= '```๋ค์ ' + bossData[i][0] + ' ' + bossTimeString[i] + '์๋๋ค.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
else:
if tmp_bossTime[i] < tmp_now :
nextTime = tmp_bossTime[i] + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
embed = discord.Embed(
description= '```๋ค์ ' + bossData[i][0] + ' ' + bossTimeString[i] + '์๋๋ค.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
else:
await client.get_channel(channel).send('```' + bossData[i][0] + 'ํ์ด ์์ง ์๋์ต๋๋ค. ๋ค์ ' + bossData[i][0] + 'ํ [' + tmp_bossTimeString[i] + '] ์๋๋ค```', tts=False)
################ ์์ ๋ณด์ค ํ์ ์๋ ฅ ################
if message.content.startswith(bossData[i][0] +'์์'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'์์'
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now < now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(1))
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_now
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
embed = discord.Embed(
description= '```๋ค์ ' + bossData[i][0] + ' ' + bossTimeString[i] + '์๋๋ค.```',
color=0xff0000
)
await client.get_channel(channel).send(embed=embed, tts=False)
else:
await client.get_channel(channel).send('```' + bossData[i][0] +' ์์ ์๊ฐ์ ์๋ ฅํด์ฃผ์ธ์.```', tts=False)
################ ๋ณด์คํ์ ์ญ์ ################
if message.content == bossData[i][0] +'์ญ์ ':
bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
tmp_bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
tmp_bossTimeString[i] = '99:99:99'
tmp_bossDateString[i] = '9999-99-99'
bossFlag[i] = (False)
bossFlag0[i] = (False)
bossMungFlag[i] = (False)
bossMungCnt[i] = 0
await client.get_channel(channel).send('<' + bossData[i][0] + ' ์ญ์ ์๋ฃ>', tts=False)
await dbSave()
print ('<' + bossData[i][0] + ' ์ญ์ ์๋ฃ>')
################ ๋ณด์ค๋ณ ๋ฉ๋ชจ ################
if message.content.startswith(bossData[i][0] +'๋ฉ๋ชจ '):
tmp_msg = bossData[i][0] +'๋ฉ๋ชจ '
bossData[i][6] = hello[len(tmp_msg):]
await client.get_channel(channel).send('< ' + bossData[i][0] + ' [ ' + bossData[i][6] + ' ] ๋ฉ๋ชจ๋ฑ๋ก ์๋ฃ>', tts=False)
if message.content.startswith(bossData[i][0] +'๋ฉ๋ชจ์ญ์ '):
bossData[i][6] = ''
await client.get_channel(channel).send('< ' + bossData[i][0] + ' ๋ฉ๋ชจ์ญ์ ์๋ฃ>', tts=False)
################ ?????????????? ################
if message.content == '!์ค๋น ' :
await PlaySound(voice_client1, './sound/์ค๋น .mp3')
if message.content == '!์ธ๋' :
await PlaySound(voice_client1, './sound/์ธ๋.mp3')
if message.content == '!ํ' :
await PlaySound(voice_client1, './sound/ํ.mp3')
if message.content == '!TJ' or message.content == '!tj' :
resultTJ = random.randrange(1,9)
await PlaySound(voice_client1, './sound/TJ' + str(resultTJ) +'.mp3')
################ ๋ถ๋ฐฐ ๊ฒฐ๊ณผ ์ถ๋ ฅ ################
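# Loot-split helper: a 5% fee is first taken from the total (rounded up), the remainder is divided
# by the number of people, and a second ~5% listing fee is applied to each per-person share before
# the final per-person payout is reported (the arithmetic below implements exactly this).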
if message.content.startswith(command[10]):
separate_money = []
separate_money = message.content[len(command[10])+1:].split(" ")
num_sep = int(separate_money[0])
cal_tax1 = math.ceil(float(separate_money[1])*0.05)
real_money = int(int(separate_money[1]) - cal_tax1)
cal_tax2 = int(real_money/num_sep) - math.ceil(float(int(real_money/num_sep))*0.95)
if num_sep == 0 :
await client.get_channel(channel).send('```๋ถ๋ฐฐ ์ธ์์ด 0์๋๋ค. ์ฌ์๋ ฅ ํด์ฃผ์ธ์.```', tts=False)
else :
await client.get_channel(channel).send('```1์ฐจ์ธ๊ธ : ' + str(cal_tax1) + '\n1์ฐจ ์๋ น์ก : ' + str(real_money) + '\n๋ถ๋ฐฐ์ ๊ฑฐ๋์๋ฑ๋ก๊ธ์ก : ' + str(int(real_money/num_sep)) + '\n2์ฐจ์ธ๊ธ : ' + str(cal_tax2) + '\n์ธ๋น ์ค์๋ น์ก : ' + str(int(float(int(real_money/num_sep))*0.95)) + '```', tts=False)
################ ์ฌ๋ค๋ฆฌ ๊ฒฐ๊ณผ ์ถ๋ ฅ ################
if message.content.startswith(command[11]):
ladder = []
ladder = message.content[len(command[11])+1:].split(" ")
num_cong = int(ladder[0])
del(ladder[0])
await LadderFunc(num_cong, ladder, client.get_channel(channel))
################ ๋ณดํ๋ด ๋ฉ๋ด ์ถ๋ ฅ ################
if message.content == command[0]:
command_list = ''
command_list += command[1] + '\n' #!์ค์ ํ์ธ
command_list += command[2] + '\n' #!์ฑ๋ํ์ธ
command_list += command[3] + ' [์ฑ๋๋ช]\n' #!์ฑ๋์ด๋
command_list += command[4] + '\n' #!์ํ
command_list += command[5] + '\n' #!๋ถ๋ฌ์ค๊ธฐ
command_list += command[6] + '\n' #!์ด๊ธฐํ
command_list += command[7] + '\n' #!๋ช์น
command_list += command[8] + '\n' #!์ฌ์์
command_list += command[9] + '\n' #!๋ฏธ์์ฝ
command_list += command[10] + ' [์ธ์] [๊ธ์ก]\n' #!๋ถ๋ฐฐ
command_list += command[11] + ' [๋ฝ์์ธ์์] [์์ด๋1] [์์ด๋2]...\n' #!์ฌ๋ค๋ฆฌ
command_list += command[12] + ' [์์ด๋]\n' #!์ ์ฐ
command_list += command[13] + ' ๋๋ ' + command[14] + ' 0000, 00:00\n' #!๋ณด์ค์ผ๊ด
command_list += command[14] + '\n' #!q
command_list += command[15] + ' [ํ ๋ง]\n' #!v
command_list += command[16] + '\n' #!๋ฆฌ์
command_list += command[17] + '\n' #!ํ์ฌ์๊ฐ
command_list += command[18] + '\n' #!๊ณต์ง
command_list += command[18] + ' [๊ณต์ง๋ด์ฉ]\n' #!๊ณต์ง
command_list += command[18] + '์ญ์ \n' #!๊ณต์ง
command_list += command[19] + ' [ํ ๋ง]\n\n' #!์ํ
command_list += command[20] + '\n' #๋ณด์คํ
command_list += command[21] + '\n' #!๋ณด์คํ
command_list += command[22] + '\n' #!
command_list += command[23] + ' [๊ธ์ก]\n' #!
command_list += '[๋ณด์ค๋ช]์ปท ๋๋ [๋ณด์ค๋ช]์ปท 0000, 00:00\n'
command_list += '[๋ณด์ค๋ช]๋ฉ ๋๋ [๋ณด์ค๋ช]๋ฉ 0000, 00:00\n'
command_list += '[๋ณด์ค๋ช]์์ ๋๋ [๋ณด์ค๋ช]์์ 0000, 00:00\n'
command_list += '[๋ณด์ค๋ช]์ญ์ \n'
command_list += '[๋ณด์ค๋ช]๋ฉ๋ชจ [ํ ๋ง]\n'
embed = discord.Embed(
title = "----- ๋ช
๋ น์ด -----",
description= '```' + command_list + '```',
color=0xff00ff
)
embed.add_field(
name="----- ์ถ๊ฐ๊ธฐ๋ฅ -----",
value= '```[๋ณด์ค๋ช]์ปท/๋ฉ/์์ [ํ ๋ง] : ๋ณด์ค์๊ฐ ์๋ ฅ ํ ๋น์นธ ๋๋ฒ!! ๋ฉ๋ชจ ๊ฐ๋ฅ```'
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ ๋ฏธ์์ฝ ๋ณด์คํ์ ์ถ๋ ฅ ################
if message.content == command[9]:
temp_bossTime2 = []
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' :
temp_bossTime2.append(bossData[i][0])
if len(temp_bossTime2) != 0:
temp_bossTimeSTR1 = ','.join(map(str, temp_bossTime2))
temp_bossTimeSTR1 = '```fix\n' + temp_bossTimeSTR1 + '\n```'
else:
temp_bossTimeSTR1 = '``` ```'
embed = discord.Embed(
title = "----- ๋ฏธ์์ฝ๋ณด์ค -----",
description= temp_bossTimeSTR1,
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ ์์ฑํ์ผ ์์ฑ ํ ์ฌ์ ################
if message.content.startswith(command[15]) or message.content.startswith('!ใ') or message.content.startswith('!V'):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[len(command[15])+1:]
await MakeSound(message.author.display_name +'๋์ด.' + sayMessage, './sound/say')
await client.get_channel(channel).send("```< " + msg.author.display_name + " >๋์ด \"" + sayMessage + "\"```", tts=False)
await PlaySound(voice_client1, './sound/say.wav')
################ ๋ณดํ๋ด ์ฌ์์ ################
if message.content == command[8] :
if basicSetting[2] != '0':
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
await dbSave()
#await FixedBossDateSave()
#await client.get_channel(channel).send('<๋ณดํ๋ด ์ฌ์์ ์ค... ๊ฐ์๊ธฐ ์ธ์ฌํด๋ ๋๋ผ์ง๋ง์ธ์!>', tts=False)
print("๋ณดํ๋ด๊ฐ์ ์ฌ์์!")
await asyncio.sleep(2)
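# Restart trick (assumption about the hosting setup): rewriting restart.txt in the companion
# GitHub repo is presumably picked up by whatever runs the bot and triggers a redeploy/restart;
# the alternating file content just forces a new commit each time.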
inidata_restart = repo_restart.get_contents("restart.txt")
file_data_restart = base64.b64decode(inidata_restart.content)
file_data_restart = file_data_restart.decode('utf-8')
inputData_restart = file_data_restart.split('\n')
if len(inputData_restart) < 3:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_0", "restart\nrestart\nrestrat\n", contents12.sha)
else:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_1", "", contents12.sha)
################ ๋ณดํ๋ด ์์ฑ์ฑ๋ ์ํ ################
if message.content == command[4]:
if message.author.voice == None:
await client.get_channel(channel).send('์์ฑ์ฑ๋์ ๋จผ์ ๋ค์ด๊ฐ์ฃผ์ธ์.', tts=False)
else:
voice_channel = message.author.voice.channel
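# Persist the chosen voice channel: its id is written back into test_setting.ini on GitHub
# (either filling an empty 'voicechannel' entry or replacing the old id) so it survives restarts,
# then the bot joins that channel.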
if basicSetting[6] == "":
inidata_voiceCH = repo.get_contents("test_setting.ini")
file_data_voiceCH = base64.b64decode(inidata_voiceCH.content)
file_data_voiceCH = file_data_voiceCH.decode('utf-8')
inputData_voiceCH = file_data_voiceCH.split('\n')
for i in range(len(inputData_voiceCH)):
if inputData_voiceCH[i] == 'voicechannel = \r':
inputData_voiceCH[i] = 'voicechannel = ' + str(voice_channel.id) + '\r'
basicSetting[6] = int(voice_channel.id)
result_voiceCH = '\n'.join(inputData_voiceCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voiceCH, contents.sha)
elif basicSetting[6] != int(voice_channel.id):
inidata_voiceCH = repo.get_contents("test_setting.ini")
file_data_voiceCH = base64.b64decode(inidata_voiceCH.content)
file_data_voiceCH = file_data_voiceCH.decode('utf-8')
inputData_voiceCH = file_data_voiceCH.split('\n')
for i in range(len(inputData_voiceCH)):
if inputData_voiceCH[i] == 'voicechannel = ' + str(basicSetting[6]) + '\r':
inputData_voiceCH[i] = 'voicechannel = ' + str(voice_channel.id) + '\r'
basicSetting[6] = int(voice_channel.id)
result_voiceCH = '\n'.join(inputData_voiceCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voiceCH, contents.sha)
await JointheVC(voice_channel, channel)
await client.get_channel(channel).send('< ์์ฑ์ฑ๋ [' + client.get_channel(voice_channel.id).name + '] ์ ์์๋ฃ>', tts=False)
################ ์ ์ฅ๋ ์ ๋ณด ์ด๊ธฐํ ################
if message.content == command[6] :
basicSetting = []
bossData = []
fixed_bossData = []
bossTime = []
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []
bossMungCnt = []
FixedBossDateData = []
indexFixedBossname = []
init()
await dbSave()
await client.get_channel(channel).send('<์ด๊ธฐํ ์๋ฃ>', tts=False)
print ("<์ด๊ธฐํ ์๋ฃ>")
################ ๋ณด์คํ์ ์ผ๊ด ์ค์ ################
if message.content.startswith(command[13]):
for i in range(bossNum):
tmp_msg = command[13]
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 1
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await dbSave()
await dbLoad()
await dbSave()
await client.get_channel(channel).send('<๋ณด์ค ์ผ๊ด ์๋ ฅ ์๋ฃ>', tts=False)
print ("<๋ณด์ค ์ผ๊ด ์๋ ฅ ์๋ฃ>")
################ ๋ณดํ๋ด ๊ธฐ๋ณธ ์ค์ ํ์ธ ################
if message.content == command[1]:
setting_val = '๋ณดํ๋ด๋ฒ์ : Server Ver.14 (2020. 1. 30.)\n'
setting_val += '์์ฑ์ฑ๋ : ' + client.get_channel(basicSetting[6]).name + '\n'
setting_val += 'ํ์คํธ์ฑ๋ : ' + client.get_channel(basicSetting[7]).name +'\n'
if basicSetting[8] != "" :
setting_val += '์ฌ๋ค๋ฆฌ์ฑ๋ : ' + client.get_channel(int(basicSetting[8])).name + '\n'
if basicSetting[11] != "" :
setting_val += '์ ์ฐ์ฑ๋ : ' + client.get_channel(int(basicSetting[11])).name + '\n'
setting_val += '๋ณด์ค์ ์๋ฆผ์๊ฐ1 : ' + basicSetting[1] + ' ๋ถ ์ \n'
setting_val += '๋ณด์ค์ ์๋ฆผ์๊ฐ2 : ' + basicSetting[3] + ' ๋ถ ์ \n'
setting_val += '๋ณด์ค๋ฉํ์ธ์๊ฐ : ' + basicSetting[2] + ' ๋ถ ํ\n'
embed = discord.Embed(
title = "----- ์ค์ ๋ด์ฉ -----",
description= '```' + setting_val + '```',
color=0xff00ff
)
await client.get_channel(channel).send(embed=embed, tts=False)
################ my_bot.db์ ์ ์ฅ๋ ๋ณด์คํ์ ๋ถ๋ฌ์ค๊ธฐ ################
if message.content == command[5] :
await dbLoad()
if LoadChk == 0:
await client.get_channel(channel).send('<๋ถ๋ฌ์ค๊ธฐ ์๋ฃ>', tts=False)
else:
await client.get_channel(channel).send('<๋ณด์คํ์ ์ ๋ณด๊ฐ ์์ต๋๋ค.>', tts=False)
################ ๊ฐ์ฅ ๊ทผ์ ํ ๋ณด์คํ์ ์ถ๋ ฅ ################
if message.content == '!ใ' or message.content == command[14] or message.content == '!ใ' or message.content == '!Q':
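# Quick countdown view: gather the next spawn time of every tracked boss (skipping mung-flagged
# and unset '99:99:99' entries) plus fixed bosses due within roughly the next three hours, sort
# them, and print a remaining-time line for up to basicSetting[9] of the soonest entries.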
checkTime = datetime.datetime.now() + datetime.timedelta(days=1, hours = int(basicSetting[0]))
datelist = []
datelist2 = []
temp_bossTime1 = []
ouput_bossData = []
aa = []
sorted_datelist = []
for i in range(bossNum):
if bossMungFlag[i] != True and bossTimeString[i] != '99:99:99' :
datelist2.append(bossTime[i])
for i in range(fixed_bossNum):
if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0])+3):
datelist2.append(fixed_bossTime[i])
datelist = list(set(datelist2))
for i in range(bossNum):
if bossMungFlag[i] != True :
aa.append(bossData[i][0]) #output_bossData[0] : ๋ณด์ค๋ช
aa.append(bossTime[i]) #output_bossData[1] : ์๊ฐ
aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : ์๊ฐ(00:00:00)
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
aa.append(fixed_bossData[i][0]) #output_bossData[0] : ๋ณด์ค๋ช
aa.append(fixed_bossTime[i]) #output_bossData[1] : ์๊ฐ
aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : ์๊ฐ(00:00:00)
ouput_bossData.append(aa)
aa = []
tmp_sorted_datelist = sorted(datelist)
for i in range(len(tmp_sorted_datelist)):
if checkTime > tmp_sorted_datelist[i]:
sorted_datelist.append(tmp_sorted_datelist[i])
if len(sorted_datelist) == 0:
await client.get_channel(channel).send( '<๋ณด์คํ์ ์ ๋ณด๊ฐ ์์ต๋๋ค.>', tts=False)
else :
result_lefttime = ''
if len(sorted_datelist) > int(basicSetting[9]):
for j in range(int(basicSetting[9])):
for i in range(len(ouput_bossData)):
if sorted_datelist[j] == ouput_bossData[i][1]:
leftTime = ouput_bossData[i][1] - (datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0])))
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '๋ค์ ' + ouput_bossData[i][0] + 'ํ๊น์ง %02d:%02d:%02d ๋จ์์ต๋๋ค. ' % (hours,minutes,seconds) + '[' + ouput_bossData[i][2] + ']\n'
else :
for j in range(len(sorted_datelist)):
for i in range(len(ouput_bossData)):
if sorted_datelist[j] == ouput_bossData[i][1]:
leftTime = ouput_bossData[i][1] - (datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0])))
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '๋ค์ ' + ouput_bossData[i][0] + 'ํ๊น์ง %02d:%02d:%02d ๋จ์์ต๋๋ค. ' % (hours,minutes,seconds) + '[' + ouput_bossData[i][2] + ']\n'
embed = discord.Embed(
description= result_lefttime,
color=0xff0000
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ ๋ณด์คํ์ ์ถ๋ ฅ ################
if message.content == command[20]:
datelist = []
datelist2 = []
temp_bossTime1 = []
ouput_bossData = []
aa = []
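# Each ouput_bossData entry is [name, datetime, 'HH:MM:SS', marker, mung-type, miss count, memo];
# the marker is '-' for a boss currently tracked as overdue/mung, '+' for a normally scheduled
# boss and '@' for a fixed-schedule boss.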
for i in range(bossNum):
if bossMungFlag[i] == True :
datelist2.append(tmp_bossTime[i])
else :
datelist2.append(bossTime[i])
for i in range(fixed_bossNum):
if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0])+3):
datelist2.append(fixed_bossTime[i])
datelist = list(set(datelist2))
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
temp_bossTime1.append(bossData[i][0])
else :
aa.append(bossData[i][0]) #output_bossData[0] : ๋ณด์ค๋ช
if bossMungFlag[i] == True :
aa.append(tmp_bossTime[i]) #output_bossData[1] : ์๊ฐ
aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : ์๊ฐ(00:00:00)
aa.append('-') #output_bossData[3] : -
else :
aa.append(bossTime[i]) #output_bossData[1] : ์๊ฐ
aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : ์๊ฐ(00:00:00)
aa.append('+') #output_bossData[3] : +
aa.append(bossData[i][2]) #output_bossData[4] : ๋ฉ/๋ฏธ์๋ ฅ ๋ณด์ค
aa.append(bossMungCnt[i]) #output_bossData[5] : ๋ฉ/๋ฏธ์๋ ฅํ์
aa.append(bossData[i][6]) #output_bossData[6] : ๋ฉ์ธ์ง
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
aa.append(fixed_bossData[i][0]) #output_bossData[0] : ๋ณด์ค๋ช
aa.append(fixed_bossTime[i]) #output_bossData[1] : ์๊ฐ
aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : ์๊ฐ(00:00:00)
aa.append('@') #output_bossData[3] : @
aa.append(0) #output_bossData[4] : ๋ฉ/๋ฏธ์๋ ฅ ๋ณด์ค
aa.append(0) #output_bossData[5] : ๋ฉ/๋ฏธ์๋ ฅํ์
aa.append("") #output_bossData[6] : ๋ฉ์ธ์ง
ouput_bossData.append(aa)
aa = []
if len(temp_bossTime1) != 0:
temp_bossTimeSTR1 = ','.join(map(str, temp_bossTime1))
temp_bossTimeSTR1 = '```fix\n' + temp_bossTimeSTR1 + '\n```'
else:
temp_bossTimeSTR1 = '``` ```'
boss_information = []
cnt = 0
boss_information.append('')
for timestring in sorted(datelist):
if len(boss_information[cnt]) > 1800 :
boss_information.append('')
cnt += 1
for i in range(len(ouput_bossData)):
if timestring == ouput_bossData[i][1]:
if ouput_bossData[i][4] == '0' :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (๋ฏธ ' + str(ouput_bossData[i][5]) + 'ํ)' + ' ' + ouput_bossData[i][6] + '\n'
else :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (๋ฉ ' + str(ouput_bossData[i][5]) + 'ํ)' + ' ' + ouput_bossData[i][6] + '\n'
if len(boss_information) == 1:
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- ๋ณด์คํ ์ ๋ณด -----",
description= boss_information[0],
color=0x0000ff
)
embed.add_field(
name="----- ๋ฏธ์์ฝ ๋ณด์ค -----",
value= temp_bossTimeSTR1,
inline = False
)
await client.get_channel(channel).send( embed=embed, tts=False)
else :
embed = discord.Embed(
title = "----- ๋ณด์คํ ์ ๋ณด -----",
description= '```diff\n' + boss_information[0] + '```',
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(boss_information)-1):
if len(boss_information[i+1]) != 0:
boss_information[i+1] = "```diff\n" + boss_information[i+1] + "\n```"
else :
boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= boss_information[i+1],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
embed = discord.Embed(
title = "----- ๋ฏธ์์ฝ ๋ณด์ค -----",
description= temp_bossTimeSTR1,
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
await dbSave()
################ ๋ณด์คํ์ ์ถ๋ ฅ(๊ณ ์ ๋ณด์คํฌํจ) ################
if message.content == command[21]:
datelist = []
datelist2 = []
temp_bossTime1 = []
ouput_bossData = []
aa = []
fixed_datelist = []
for i in range(bossNum):
if bossMungFlag[i] == True :
datelist2.append(tmp_bossTime[i])
else :
datelist2.append(bossTime[i])
datelist = list(set(datelist2))
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
temp_bossTime1.append(bossData[i][0])
else :
aa.append(bossData[i][0]) #output_bossData[0] : ๋ณด์ค๋ช
if bossMungFlag[i] == True :
aa.append(tmp_bossTime[i]) #output_bossData[1] : ์๊ฐ
aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : ์๊ฐ(00:00:00)
aa.append('-') #output_bossData[3] : -
else :
aa.append(bossTime[i]) #output_bossData[1] : ์๊ฐ
aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : ์๊ฐ(00:00:00)
aa.append('+') #output_bossData[3] : +
aa.append(bossData[i][2]) #output_bossData[4] : ๋ฉ/๋ฏธ์๋ ฅ ๋ณด์ค
aa.append(bossMungCnt[i]) #output_bossData[5] : ๋ฉ/๋ฏธ์๋ ฅํ์
aa.append(bossData[i][6]) #output_bossData[6] : ๋ฉ์ธ์ง
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
fixed_datelist.append(fixed_bossTime[i])
fixed_datelist = list(set(fixed_datelist))
temp_bossTime1 = []
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' :
temp_bossTime1.append(bossData[i][0])
if len(temp_bossTime1) != 0:
temp_bossTimeSTR1 = ','.join(map(str, temp_bossTime1))
temp_bossTimeSTR1 = '```fix\n' + temp_bossTimeSTR1 + '\n```'
else:
temp_bossTimeSTR1 = '``` ```'
fixedboss_information = []
cntF = 0
fixedboss_information.append('')
for timestring1 in sorted(fixed_datelist):
if len(fixedboss_information[cntF]) > 1800 :
fixedboss_information.append('')
cntF += 1
for i in range(fixed_bossNum):
if timestring1 == fixed_bossTime[i]:
if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == fixed_bossTime[i].strftime('%Y-%m-%d'):
tmp_timeSTR = fixed_bossTime[i].strftime('%H:%M:%S')
else:
tmp_timeSTR = '[' + fixed_bossTime[i].strftime('%Y-%m-%d') + '] ' + fixed_bossTime[i].strftime('%H:%M:%S')
fixedboss_information[cntF] = fixedboss_information[cntF] + tmp_timeSTR + ' : ' + fixed_bossData[i][0] + '\n'
boss_information = []
cnt = 0
boss_information.append('')
for timestring in sorted(datelist):
if len(boss_information[cnt]) > 1800 :
boss_information.append('')
cnt += 1
for i in range(len(ouput_bossData)):
if timestring == ouput_bossData[i][1]:
if ouput_bossData[i][4] == '0' :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (๋ฏธ ' + str(ouput_bossData[i][5]) + 'ํ)' + ' ' + ouput_bossData[i][6] + '\n'
else :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (๋ฉ ' + str(ouput_bossData[i][5]) + 'ํ)' + ' ' + ouput_bossData[i][6] + '\n'
if len(fixedboss_information[0]) != 0:
fixedboss_information[0] = "```diff\n" + fixedboss_information[0] + "\n```"
else :
fixedboss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- ๊ณ ์ ๋ณด ์ค -----",
description= fixedboss_information[0],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(fixedboss_information)-1):
if len(fixedboss_information[i+1]) != 0:
fixedboss_information[i+1] = "```diff\n" + fixedboss_information[i+1] + "\n```"
else :
fixedboss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= fixedboss_information[i+1],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- ๋ณด์คํ ์ ๋ณด -----",
description= boss_information[0],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
for i in range(len(boss_information)-1):
if len(boss_information[i+1]) != 0:
boss_information[i+1] = "```diff\n" + boss_information[i+1] + "\n```"
else :
boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= boss_information[i+1],
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
embed = discord.Embed(
title = "----- ๋ฏธ์์ฝ ๋ณด์ค -----",
description= temp_bossTimeSTR1,
color=0x0000ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
await dbSave()
################ ํ์ฌ์๊ฐ ํ์ธ ################
if message.content == command[17] :
curruntTime = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
embed = discord.Embed(
title = 'ํ์ฌ์๊ฐ์ ' + curruntTime.strftime('%H') + '์ ' + curruntTime.strftime('%M') + '๋ถ ' + curruntTime.strftime('%S')+ '์ด ์๋๋ค.',
color=0xff00ff
)
await client.get_channel(channel).send( embed=embed, tts=False)
################ ๋ฆฌ์ ์๊ฐ ์ถ๋ ฅ ################
if message.content == command[16] :
await client.get_channel(channel).send(embed=regenembed, tts=False)
################ ๋ฐ์ง์ ๋ณด ์ถ๋ ฅ ################
if message.content == command[22] :
noti_list = ''
noti_list += '<๋ถ ๋ฐ์ง>\n'
noti_list += 'ํฐ๋ง๊ฒฝ๊ณ์ง, ํฐ๋ง์ ์ด, ํ์๊ทธ๋ฆฌ์ค์ ๋จ\n'
noti_list += '์์ํ2์ธต, ์ํ3,4์ธต\n'
noti_list += '<๋ฐ๋ ๋ฐ์ง>\n'
noti_list += '์ฐ์ ์ฐ์ฑ, ๊ฒ์์ฒ, ์ฌ๋ฅ๊พผ๊ณ๊ณก\n'
noti_list += 'ํ์ด๋ฆฌ์ ๊ณ๊ณก ์์ชฝ, ๋์ชฝ, ์์ ์ ํ์\n'
noti_list += 'ํ๋ฃจ์ ๊ฑฐ์ฃผ์ง&์ด์\n'
noti_list += 'ํฐ๋ฏธ๋์์ ๋ณด๊ธ์๋ฆฌ, ๋ฐ๋์ ์ธ๋, ์์ํ1์ธต\n'
noti_list += '<์ํ ๋ฐ์ง>\n'
noti_list += '์๋ฆฌ์ฒด, ์์ํ ๋ถํ๊ตฌ, ์ํ 1,2์ธต\n'
noti_list += '<๋๋ฐ์ง>\n'
noti_list += '์ํ 5,6์ธต, ํฌ์์ ๋ฐ๋ค ์๋ถ, ๋๋ถ, ํฌ์ ํ์ฐ์ง\n'
noti_list += '<๋ฌผ ๋ฐ์ง>\n'
noti_list += '๋๋ง๋ฑ์ด์, ๋ ํ ๋ถ๋ฝ, ํฌ์์ ์ต์ง,\n'
ringembed = discord.Embed(
title = "----- ๋ฐ์ง ์ ๋ณด -----",
description= noti_list,
color=0xff00ff
)
await client.get_channel(channel).send( embed=ringembed, tts=False)
################ ๊ณ์ฐ ์ถ๋ ฅ ################
if message.content.startswith(command[23]):
separate_money = message.content[len(command[23])+1:].split(" ")
cal_tax1 = math.ceil(float(separate_money[0])*0.05)
cal_tax2 = math.ceil(float(separate_money[0])*0.95)
await client.get_channel(channel).send(
'=======================' +
'\nํ์ด๋ฐฑ ๊ธ์ก : ' +
str(cal_tax2) +
'\n์์๋ฃ : ' +
str(cal_tax1) +
'======================', tts=False)
################ ๋ช์กด์ ################
if message.content == command[7]:
await client.get_channel(channel).send( '< ๋ณดํ๋ด ๋ช์น ๋ง๊ณ ์จ ๊ณ ๋ฅด๊ธฐ ์ค! ์ ์๋ง์! >', tts=False)
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
await dbSave()
print("๋ช
์น!")
await voice_client1.disconnect()
#client.clear()
raise SystemExit
################ ์ํ๋ฉ์ธ์ง๋ณ๊ฒฝ ################
if message.content.startswith(command[19]):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[len(command[19])+1:]
await client.change_presence(status=discord.Status.dnd, activity=discord.Game(name=sayMessage, type=1), afk = False)
await client.get_channel(channel).send( '< ์ํ๋ฉ์ธ์ง ๋ณ๊ฒฝ์๋ฃ >', tts=False)
################ ๊ณต์งํ์ธ, ์๋ ฅ ๋ฐ ์ญ์ ################
if message.content == command[18]:
notice_initdata = repo.get_contents("notice.ini")
notice = base64.b64decode(notice_initdata.content)
notice = notice.decode('utf-8')
if notice != '' :
embed = discord.Embed(
description= str(notice),
color=0xff00ff
)
else :
embed = discord.Embed(
description= '๋ฑ๋ก๋ ๊ณต์ง๊ฐ ์์ต๋๋ค.',
color=0xff00ff
)
await msg.channel.send(embed=embed, tts=False)
if message.content.startswith(command[18] + ' '):
tmp_sayMessage = message.content
sayMessage = tmp_sayMessage[len(command[18])+1:]
contents = repo.get_contents("notice.ini")
repo.update_file(contents.path, "notice ๋ฑ๋ก", sayMessage, contents.sha)
await client.get_channel(channel).send( '< ๊ณต์ง ๋ฑ๋ก์๋ฃ >', tts=False)
if message.content == command[18] + '์ญ์ ':
contents = repo.get_contents("notice.ini")
repo.update_file(contents.path, "notice ์ญ์ ", '', contents.sha)
await client.get_channel(channel).send( '< ๊ณต์ง ์ญ์ ์๋ฃ >', tts=False)
################ ์ ์ฐํ์ธ ################
if message.content.startswith(command[12]):
if basicSetting[10] !="" and basicSetting[12] !="" and basicSetting[14] !="" and basicSetting[15] !="" and basicSetting[16] !="" :
SearchID = hello[len(command[12])+1:]
gc = gspread.authorize(credentials)
wks = gc.open(basicSetting[12]).worksheet(basicSetting[14])
wks.update_acell(basicSetting[15], SearchID)
result = wks.acell(basicSetting[16]).value
embed = discord.Embed(
description= '```' + SearchID + ' ๋์ด ๋ฐ์ ๋ค์ด์ผ๋ ' + result + ' ๋ค์ด์ผ ์๋๋ค.```',
color=0xff00ff
)
await msg.channel.send(embed=embed, tts=False)
client.loop.create_task(task())
try:
client.loop.run_until_complete(client.start(access_token))
except SystemExit:
handle_exit()
except KeyboardInterrupt:
handle_exit()
#client.loop.close()
#print("Program ended")
#break
print("Bot restarting")
client = discord.Client(loop=client.loop)
| []
| []
| [
"GIT_REPO_RESTART",
"GIT_TOKEN",
"GIT_REPO",
"BOT_TOKEN"
]
| [] | ["GIT_REPO_RESTART", "GIT_TOKEN", "GIT_REPO", "BOT_TOKEN"] | python | 4 | 0 | |
zeeguu_core/word_scheduling/arts/tools/algo_parameter_approximator.py | # -*- coding: utf8 -*-
"""
This file provides a meta-analysis to optimize the parameters of the word scheduling algorithms
for individual or all users.
It simulates the algorithm through multiple runs and optimizes the parameter based on a predefined set of
optimization goals.
The file can be run by itself; it makes use of Zeeguu code and reads the connected database, but
Zeeguu Core itself does not depend on it. A CSV file is written for later manual analysis.
"""
import csv
import datetime
import math
import os
import random
from statistics import median
from timeit import default_timer as timer
import flask_sqlalchemy
import zeeguu_core
from flask import Flask
from zeeguu_core.model import User, ExerciseOutcome, Exercise, ExerciseSource
from zeeguu_core.word_scheduling.arts.algorithm_wrapper import AlgorithmWrapper
from zeeguu_core.word_scheduling.arts.arts_rt import ArtsRT
from zeeguu_core.word_scheduling.arts.bookmark_priority_updater import PriorityInfo, BookmarkPriorityUpdater
#:nocov:
def mean(numbers):
return float(sum(numbers)) / max(len(numbers), 1)
class AverageBookmarkExercise:
"""Represents the average exercise for one bookmark and
keeps track of the history of exercises during the simulation
It is based on the correct/incorrect ratio and the reaction time of all exercises done by the user.
It is used to build a model from real data, which is then extended during the simulation (append_new_exercise).
"""
"""Default value for the probability that an exercise is correct, if no real data exists"""
DEFAULT_PROP_CORRECT = 0.5
"""Default value for the reaction time for an exercise, if no real data exists"""
DEFAULT_REACTION_TIME = 500
"""Caching of the used exercise source"""
exercise_source = ExerciseSource.find("Test")
def __init__(self, bookmark):
"""Create a new AverageBookmarkExercise for a bookmark"""
"""Original exercises"""
self.exercise_log = bookmark.exercise_log
"""The underlying bookmark (kept because the simulator's verbose output references bookmark.id)"""
self.bookmark = bookmark
"""Added exercises during the simulation"""
self.exercises = []
"""Corresponds to exercises and keeps the iteration in which the exercise was added"""
self.exercises_iteration = []
"""Current bookmark priority"""
self.priorities = []
self.avg_solving_speed, self.prob_correct = self._get_avg_exercise(bookmark.exercise_log)
@classmethod
def _get_avg_exercise(cls, exercise_log):
"""
Get the average exercise parameters based on the current exercise log of the bookmark
"""
if len(exercise_log) == 0:
return cls.DEFAULT_REACTION_TIME, cls.DEFAULT_PROP_CORRECT
avg_speed = mean([x.solving_speed for x in exercise_log])
prob_correct = mean([x.outcome.correct for x in exercise_log])
return avg_speed, prob_correct
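# Worked example (hypothetical log): four exercises with solving speeds [400, 600, 500, 500] and
# three correct outcomes yield avg_speed = 500.0 and prob_correct = 0.75.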
def append_new_exercise(self, iteration):
"""
Add a new exercise to the exercise log
:param iteration: The current number of iteration / learning session
:return: The exercise
"""
random_outcome = ExerciseOutcome(
ExerciseOutcome.CORRECT) if random.random() < self.prob_correct else ExerciseOutcome(ExerciseOutcome.WRONG)
new_exercise = Exercise(random_outcome, self.exercise_source, self.avg_solving_speed, datetime.datetime.now())
new_exercise.id = iteration
self.exercises.append(new_exercise)
self.exercises_iteration.append(iteration)
return new_exercise
class AlgorithmSimulator:
"""
Simulates a word scheduling algorithm on a specific user.
"""
"""A word is excluded (learned) after x correct answers"""
correct_count_limit = 3
"""Assigned bookmark priority, when a bookmark is removed (learned)"""
removed_bookmark_priority = -1000
def __init__(self, user_id, algorithm=None):
"""
Create a new algorithm simulation for
:param user_id: The user id of a user
:param algorithm: The used word scheduling algorithm (not the wrapper)
"""
self.user_id = user_id
self.__create_database()
if algorithm is None:
algorithm = ArtsRT()
self.algo_wrapper = AlgorithmWrapper(algorithm)
self.bookmarks = self.__get_bookmarks_for_user(self.user_id)
def __create_database(self):
zeeguu_core.app = Flask("Zeeguu-Core-Test")
config_file = os.path.expanduser('../testing_default.cfg')
if "CONFIG_FILE" in os.environ:
config_file = os.environ["CONFIG_FILE"]
zeeguu_core.app.config.from_pyfile(config_file,
silent=False) # config.cfg is in the instance folder
zeeguu_core.db = flask_sqlalchemy.SQLAlchemy(zeeguu_core.app)
print(("running with DB: " + zeeguu_core.app.config.get("SQLALCHEMY_DATABASE_URI")))
zeeguu_core.db.create_all()
def set_algorithm_wrapper(self, new_algorithm_wrapper):
self.algo_wrapper = new_algorithm_wrapper
def calc_algorithm_stats(self, verbose=True):
"""
Calculate the parameter stats for the algorithm
:param verbose: Whether additional information is printed
:return: [words_in_parallel_mean, repetition_correct_mean, repetition_incorrect_mean]
words_in_parallel_mean = mean number of words that were learned in parallel
repetition_correct_mean = mean number of other words repeated
before a correctly answered word is repeated (spacing)
repetition_incorrect_mean = mean number of other words repeated
before an incorrectly answered word is repeated (spacing)
"""
if len(self.bookmarks) == 0:
return None
# reset random seed
random.seed(0)
bookmark_exercises = self.__run_algorithm_on_bookmarks(self.bookmarks, verbose=verbose)
return self.__calc_algorithm_result_stats(bookmark_exercises, verbose=verbose)
def __get_bookmarks_for_user(self, user_id):
user = User.find_by_id(user_id)
print('Using user ' + user.name + ' with id ' + str(user.id))
return user.all_bookmarks()
def __run_algorithm_on_bookmarks(self, bookmarks, iterations=200, verbose=True):
"""
Run the algorithm for the given number of iterations (learning sessions) on the specified list of bookmarks.
In each iteration a new exercise is added based on the AverageBookmarkExercise model.
:return: a list of AverageBookmarkExercise
"""
print('Found ' + str(len(bookmarks)) + ' bookmarks')
bookmark_exercises = [AverageBookmarkExercise(x) for x in bookmarks]
# next_bookmark is used to know which bookmark has the highest priority in order to add a new exercise for it
next_bookmark = bookmark_exercises[0] # First, we simply choose the first bookmark
for i in range(0, iterations):
# generate new exercise
new_exercise = next_bookmark.append_new_exercise(i)
if verbose:
print("{:4} - {:} - {:1}".format(i, next_bookmark.bookmark.id, new_exercise.outcome.correct), end=', ')
# update priorities
max_priority = 0
for bookmark_exercise in bookmark_exercises:
new_priority = PriorityInfo.MAX_PRIORITY
last_exercises = bookmark_exercise.exercises[-self.correct_count_limit:]
if len(last_exercises) != 0:
count_correct = math.fsum([x.outcome.correct for x in last_exercises])
if count_correct == self.correct_count_limit:
new_priority = self.removed_bookmark_priority
else:
last_exercise = last_exercises[-1:][0]
try:
new_priority = self.algo_wrapper.calculate(last_exercise, i)
except Exception as e:
print('Exception during priority calculation: ' + str(e), e)
bookmark_exercise.priorities.append([i, new_priority])
if verbose:
if new_priority != self.removed_bookmark_priority:
print('{:+8.2f}'.format(new_priority), end=', ')
else:
print('{:8}'.format(''), end=', ')
if new_priority > max_priority:
next_bookmark = bookmark_exercise
max_priority = new_priority
if verbose:
print('') # newline
return bookmark_exercises
def __calc_algorithm_result_stats(self, bookmark_exercises, verbose=False):
"""
Calculate statistics based on the created AverageBookmarkExercise (list)
"""
# get the amount of iterations run
iterations = max(
[max(
map(lambda x: x[0], c.priorities)
, default=0)
for c in bookmark_exercises]
) + 1
words_in_parallel = [0 for _ in range(0, iterations)]
repetition_correct = [] # bookmark, iterations
repetition_incorrect = []
for bookmark_exercise in bookmark_exercises:
# for words_in_parallel
for priority_iteration in bookmark_exercise.priorities:
if priority_iteration[1] != self.removed_bookmark_priority:
words_in_parallel[priority_iteration[0]] += 1
# for repetition_correct_mean, repetition_incorrect_mean
for i in range(0, len(bookmark_exercise.exercises_iteration) - 1):
repetition_after = bookmark_exercise.exercises_iteration[i + 1] - \
bookmark_exercise.exercises_iteration[i]
if bookmark_exercise.exercises[i].outcome.correct:
repetition_correct.append(repetition_after)
else:
repetition_incorrect.append(repetition_after)
# remove all words that have not been covered at all
words_in_parallel = list(filter((0).__ne__, words_in_parallel))
words_in_parallel_mean = mean(words_in_parallel)
repetition_correct_mean = mean(repetition_correct)
repetition_incorrect_mean = mean(repetition_incorrect)
if verbose:
print('Concurrent words on average {:.4}, in raw: {:}'
.format(words_in_parallel_mean, words_in_parallel))
print('Repetition of correct words on average for every {:.4}, in raw: {:}'
.format(repetition_correct_mean, repetition_correct))
print('Repetition of incorrect words on average for every {:.4}, in raw: {:}'
.format(repetition_incorrect_mean, repetition_incorrect))
return [words_in_parallel_mean, repetition_correct_mean, repetition_incorrect_mean]
class OptimizationGoals:
def __init__(self,
words_in_parallel=10, words_in_parallel_factor=1.0,
repetition_correct=15, repetition_correct_factor=1.0,
repetition_incorrect=5, repetition_incorrect_factor=1.0):
"""
Used to specify on which goals to focus during the algorithm evaluation
:param words_in_parallel: Amount of words to study in parallel
:param words_in_parallel_factor: Weighting factor (higher=more important [relative to the others])
:param repetition_correct: After x words, correct words should reappear
:param repetition_correct_factor: Weighting factor (higher=more important [relative to the others])
:param repetition_incorrect: After x words, incorrect words should reappear
:param repetition_incorrect_factor: Weighting factor (higher=more important [relative to the others])
"""
self.words_in_parallel = words_in_parallel
self.words_in_parallel_factor = words_in_parallel_factor
self.repetition_correct = repetition_correct
self.repetition_correct_factor = repetition_correct_factor
self.repetition_incorrect = repetition_incorrect
self.repetition_incorrect_factor = repetition_incorrect_factor
class AlgorithmEvaluator:
"""
Approximates the algorithm parameter for a user according to set of optimization goals
"""
def __init__(self, user_id, algorithm, max_iterations=20, change_limit=1.0):
"""
Creates an AlgorithmEvaluator
:param user_id: The user id of the user to optimize for
:param algorithm: The used algorithm (no wrapper)
:param max_iterations: The maximum number of iterations to run before aborting
:param change_limit: Abort the approximation if the change between two runs is smaller than change_limit
"""
self.fancy = AlgorithmSimulator(user_id, algorithm=algorithm)
self.algorithm = algorithm
self.max_iterations = max_iterations
self.change_limit = change_limit
def fit_parameters(self, variables_to_set, optimization_goals):
"""
Fit the parameters of the algorithm to match optimization goals best
:param variables_to_set: a list of algorithm_variables
algorithm_variable: ['X', getattr(algorithm, 'X'), approximation_change]
where:
1. X is the variable/parameter in the algorithm
2. The current/starting value for X in the first run
3. approximation_change is the added approximation variable in the next run
Notes about the approximation_change:
The starting value gets increased by approximation_change in every run until no further improvement is made.
Then: The new approximation_change is -(approximation_change/2) to run backward again closer to the optimal
value until it overshoots and the approximation_change is again updated
:param optimization_goals: An instance of OptimizationGoals
:return: The variables_to_set where the second parameter is the found optimal value
"""
if len(self.fancy.bookmarks) == 0:
return None
iteration_counter = 0
tick_tock = 0 # ensure the optimization only stops after all variables have been considered
# Init run
result_new = self.fancy.calc_algorithm_stats(verbose=False)
change = self.__diff_to_goal(optimization_goals, result_new)
# Only leave optimization when the change limit is too small and
# the optimization was run on all parameters (tick_tock)
while change > self.change_limit or tick_tock != 0:
print('------------------------------------------------------------------------')
print('Iteration {:3d} of the algorithm tickTock={}, variables={}'
.format(iteration_counter, tick_tock, variables_to_set))
new_variable_value = math.fabs(variables_to_set[tick_tock][1] + variables_to_set[tick_tock][2])
setattr(self.algorithm, variables_to_set[tick_tock][0], new_variable_value)
print('Trying now with D={}, b={}, w={}'.format(self.algorithm.d, self.algorithm.b, self.algorithm.w))
self.__update_algorithm_instance(self.algorithm)
# run the algorithm
result_new = self.fancy.calc_algorithm_stats(verbose=False)
# difference to desired goal
diff_to_goal = self.__diff_to_goal(optimization_goals, result_new)
if diff_to_goal < change:
print('Improvement found')
# We just did better
variables_to_set[tick_tock][1] = new_variable_value
change = diff_to_goal
else:
print('No further improvement')
# reset the variable
setattr(self.algorithm, variables_to_set[tick_tock][0], variables_to_set[tick_tock][1])
# Time to optimize on the other variable
variables_to_set[tick_tock][2] *= -0.5
tick_tock += 1
tick_tock = divmod(tick_tock, len(variables_to_set))[1]
iteration_counter = iteration_counter + 1
if iteration_counter > self.max_iterations:
print('Stopped due to max_iterations parameter')
break
print('')
print('The variables should be set the following way:')
for variable_to_set in variables_to_set:
print('{}={}'.format(variable_to_set[0], variable_to_set[1]))
return variables_to_set
def __update_algorithm_instance(self, algorithm_instance):
self.fancy.set_algorithm_wrapper(AlgorithmWrapper(algorithm_instance))
def __diff_to_goal(self, optimization_goals, result_new):
# corresponds to the output from __calc_algorithm_result_stats()
optimization_list = [
optimization_goals.words_in_parallel,
optimization_goals.repetition_correct,
optimization_goals.repetition_incorrect
]
optimization_list_factors = [
optimization_goals.words_in_parallel_factor,
optimization_goals.repetition_correct_factor,
optimization_goals.repetition_incorrect_factor
]
diffs = self.__calc_diff(result_new, optimization_list, optimization_list_factors)
result = math.fsum(diffs)
print(' concurrent words: {:6.4f}, correct words: {:6.4f}, incorrect words: {:6.4f}'.
format(result_new[0], result_new[1], result_new[2]))
print('Diff: {:6.4f}, concurrent words: {:6.4f}, correct words: {:6.4f}, incorrect words: {:6.4f}'.
format(result, diffs[0], diffs[1], diffs[2]))
return result
@staticmethod
def __calc_diff(a, b, factor=None):
if len(a) != len(b):
raise ValueError('size of parameters is different: len(a): {} vs len(b): {}'.format(len(a), len(b)))
if factor is None:
factor = [1 for _ in range(0, (len(a)))]
diffs = []
for i in range(0, len(a)):
diff = math.fabs(a[i] - b[i])
diffs.append(diff * factor[i])
return diffs
if __name__ == "__main__":
optimization_goals = OptimizationGoals(
words_in_parallel=20, words_in_parallel_factor=3,
repetition_correct_factor=0,
repetition_incorrect_factor=0
)
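# A weighting factor of 0 removes that goal from the weighted distance entirely, so this
# configuration tunes d/b/w purely towards keeping roughly 20 words in parallel rotation.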
# update exercise source stats
BookmarkPriorityUpdater._update_exercise_source_stats()
# optimize for algorithm for these users
users = User.find_all()
start = timer()
user_ids = [user.id for user in users]
results = []
for user_id in user_ids:
algorithm = ArtsRT()
evaluator = AlgorithmEvaluator(user_id, algorithm, change_limit=1.0)
variables_to_set = [
['d', getattr(algorithm, 'd'), +5],
['b', getattr(algorithm, 'b'), +10],
['w', getattr(algorithm, 'w'), +10]
]
result = evaluator.fit_parameters(variables_to_set, optimization_goals)
if result is not None:
count_bookmarks = len(evaluator.fancy.bookmarks)
count_exercises = sum(map(lambda x: len(x.exercise_log), evaluator.fancy.bookmarks))
result = [user_id, list(map(lambda x: [x[0], x[1]], result)), count_bookmarks, count_exercises]
results.append(result)
else:
print('This user has no bookmarks. Skipping.')
end = timer()
print(results)
# print general (mean) results
users = list(map(lambda x: x[0], results))
parameters_d = list(map(lambda x: x[1][0][1], results))
parameters_b = list(map(lambda x: x[1][1][1], results))
parameters_w = list(map(lambda x: x[1][2][1], results))
bookmarks = list(map(lambda x: x[2], results))
exercises = list(map(lambda x: x[3], results))
print('Complete calculation took {:10.2f}s'.format((end-start)))
print('Printing results based on {} users ({} users have no bookmark and are skipped)'.format(len(results), len(user_ids)-len(results)))
print('Average user has {:6.2f} bookmarks'.format(mean(bookmarks)))
print('D: mean {:6.2f}, median {:6.2f}, range from {:6.2f} to {:6.2f}'.format(mean(parameters_d), median(parameters_d), min(parameters_d), max(parameters_d)))
print('b: mean {:6.2f}, median {:6.2f}, range from {:6.2f} to {:6.2f}'.format(mean(parameters_b), median(parameters_b), min(parameters_b), max(parameters_b)))
print('w: mean {:6.2f}, median {:6.2f}, range from {:6.2f} to {:6.2f}'.format(mean(parameters_w), median(parameters_w), min(parameters_w), max(parameters_w)))
# write data for further analysis to file
with open('algo_parameter_approximator.csv', 'w') as file:
wr = csv.writer(file)
wr.writerow(['user_id', 'd', 'b', 'w', 'bookmarks', 'exercises'])
rows = [users,
parameters_d,
parameters_b,
parameters_w,
bookmarks,
exercises]
rows_zip = zip(*rows)
wr.writerows(rows_zip)
#:nocov: | []
| []
| [
"CONFIG_FILE"
]
| [] | ["CONFIG_FILE"] | python | 1 | 0 | |
libgo/go/net/http/fs_test.go | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net"
. "net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strings"
"testing"
"time"
)
const (
testFile = "testdata/file"
testFileLen = 11
)
type wantRange struct {
start, end int64 // range [start,end)
}
var ServeFileRangeTests = []struct {
r string
code int
ranges []wantRange
}{
{r: "", code: StatusOK},
{r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
{r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
{r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
{r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
{r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
{r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
{r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
{r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}},
{r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
{r: "bytes=0-9", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}},
{r: "bytes=0-10", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=0-11", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=10-11", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
{r: "bytes=10-", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
{r: "bytes=11-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=11-12", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=12-12", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=11-100", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=12-100", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=100-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=100-1000", code: StatusRequestedRangeNotSatisfiable},
}
func TestServeFile(t *testing.T) {
setParallel(t)
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
c := ts.Client()
var err error
file, err := ioutil.ReadFile(testFile)
if err != nil {
t.Fatal("reading file:", err)
}
// set up the Request (re-used for all tests)
var req Request
req.Header = make(Header)
if req.URL, err = url.Parse(ts.URL); err != nil {
t.Fatal("ParseURL:", err)
}
req.Method = "GET"
// straight GET
_, body := getBody(t, "straight get", req, c)
if !bytes.Equal(body, file) {
t.Fatalf("body mismatch: got %q, want %q", body, file)
}
// Range tests
Cases:
for _, rt := range ServeFileRangeTests {
if rt.r != "" {
req.Header.Set("Range", rt.r)
}
resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req, c)
if resp.StatusCode != rt.code {
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
}
if rt.code == StatusRequestedRangeNotSatisfiable {
continue
}
wantContentRange := ""
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
}
cr := resp.Header.Get("Content-Range")
if cr != wantContentRange {
t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
}
ct := resp.Header.Get("Content-Type")
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
if strings.HasPrefix(ct, "multipart/byteranges") {
t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
}
}
if len(rt.ranges) > 1 {
typ, params, err := mime.ParseMediaType(ct)
if err != nil {
t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
continue
}
if typ != "multipart/byteranges" {
t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
continue
}
if params["boundary"] == "" {
t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
continue
}
if g, w := resp.ContentLength, int64(len(body)); g != w {
t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
continue
}
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
for ri, rng := range rt.ranges {
part, err := mr.NextPart()
if err != nil {
t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
continue Cases
}
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
}
body, err := ioutil.ReadAll(part)
if err != nil {
t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
continue Cases
}
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
}
_, err = mr.NextPart()
if err != io.EOF {
t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
}
}
}
}
func TestServeFile_DotDot(t *testing.T) {
tests := []struct {
req string
wantStatus int
}{
{"/testdata/file", 200},
{"/../file", 400},
{"/..", 400},
{"/../", 400},
{"/../foo", 400},
{"/..\\foo", 400},
{"/file/a", 200},
{"/file/a..", 200},
{"/file/a/..", 400},
{"/file/a\\..", 400},
}
for _, tt := range tests {
req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET " + tt.req + " HTTP/1.1\r\nHost: foo\r\n\r\n")))
if err != nil {
t.Errorf("bad request %q: %v", tt.req, err)
continue
}
rec := httptest.NewRecorder()
ServeFile(rec, req, "testdata/file")
if rec.Code != tt.wantStatus {
t.Errorf("for request %q, status = %d; want %d", tt.req, rec.Code, tt.wantStatus)
}
}
}
var fsRedirectTestData = []struct {
original, redirect string
}{
{"/test/index.html", "/test/"},
{"/test/testdata", "/test/testdata/"},
{"/test/testdata/file/", "/test/testdata/file"},
}
func TestFSRedirect(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
defer ts.Close()
for _, data := range fsRedirectTestData {
res, err := Get(ts.URL + data.original)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if g, e := res.Request.URL.Path, data.redirect; g != e {
t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
}
}
}
type testFileSystem struct {
open func(name string) (File, error)
}
func (fs *testFileSystem) Open(name string) (File, error) {
return fs.open(name)
}
func TestFileServerCleans(t *testing.T) {
defer afterTest(t)
ch := make(chan string, 1)
fs := FileServer(&testFileSystem{func(name string) (File, error) {
ch <- name
return nil, errors.New("file does not exist")
}})
tests := []struct {
reqPath, openArg string
}{
{"/foo.txt", "/foo.txt"},
{"//foo.txt", "/foo.txt"},
{"/../foo.txt", "/foo.txt"},
}
req, _ := NewRequest("GET", "http://example.com", nil)
for n, test := range tests {
rec := httptest.NewRecorder()
req.URL.Path = test.reqPath
fs.ServeHTTP(rec, req)
if got := <-ch; got != test.openArg {
t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
}
}
}
func TestFileServerEscapesNames(t *testing.T) {
defer afterTest(t)
const dirListPrefix = "<pre>\n"
const dirListSuffix = "\n</pre>\n"
tests := []struct {
name, escaped string
}{
{`simple_name`, `<a href="simple_name">simple_name</a>`},
{`"'<>&`, `<a href="%22%27%3C%3E&">"'<>&</a>`},
{`?foo=bar#baz`, `<a href="%3Ffoo=bar%23baz">?foo=bar#baz</a>`},
{`<combo>?foo`, `<a href="%3Ccombo%3E%3Ffoo"><combo>?foo</a>`},
{`foo:bar`, `<a href="./foo:bar">foo:bar</a>`},
}
// We put each test file in its own directory in the fakeFS so we can look at it in isolation.
fs := make(fakeFS)
for i, test := range tests {
testFile := &fakeFileInfo{basename: test.name}
fs[fmt.Sprintf("/%d", i)] = &fakeFileInfo{
dir: true,
modtime: time.Unix(1000000000, 0).UTC(),
ents: []*fakeFileInfo{testFile},
}
fs[fmt.Sprintf("/%d/%s", i, test.name)] = testFile
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
for i, test := range tests {
url := fmt.Sprintf("%s/%d", ts.URL, i)
res, err := Get(url)
if err != nil {
t.Fatalf("test %q: Get: %v", test.name, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("test %q: read Body: %v", test.name, err)
}
s := string(b)
if !strings.HasPrefix(s, dirListPrefix) || !strings.HasSuffix(s, dirListSuffix) {
t.Errorf("test %q: listing dir, full output is %q, want prefix %q and suffix %q", test.name, s, dirListPrefix, dirListSuffix)
}
if trimmed := strings.TrimSuffix(strings.TrimPrefix(s, dirListPrefix), dirListSuffix); trimmed != test.escaped {
t.Errorf("test %q: listing dir, filename escaped to %q, want %q", test.name, trimmed, test.escaped)
}
res.Body.Close()
}
}
func TestFileServerSortsNames(t *testing.T) {
defer afterTest(t)
const contents = "I am a fake file"
dirMod := time.Unix(123, 0).UTC()
fileMod := time.Unix(1000000000, 0).UTC()
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{
{
basename: "b",
modtime: fileMod,
contents: contents,
},
{
basename: "a",
modtime: fileMod,
contents: contents,
},
},
},
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatalf("Get: %v", err)
}
defer res.Body.Close()
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("read Body: %v", err)
}
s := string(b)
if !strings.Contains(s, "<a href=\"a\">a</a>\n<a href=\"b\">b</a>") {
t.Errorf("output appears to be unsorted:\n%s", s)
}
}
func mustRemoveAll(dir string) {
err := os.RemoveAll(dir)
if err != nil {
panic(err)
}
}
func TestFileServerImplicitLeadingSlash(t *testing.T) {
defer afterTest(t)
tempDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("TempDir: %v", err)
}
defer mustRemoveAll(tempDir)
if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
defer ts.Close()
get := func(suffix string) string {
res, err := Get(ts.URL + suffix)
if err != nil {
t.Fatalf("Get %s: %v", suffix, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("ReadAll %s: %v", suffix, err)
}
res.Body.Close()
return string(b)
}
if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
		t.Errorf("expected a directory listing with foo.txt, got %q", s)
}
if s := get("/bar/foo.txt"); s != "Hello world" {
		t.Errorf("expected %q, got %q", "Hello world", s)
}
}
func TestDirJoin(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping test on windows")
}
wfi, err := os.Stat("/etc/hosts")
if err != nil {
t.Skip("skipping test; no /etc/hosts file")
}
test := func(d Dir, name string) {
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
gfi, err := f.Stat()
if err != nil {
t.Fatalf("stat of %s: %v", name, err)
}
if !os.SameFile(gfi, wfi) {
t.Errorf("%s got different file", name)
}
}
test(Dir("/etc/"), "/hosts")
test(Dir("/etc/"), "hosts")
test(Dir("/etc/"), "../../../../hosts")
test(Dir("/etc"), "/hosts")
test(Dir("/etc"), "hosts")
test(Dir("/etc"), "../../../../hosts")
// Not really directories, but since we use this trick in
// ServeFile, test it:
test(Dir("/etc/hosts"), "")
test(Dir("/etc/hosts"), "/")
test(Dir("/etc/hosts"), "../")
}
func TestEmptyDirOpenCWD(t *testing.T) {
test := func(d Dir) {
name := "fs_test.go"
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
}
test(Dir(""))
test(Dir("."))
test(Dir("./"))
}
func TestServeFileContentType(t *testing.T) {
defer afterTest(t)
const ctype = "icecream/chocolate"
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
switch r.FormValue("override") {
case "1":
w.Header().Set("Content-Type", ctype)
case "2":
// Explicitly inhibit sniffing.
w.Header()["Content-Type"] = []string{}
}
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
get := func(override string, want []string) {
resp, err := Get(ts.URL + "?override=" + override)
if err != nil {
t.Fatal(err)
}
if h := resp.Header["Content-Type"]; !reflect.DeepEqual(h, want) {
t.Errorf("Content-Type mismatch: got %v, want %v", h, want)
}
resp.Body.Close()
}
get("0", []string{"text/plain; charset=utf-8"})
get("1", []string{ctype})
get("2", nil)
}
func TestServeFileMimeType(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/style.css")
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
want := "text/css; charset=utf-8"
if h := resp.Header.Get("Content-Type"); h != want {
t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
}
}
func TestServeFileFromCWD(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "fs_test.go")
}))
defer ts.Close()
r, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if r.StatusCode != 200 {
t.Fatalf("expected 200 OK, got %s", r.Status)
}
}
// Issue 13996
func TestServeDirWithoutTrailingSlash(t *testing.T) {
e := "/testdata/"
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, ".")
}))
defer ts.Close()
r, err := Get(ts.URL + "/testdata")
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if g := r.Request.URL.Path; g != e {
t.Errorf("got %s, want %s", g, e)
}
}
// Tests that ServeFile doesn't add a Content-Length if a Content-Encoding is
// specified.
func TestServeFileWithContentEncoding_h1(t *testing.T) { testServeFileWithContentEncoding(t, h1Mode) }
func TestServeFileWithContentEncoding_h2(t *testing.T) { testServeFileWithContentEncoding(t, h2Mode) }
func testServeFileWithContentEncoding(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Encoding", "foo")
ServeFile(w, r, "testdata/file")
// Because the testdata is so small, it would fit in
// both the h1 and h2 Server's write buffers. For h1,
// sendfile is used, though, forcing a header flush at
// the io.Copy. http2 doesn't do a header flush so
// buffers all 11 bytes and then adds its own
// Content-Length. To prevent the Server's
// Content-Length and test ServeFile only, flush here.
w.(Flusher).Flush()
}))
defer cst.close()
resp, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if g, e := resp.ContentLength, int64(-1); g != e {
t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
}
}
func TestServeIndexHtml(t *testing.T) {
defer afterTest(t)
const want = "index.html says hello\n"
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
for _, path := range []string{"/testdata/", "/testdata/index.html"} {
res, err := Get(ts.URL + path)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if s := string(b); s != want {
t.Errorf("for path %q got %q, want %q", path, s, want)
}
res.Body.Close()
}
}
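// Tests that a request whose path contains a NUL byte is rejected instead of being served.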
func TestFileServerZeroByte(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
res, err := Get(ts.URL + "/..\x00")
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if res.StatusCode == 200 {
t.Errorf("got status 200; want an error. Body is:\n%s", string(b))
}
}
type fakeFileInfo struct {
dir bool
basename string
modtime time.Time
ents []*fakeFileInfo
contents string
err error
}
func (f *fakeFileInfo) Name() string { return f.basename }
func (f *fakeFileInfo) Sys() interface{} { return nil }
func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
func (f *fakeFileInfo) IsDir() bool { return f.dir }
func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) }
func (f *fakeFileInfo) Mode() os.FileMode {
if f.dir {
return 0755 | os.ModeDir
}
return 0644
}
type fakeFile struct {
io.ReadSeeker
fi *fakeFileInfo
path string // as opened
entpos int
}
func (f *fakeFile) Close() error { return nil }
func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
if !f.fi.dir {
return nil, os.ErrInvalid
}
var fis []os.FileInfo
limit := f.entpos + count
if count <= 0 || limit > len(f.fi.ents) {
limit = len(f.fi.ents)
}
for ; f.entpos < limit; f.entpos++ {
fis = append(fis, f.fi.ents[f.entpos])
}
if len(fis) == 0 && count > 0 {
return fis, io.EOF
} else {
return fis, nil
}
}
type fakeFS map[string]*fakeFileInfo
func (fs fakeFS) Open(name string) (File, error) {
name = path.Clean(name)
f, ok := fs[name]
if !ok {
return nil, os.ErrNotExist
}
if f.err != nil {
return nil, f.err
}
return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
}
func TestDirectoryIfNotModified(t *testing.T) {
defer afterTest(t)
const indexContents = "I am a fake index.html file"
fileMod := time.Unix(1000000000, 0).UTC()
fileModStr := fileMod.Format(TimeFormat)
dirMod := time.Unix(123, 0).UTC()
indexFile := &fakeFileInfo{
basename: "index.html",
modtime: fileMod,
contents: indexContents,
}
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{indexFile},
},
"/index.html": indexFile,
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != indexContents {
t.Fatalf("Got body %q; want %q", b, indexContents)
}
res.Body.Close()
lastMod := res.Header.Get("Last-Modified")
if lastMod != fileModStr {
t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
}
req, _ := NewRequest("GET", ts.URL, nil)
req.Header.Set("If-Modified-Since", lastMod)
c := ts.Client()
res, err = c.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 304 {
t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
}
res.Body.Close()
// Advance the index.html file's modtime, but not the directory's.
indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
res, err = c.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
}
res.Body.Close()
}
func mustStat(t *testing.T, fileName string) os.FileInfo {
fi, err := os.Stat(fileName)
if err != nil {
t.Fatal(err)
}
return fi
}
func TestServeContent(t *testing.T) {
defer afterTest(t)
type serveParam struct {
name string
modtime time.Time
content io.ReadSeeker
contentType string
etag string
}
servec := make(chan serveParam, 1)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
p := <-servec
if p.etag != "" {
w.Header().Set("ETag", p.etag)
}
if p.contentType != "" {
w.Header().Set("Content-Type", p.contentType)
}
ServeContent(w, r, p.name, p.modtime, p.content)
}))
defer ts.Close()
type testCase struct {
// One of file or content must be set:
file string
content io.ReadSeeker
modtime time.Time
serveETag string // optional
serveContentType string // optional
reqHeader map[string]string
wantLastMod string
wantContentType string
wantContentRange string
wantStatus int
}
htmlModTime := mustStat(t, "testdata/index.html").ModTime()
tests := map[string]testCase{
"no_last_modified": {
file: "testdata/style.css",
wantContentType: "text/css; charset=utf-8",
wantStatus: 200,
},
"with_last_modified": {
file: "testdata/index.html",
wantContentType: "text/html; charset=utf-8",
modtime: htmlModTime,
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
wantStatus: 200,
},
"not_modified_modtime": {
file: "testdata/style.css",
serveETag: `"foo"`, // Last-Modified sent only when no ETag
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_modtime_with_contenttype": {
file: "testdata/style.css",
serveContentType: "text/css", // explicit content type
serveETag: `"foo"`, // Last-Modified sent only when no ETag
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_etag": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"foo"`,
},
wantStatus: 304,
},
"not_modified_etag_no_seek": {
content: panicOnSeek{nil}, // should never be called
serveETag: `W/"foo"`, // If-None-Match uses weak ETag comparison
reqHeader: map[string]string{
"If-None-Match": `"baz", W/"foo"`,
},
wantStatus: 304,
},
"if_none_match_mismatch": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"Foo"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_good": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
},
"range_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"A"`,
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
},
"range_match_weak_etag": {
file: "testdata/style.css",
serveETag: `W/"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `W/"A"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_no_overlap": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=10-20",
},
wantStatus: StatusRequestedRangeNotSatisfiable,
wantContentType: "text/plain; charset=utf-8",
wantContentRange: "bytes */8",
},
// An If-Range resource for entity "A", but entity "B" is now current.
// The Range request should be ignored.
"range_no_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"B"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_with_modtime": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 0 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"range_with_modtime_mismatch": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 0 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:19 GMT",
},
wantStatus: StatusOK,
wantContentType: "text/css; charset=utf-8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"range_with_modtime_nanos": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 123 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"unix_zero_modtime": {
content: strings.NewReader("<html>foo"),
modtime: time.Unix(0, 0),
wantStatus: StatusOK,
wantContentType: "text/html; charset=utf-8",
},
"ifmatch_matches": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `"Z", "A"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"ifmatch_star": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `*`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"ifmatch_failed": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `"B"`,
},
wantStatus: 412,
},
"ifmatch_fails_on_weak_etag": {
file: "testdata/style.css",
serveETag: `W/"A"`,
reqHeader: map[string]string{
"If-Match": `W/"A"`,
},
wantStatus: 412,
},
"if_unmodified_since_true": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Unmodified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
"if_unmodified_since_false": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Unmodified-Since": htmlModTime.Add(-2 * time.Second).UTC().Format(TimeFormat),
},
wantStatus: 412,
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
}
for testName, tt := range tests {
var content io.ReadSeeker
if tt.file != "" {
f, err := os.Open(tt.file)
if err != nil {
t.Fatalf("test %q: %v", testName, err)
}
defer f.Close()
content = f
} else {
content = tt.content
}
for _, method := range []string{"GET", "HEAD"} {
			// Restore content in case it was consumed by the previous method.
if content, ok := content.(*strings.Reader); ok {
content.Seek(0, io.SeekStart)
}
servec <- serveParam{
name: filepath.Base(tt.file),
content: content,
modtime: tt.modtime,
etag: tt.serveETag,
contentType: tt.serveContentType,
}
req, err := NewRequest(method, ts.URL, nil)
if err != nil {
t.Fatal(err)
}
for k, v := range tt.reqHeader {
req.Header.Set(k, v)
}
c := ts.Client()
res, err := c.Do(req)
if err != nil {
t.Fatal(err)
}
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
if res.StatusCode != tt.wantStatus {
t.Errorf("test %q using %q: got status = %d; want %d", testName, method, res.StatusCode, tt.wantStatus)
}
if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
t.Errorf("test %q using %q: got content-type = %q, want %q", testName, method, g, e)
}
if g, e := res.Header.Get("Content-Range"), tt.wantContentRange; g != e {
t.Errorf("test %q using %q: got content-range = %q, want %q", testName, method, g, e)
}
if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
t.Errorf("test %q using %q: got last-modified = %q, want %q", testName, method, g, e)
}
}
}
}
// Issue 12991
func TestServerFileStatError(t *testing.T) {
rec := httptest.NewRecorder()
r, _ := NewRequest("GET", "http://foo/", nil)
redirect := false
name := "file.txt"
fs := issue12991FS{}
ExportServeFile(rec, r, fs, name, redirect)
if body := rec.Body.String(); !strings.Contains(body, "403") || !strings.Contains(body, "Forbidden") {
t.Errorf("wanted 403 forbidden message; got: %s", body)
}
}
type issue12991FS struct{}
func (issue12991FS) Open(string) (File, error) { return issue12991File{}, nil }
type issue12991File struct{ File }
func (issue12991File) Stat() (os.FileInfo, error) { return nil, os.ErrPermission }
func (issue12991File) Close() error { return nil }
func TestServeContentErrorMessages(t *testing.T) {
defer afterTest(t)
fs := fakeFS{
"/500": &fakeFileInfo{
err: errors.New("random error"),
},
"/403": &fakeFileInfo{
err: &os.PathError{Err: os.ErrPermission},
},
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
c := ts.Client()
for _, code := range []int{403, 404, 500} {
res, err := c.Get(fmt.Sprintf("%s/%d", ts.URL, code))
if err != nil {
t.Errorf("Error fetching /%d: %v", code, err)
continue
}
if res.StatusCode != code {
t.Errorf("For /%d, status code = %d; want %d", code, res.StatusCode, code)
}
res.Body.Close()
}
}
// verifies that sendfile is being used on Linux
func TestLinuxSendfile(t *testing.T) {
setParallel(t)
defer afterTest(t)
if runtime.GOOS != "linux" {
t.Skip("skipping; linux-only test")
}
if _, err := exec.LookPath("strace"); err != nil {
t.Skip("skipping; strace not found in path")
}
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
lnf, err := ln.(*net.TCPListener).File()
if err != nil {
t.Fatal(err)
}
defer ln.Close()
syscalls := "sendfile,sendfile64"
switch runtime.GOARCH {
case "mips64", "mips64le", "s390x", "alpha":
// strace on the above platforms doesn't support sendfile64
// and will error out if we specify that with `-e trace='.
syscalls = "sendfile"
}
// Attempt to run strace, and skip on failure - this test requires SYS_PTRACE.
if err := exec.Command("strace", "-f", "-q", "-e", "trace="+syscalls, os.Args[0], "-test.run=^$").Run(); err != nil {
t.Skipf("skipping; failed to run strace: %v", err)
}
var buf bytes.Buffer
child := exec.Command("strace", "-f", "-q", "-e", "trace="+syscalls, os.Args[0], "-test.run=TestLinuxSendfileChild")
child.ExtraFiles = append(child.ExtraFiles, lnf)
child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
child.Stdout = &buf
child.Stderr = &buf
if err := child.Start(); err != nil {
t.Skipf("skipping; failed to start straced child: %v", err)
}
res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
if err != nil {
t.Fatalf("http client error: %v", err)
}
_, err = io.Copy(ioutil.Discard, res.Body)
if err != nil {
t.Fatalf("client body read error: %v", err)
}
res.Body.Close()
// Force child to exit cleanly.
Post(fmt.Sprintf("http://%s/quit", ln.Addr()), "", nil)
child.Wait()
rx := regexp.MustCompile(`sendfile(64)?\(`)
out := buf.String()
if !rx.MatchString(out) {
t.Errorf("no sendfile system call found in:\n%s", out)
}
}
func getBody(t *testing.T, testName string, req Request, client *Client) (*Response, []byte) {
r, err := client.Do(&req)
if err != nil {
t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
}
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
}
return r, b
}
// TestLinuxSendfileChild isn't a real test. It's used as a helper process
// for TestLinuxSendfile.
func TestLinuxSendfileChild(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
fd3 := os.NewFile(3, "ephemeral-port-listener")
ln, err := net.FileListener(fd3)
if err != nil {
panic(err)
}
mux := NewServeMux()
mux.Handle("/", FileServer(Dir("testdata")))
mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
os.Exit(0)
})
s := &Server{Handler: mux}
err = s.Serve(ln)
if err != nil {
panic(err)
}
}
// Issue 18984: tests that requests for paths beyond files return not-found errors
func TestFileServerNotDirError(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(FileServer(Dir("testdata")))
defer ts.Close()
res, err := Get(ts.URL + "/index.html/not-a-file")
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if res.StatusCode != 404 {
t.Errorf("StatusCode = %v; want 404", res.StatusCode)
}
test := func(name string, dir Dir) {
t.Run(name, func(t *testing.T) {
_, err = dir.Open("/index.html/not-a-file")
if err == nil {
t.Fatal("err == nil; want != nil")
}
if !os.IsNotExist(err) {
t.Errorf("err = %v; os.IsNotExist(err) = %v; want true", err, os.IsNotExist(err))
}
_, err = dir.Open("/index.html/not-a-dir/not-a-file")
if err == nil {
t.Fatal("err == nil; want != nil")
}
if !os.IsNotExist(err) {
t.Errorf("err = %v; os.IsNotExist(err) = %v; want true", err, os.IsNotExist(err))
}
})
}
absPath, err := filepath.Abs("testdata")
if err != nil {
t.Fatal("get abs path:", err)
}
test("RelativePath", Dir("testdata"))
test("AbsolutePath", Dir(absPath))
}
func TestFileServerCleanPath(t *testing.T) {
tests := []struct {
path string
wantCode int
wantOpen []string
}{
{"/", 200, []string{"/", "/index.html"}},
{"/dir", 301, []string{"/dir"}},
{"/dir/", 200, []string{"/dir", "/dir/index.html"}},
}
for _, tt := range tests {
var log []string
rr := httptest.NewRecorder()
req, _ := NewRequest("GET", "http://foo.localhost"+tt.path, nil)
FileServer(fileServerCleanPathDir{&log}).ServeHTTP(rr, req)
if !reflect.DeepEqual(log, tt.wantOpen) {
			t.Errorf("For %s: Opens = %q; want %q", tt.path, log, tt.wantOpen)
}
if rr.Code != tt.wantCode {
			t.Errorf("For %s: Response code = %d; want %d", tt.path, rr.Code, tt.wantCode)
}
}
}
type fileServerCleanPathDir struct {
log *[]string
}
func (d fileServerCleanPathDir) Open(path string) (File, error) {
*(d.log) = append(*(d.log), path)
if path == "/" || path == "/dir" || path == "/dir/" {
// Just return back something that's a directory.
return Dir(".").Open(".")
}
return nil, os.ErrNotExist
}
type panicOnSeek struct{ io.ReadSeeker }
func Test_scanETag(t *testing.T) {
tests := []struct {
in string
wantETag string
wantRemain string
}{
{`W/"etag-1"`, `W/"etag-1"`, ""},
{`"etag-2"`, `"etag-2"`, ""},
{`"etag-1", "etag-2"`, `"etag-1"`, `, "etag-2"`},
{"", "", ""},
{"W/", "", ""},
{`W/"truc`, "", ""},
{`w/"case-sensitive"`, "", ""},
{`"spaced etag"`, "", ""},
}
for _, test := range tests {
etag, remain := ExportScanETag(test.in)
if etag != test.wantETag || remain != test.wantRemain {
t.Errorf("scanETag(%q)=%q %q, want %q %q", test.in, etag, remain, test.wantETag, test.wantRemain)
}
}
}
| [
"\"GO_WANT_HELPER_PROCESS\""
]
| []
| [
"GO_WANT_HELPER_PROCESS"
]
| [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
assignments/solutions/hw8/handout/python/rendering.py | """
2D rendering framework
Taken from: https://github.com/openai/gym/blob/master/gym/envs/classic_control/rendering.py
"""
from __future__ import division
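# Minimal usage sketch (assumes a working OpenGL-capable display):
#   viewer = Viewer(600, 400)
#   viewer.draw_circle(radius=10, color=(1.0, 0.0, 0.0))
#   viewer.render()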
import os
import six
import sys
if "Apple" in sys.version:
if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] += ':/usr/lib'
# (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite
class Error(Exception):
pass
try:
import pyglet
except ImportError as e:
raise ImportError('''
Cannot import pyglet.
HINT: you can install pyglet directly via 'pip install pyglet' or 'conda install -c conda-forge pyglet'.
''')
try:
from pyglet.gl import *
except ImportError as e:
raise ImportError('''
    Error occurred while running `from pyglet.gl import *`
    HINT: make sure you have OpenGL installed. On Ubuntu, you can run 'apt-get install python-opengl'.
If you're running on a server, you may need a virtual frame buffer; something like this should work:
'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'
''')
import math
import numpy as np
RAD2DEG = 57.29577951308232
def get_display(spec):
"""Convert a display specification (such as :0) into an actual Display
object.
Pyglet only supports multiple Displays on Linux.
"""
if spec is None:
return None
elif isinstance(spec, six.string_types):
return pyglet.canvas.Display(spec)
else:
raise Error('Invalid display specification: {}. (Must be a string like :0 or None.)'.format(spec))
class Viewer(object):
def __init__(self, width, height, display=None):
display = get_display(display)
self.width = width
self.height = height
self.window = pyglet.window.Window(width=width, height=height, display=display)
self.window.on_close = self.window_closed_by_user
self.isopen = True
self.geoms = []
self.onetime_geoms = []
self.transform = Transform()
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
def close(self):
self.window.close()
def window_closed_by_user(self):
self.isopen = False
def set_bounds(self, left, right, bottom, top):
assert right > left and top > bottom
scalex = self.width/(right-left)
scaley = self.height/(top-bottom)
self.transform = Transform(
translation=(-left*scalex, -bottom*scaley),
scale=(scalex, scaley))
def add_geom(self, geom):
self.geoms.append(geom)
def add_onetime(self, geom):
self.onetime_geoms.append(geom)
def render(self, return_rgb_array=False):
glClearColor(1,1,1,1)
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
self.transform.enable()
for geom in self.geoms:
geom.render()
for geom in self.onetime_geoms:
geom.render()
self.transform.disable()
arr = None
if return_rgb_array:
buffer = pyglet.image.get_buffer_manager().get_color_buffer()
image_data = buffer.get_image_data()
arr = np.frombuffer(image_data.data, dtype=np.uint8)
# In https://github.com/openai/gym-http-api/issues/2, we
# discovered that someone using Xmonad on Arch was having
# a window of size 598 x 398, though a 600 x 400 window
# was requested. (Guess Xmonad was preserving a pixel for
# the boundary.) So we use the buffer height/width rather
# than the requested one.
arr = arr.reshape(buffer.height, buffer.width, 4)
arr = arr[::-1,:,0:3]
self.window.flip()
self.onetime_geoms = []
return arr if return_rgb_array else self.isopen
# Convenience
def draw_circle(self, radius=10, res=30, filled=True, **attrs):
geom = make_circle(radius=radius, res=res, filled=filled)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_polygon(self, v, filled=True, **attrs):
geom = make_polygon(v=v, filled=filled)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_polyline(self, v, **attrs):
geom = make_polyline(v=v)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_line(self, start, end, **attrs):
geom = Line(start, end)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def get_array(self):
self.window.flip()
image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
self.window.flip()
        arr = np.frombuffer(image_data.data, dtype=np.uint8)
arr = arr.reshape(self.height, self.width, 4)
return arr[::-1,:,0:3]
def __del__(self):
self.close()
def _add_attrs(geom, attrs):
if "color" in attrs:
geom.set_color(*attrs["color"])
if "linewidth" in attrs:
geom.set_linewidth(attrs["linewidth"])
class Geom(object):
def __init__(self):
self._color=Color((0, 0, 0, 1.0))
self.attrs = [self._color]
def render(self):
for attr in reversed(self.attrs):
attr.enable()
self.render1()
for attr in self.attrs:
attr.disable()
def render1(self):
raise NotImplementedError
def add_attr(self, attr):
self.attrs.append(attr)
def set_color(self, r, g, b):
self._color.vec4 = (r, g, b, 1)
class Attr(object):
def enable(self):
raise NotImplementedError
def disable(self):
pass
class Transform(Attr):
def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1,1)):
self.set_translation(*translation)
self.set_rotation(rotation)
self.set_scale(*scale)
def enable(self):
glPushMatrix()
        glTranslatef(self.translation[0], self.translation[1], 0)  # translate to GL loc point
glRotatef(RAD2DEG * self.rotation, 0, 0, 1.0)
glScalef(self.scale[0], self.scale[1], 1)
def disable(self):
glPopMatrix()
def set_translation(self, newx, newy):
self.translation = (float(newx), float(newy))
def set_rotation(self, new):
self.rotation = float(new)
def set_scale(self, newx, newy):
self.scale = (float(newx), float(newy))
class Color(Attr):
def __init__(self, vec4):
self.vec4 = vec4
def enable(self):
glColor4f(*self.vec4)
class LineStyle(Attr):
def __init__(self, style):
self.style = style
def enable(self):
glEnable(GL_LINE_STIPPLE)
glLineStipple(1, self.style)
def disable(self):
glDisable(GL_LINE_STIPPLE)
class LineWidth(Attr):
def __init__(self, stroke):
self.stroke = stroke
def enable(self):
glLineWidth(self.stroke)
class Point(Geom):
def __init__(self):
Geom.__init__(self)
def render1(self):
glBegin(GL_POINTS) # draw point
glVertex3f(0.0, 0.0, 0.0)
glEnd()
class FilledPolygon(Geom):
def __init__(self, v):
Geom.__init__(self)
self.v = v
def render1(self):
if len(self.v) == 4 : glBegin(GL_QUADS)
elif len(self.v) > 4 : glBegin(GL_POLYGON)
else: glBegin(GL_TRIANGLES)
for p in self.v:
glVertex3f(p[0], p[1],0) # draw each vertex
glEnd()
def make_circle(radius=10, res=30, filled=True):
points = []
for i in range(res):
ang = 2*math.pi*i / res
points.append((math.cos(ang)*radius, math.sin(ang)*radius))
if filled:
return FilledPolygon(points)
else:
return PolyLine(points, True)
def make_polygon(v, filled=True):
if filled: return FilledPolygon(v)
else: return PolyLine(v, True)
def make_polyline(v):
return PolyLine(v, False)
def make_capsule(length, width):
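    # A capsule is a box of the given length capped by a circle of radius width/2 at each end.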
l, r, t, b = 0, length, width/2, -width/2
box = make_polygon([(l,b), (l,t), (r,t), (r,b)])
circ0 = make_circle(width/2)
circ1 = make_circle(width/2)
circ1.add_attr(Transform(translation=(length, 0)))
geom = Compound([box, circ0, circ1])
return geom
class Compound(Geom):
def __init__(self, gs):
Geom.__init__(self)
self.gs = gs
for g in self.gs:
g.attrs = [a for a in g.attrs if not isinstance(a, Color)]
def render1(self):
for g in self.gs:
g.render()
class PolyLine(Geom):
def __init__(self, v, close):
Geom.__init__(self)
self.v = v
self.close = close
self.linewidth = LineWidth(1)
self.add_attr(self.linewidth)
def render1(self):
glBegin(GL_LINE_LOOP if self.close else GL_LINE_STRIP)
for p in self.v:
glVertex3f(p[0], p[1],0) # draw each vertex
glEnd()
def set_linewidth(self, x):
self.linewidth.stroke = x
class Line(Geom):
def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)):
Geom.__init__(self)
self.start = start
self.end = end
self.linewidth = LineWidth(1)
self.add_attr(self.linewidth)
def render1(self):
glBegin(GL_LINES)
glVertex2f(*self.start)
glVertex2f(*self.end)
glEnd()
class Image(Geom):
def __init__(self, fname, width, height):
Geom.__init__(self)
self.width = width
self.height = height
img = pyglet.image.load(fname)
self.img = img
self.flip = False
def render1(self):
self.img.blit(-self.width/2, -self.height/2, width=self.width, height=self.height)
# ================================================================
class SimpleImageViewer(object):
def __init__(self, display=None, maxwidth=500):
self.window = None
self.isopen = False
self.display = display
self.maxwidth = maxwidth
def imshow(self, arr):
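        # Create the window lazily on the first frame, scaling it down to maxwidth while keeping the aspect ratio.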
if self.window is None:
height, width, _channels = arr.shape
if width > self.maxwidth:
scale = self.maxwidth / width
width = int(scale * width)
height = int(scale * height)
self.window = pyglet.window.Window(width=width, height=height,
display=self.display, vsync=False, resizable=True)
self.width = width
self.height = height
self.isopen = True
@self.window.event
def on_resize(width, height):
self.width = width
self.height = height
@self.window.event
def on_close():
self.isopen = False
        assert len(arr.shape) == 3, "You passed in an image with the wrong number of dimensions"
image = pyglet.image.ImageData(arr.shape[1], arr.shape[0],
'RGB', arr.tobytes(), pitch=arr.shape[1]*-3)
gl.glTexParameteri(gl.GL_TEXTURE_2D,
gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
texture = image.get_texture()
texture.width = self.width
texture.height = self.height
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
texture.blit(0, 0) # draw
self.window.flip()
def close(self):
if self.isopen and sys.meta_path:
# ^^^ check sys.meta_path to avoid 'ImportError: sys.meta_path is None, Python is likely shutting down'
self.window.close()
self.isopen = False
def __del__(self):
self.close() | []
| []
| [
"DYLD_FALLBACK_LIBRARY_PATH"
]
| [] | ["DYLD_FALLBACK_LIBRARY_PATH"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pmuseosfinal.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/workers/python/papyros/papyros.py | import os
import sys
import json
import python_runner
import friendly_traceback
from friendly_traceback.core import FriendlyTraceback
from collections.abc import Awaitable
from contextlib import contextmanager, redirect_stdout, redirect_stderr
from pyodide_worker_runner import install_imports
from pyodide import JsException, create_proxy
from .util import to_py
from .autocomplete import autocomplete
SYS_RECURSION_LIMIT = 500
class Papyros(python_runner.PyodideRunner):
def __init__(
self,
*,
source_code="",
filename="/my_program.py",
callback=None,
buffer_constructor=None,
limit=SYS_RECURSION_LIMIT
):
if callback is None:
raise ValueError("Callback must not be None")
if buffer_constructor is not None:
self.OutputBufferClass = lambda f: buffer_constructor(create_proxy(f))
super().__init__(source_code=source_code, filename=filename)
self.limit = limit
self.override_globals()
self.set_event_callback(callback)
def set_event_callback(self, event_callback):
def runner_callback(event_type, data):
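            # Normalize python_runner events into the {type, data, contentType} dicts expected by the worker callback.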
def cb(typ, dat, contentType=None, **kwargs):
return event_callback(dict(type=typ, data=dat, contentType=contentType or "text/plain", **kwargs))
if event_type == "output":
parts = data["parts"]
if not isinstance(parts, list):
parts = [parts]
for part in to_py(parts):
typ = part["type"]
data = part["text"] if "text" in part else part["data"]
if typ in ["stderr", "traceback", "syntax_error"]:
cb("error", data, contentType=part.get("contentType"))
elif typ in ["input", "input_prompt"]:
# Do not display values entered by user for input
continue
else:
cb("output", data, contentType=part.get("contentType"))
elif event_type == "input":
return cb("input", data["prompt"])
elif event_type == "sleep":
return cb("sleep", data["seconds"]*1000, contentType="application/number")
else:
return cb(event_type, data.get("data", ""), contentType=data.get("contentType"))
self.set_callback(runner_callback)
def override_globals(self):
        # Code is executed in a worker with fewer resources than the full environment
sys.setrecursionlimit(self.limit)
# Otherwise `import matplotlib` fails while assuming a browser backend
os.environ["MPLBACKEND"] = "AGG"
self.override_matplotlib()
def override_matplotlib(self):
try:
# workaround from https://github.com/pyodide/pyodide/issues/1518
import matplotlib.pyplot
import base64
from io import BytesIO
def show():
buf = BytesIO()
matplotlib.pyplot.savefig(buf, format="png")
buf.seek(0)
# encode to a base64 str
img = base64.b64encode(buf.read()).decode("utf-8")
matplotlib.pyplot.clf()
self.output("img", img, contentType="img/png;base64")
matplotlib.pyplot.show = show
except ModuleNotFoundError:
pass
async def install_imports(self, source_code, ignore_missing=True):
try:
await install_imports(source_code)
except (ValueError, JsException):
# Occurs when trying to fetch PyPi files for misspelled imports
if not ignore_missing:
raise
@contextmanager
def _execute_context(self):
with (
redirect_stdout(python_runner.output.SysStream("output", self.output_buffer)),
redirect_stderr(python_runner.output.SysStream("error", self.output_buffer)),
):
try:
yield
except BaseException as e:
self.output("traceback", **self.serialize_traceback(e))
self.post_run()
def pre_run(self, source_code, mode="exec", top_level_await=False):
self.override_globals()
return super().pre_run(source_code, mode=mode, top_level_await=top_level_await)
async def run_async(self, source_code, mode="exec", top_level_await=True):
with self._execute_context():
try:
code_obj = self.pre_run(source_code, mode=mode, top_level_await=top_level_await)
if code_obj:
result = self.execute(code_obj, mode)
while isinstance(result, Awaitable):
result = await result
return result
except BaseException as e:
# Sometimes KeyboardInterrupt is caught by Pyodide and raised as a PythonError
# with a js_error containing the reason
js_error = str(getattr(e, "js_error", ""))
if isinstance(e, KeyboardInterrupt) or "KeyboardInterrupt" in js_error:
self.callback("interrupt", data="KeyboardInterrupt", contentType="text/plain")
else:
raise
def serialize_syntax_error(self, exc):
raise # Rethrow to ensure FriendlyTraceback library is imported correctly
def serialize_traceback(self, exc):
# Allow friendly_traceback to inspect the code
friendly_traceback.source_cache.cache.add(self.filename, self.source_code)
# Initialize traceback
fr = FriendlyTraceback(type(exc), exc, exc.__traceback__)
fr.assign_generic()
fr.assign_cause()
# Translate properties to FriendlyError interface
tb = fr.info.get("shortened_traceback", "")
info = fr.info.get("generic", "")
why = fr.info.get("cause", "")
what = fr.info.get("message", "")
name = type(exc).__name__
user_start = 0
tb_lines = tb.split("\n")
# Find first line in traceback that involves code from the user
while user_start < len(tb_lines) and self.filename not in tb_lines[user_start]:
user_start += 1
# Find line containing Exception name, denoting end of location of issue
user_end = user_start + 1
while user_end < len(tb_lines) and name not in tb_lines[user_end]:
user_end += 1
where = "\n".join(tb_lines[user_start:user_end]) or ""
# Format for callback
return dict(
text=json.dumps(dict(
name=name,
traceback=tb,
info=info,
why=why,
where=where,
what=what
)),
contentType="text/json"
)
def autocomplete(self, context):
context = to_py(context)
self.set_source_code(context["text"])
return autocomplete(context)
def lint(self, code):
# PyLint runs into an issue when trying to import its dependencies
# Temporarily overriding os.devnull solves this issue
TEMP_DEV_NULL = "__papyros_dev_null"
with open(TEMP_DEV_NULL, "w") as f:
pass
orig_dev_null = os.devnull
os.devnull = TEMP_DEV_NULL
self.set_source_code(code)
from .linting import lint
os.devnull = orig_dev_null
lint_results = lint(self.filename)
return lint_results
| []
| []
| [
"MPLBACKEND"
]
| [] | ["MPLBACKEND"] | python | 1 | 0 | |
soracom/generated/cmd/sigfox_devices_list.go | // Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"fmt"
"net/url"
"os"
"github.com/spf13/cobra"
)
// SigfoxDevicesListCmdLastEvaluatedKey holds value of 'last_evaluated_key' option
var SigfoxDevicesListCmdLastEvaluatedKey string
// SigfoxDevicesListCmdTagName holds value of 'tag_name' option
var SigfoxDevicesListCmdTagName string
// SigfoxDevicesListCmdTagValue holds value of 'tag_value' option
var SigfoxDevicesListCmdTagValue string
// SigfoxDevicesListCmdTagValueMatchMode holds value of 'tag_value_match_mode' option
var SigfoxDevicesListCmdTagValueMatchMode string
// SigfoxDevicesListCmdLimit holds value of 'limit' option
var SigfoxDevicesListCmdLimit int64
// SigfoxDevicesListCmdPaginate indicates to do pagination or not
var SigfoxDevicesListCmdPaginate bool
// SigfoxDevicesListCmdOutputJSONL indicates to output with jsonl format
var SigfoxDevicesListCmdOutputJSONL bool
func init() {
SigfoxDevicesListCmd.Flags().StringVar(&SigfoxDevicesListCmdLastEvaluatedKey, "last-evaluated-key", "", TRAPI("The device ID of the last device retrieved on the current page. By specifying this parameter, you can continue to retrieve the list from the next device onward."))
SigfoxDevicesListCmd.Flags().StringVar(&SigfoxDevicesListCmdTagName, "tag-name", "", TRAPI("Tag name for filtering the search (exact match)."))
SigfoxDevicesListCmd.Flags().StringVar(&SigfoxDevicesListCmdTagValue, "tag-value", "", TRAPI("Tag search string for filtering the search. Required when `tag_name` has been specified."))
SigfoxDevicesListCmd.Flags().StringVar(&SigfoxDevicesListCmdTagValueMatchMode, "tag-value-match-mode", "exact", TRAPI("Tag match mode."))
SigfoxDevicesListCmd.Flags().Int64Var(&SigfoxDevicesListCmdLimit, "limit", 0, TRAPI("Maximum number of Sigfox devices to retrieve."))
SigfoxDevicesListCmd.Flags().BoolVar(&SigfoxDevicesListCmdPaginate, "fetch-all", false, TRCLI("cli.common_params.paginate.short_help"))
SigfoxDevicesListCmd.Flags().BoolVar(&SigfoxDevicesListCmdOutputJSONL, "jsonl", false, TRCLI("cli.common_params.jsonl.short_help"))
SigfoxDevicesCmd.AddCommand(SigfoxDevicesListCmd)
}
// SigfoxDevicesListCmd defines 'list' subcommand
var SigfoxDevicesListCmd = &cobra.Command{
Use: "list",
Short: TRAPI("/sigfox_devices:get:summary"),
Long: TRAPI(`/sigfox_devices:get:description`),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unexpected arguments passed => %v", args)
}
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectSigfoxDevicesListCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
if SigfoxDevicesListCmdOutputJSONL {
return printStringAsJSONL(body)
}
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectSigfoxDevicesListCmdParams(ac *apiClient) (*apiParams, error) {
return &apiParams{
method: "GET",
path: buildPathForSigfoxDevicesListCmd("/sigfox_devices"),
query: buildQueryForSigfoxDevicesListCmd(),
doPagination: SigfoxDevicesListCmdPaginate,
paginationKeyHeaderInResponse: "x-soracom-next-key",
paginationRequestParameterInQuery: "last_evaluated_key",
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForSigfoxDevicesListCmd(path string) string {
return path
}
func buildQueryForSigfoxDevicesListCmd() url.Values {
result := url.Values{}
if SigfoxDevicesListCmdLastEvaluatedKey != "" {
result.Add("last_evaluated_key", SigfoxDevicesListCmdLastEvaluatedKey)
}
if SigfoxDevicesListCmdTagName != "" {
result.Add("tag_name", SigfoxDevicesListCmdTagName)
}
if SigfoxDevicesListCmdTagValue != "" {
result.Add("tag_value", SigfoxDevicesListCmdTagValue)
}
if SigfoxDevicesListCmdTagValueMatchMode != "exact" {
result.Add("tag_value_match_mode", SigfoxDevicesListCmdTagValueMatchMode)
}
if SigfoxDevicesListCmdLimit != 0 {
result.Add("limit", sprintf("%d", SigfoxDevicesListCmdLimit))
}
return result
}
| [
"\"SORACOM_VERBOSE\""
]
| []
| [
"SORACOM_VERBOSE"
]
| [] | ["SORACOM_VERBOSE"] | go | 1 | 0 | |
cmd/lncli/main.go | // Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Copyright (C) 2015-2017 The Lightning Network Developers
package main
import (
"fmt"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"strings"
macaroon "gopkg.in/macaroon.v2"
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/macaroons"
"github.com/urfave/cli"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
const (
defaultDataDir = "data"
defaultChainSubDir = "chain"
defaultTLSCertFilename = "tls.cert"
defaultMacaroonFilename = "admin.macaroon"
defaultRPCPort = "10009"
defaultRPCHostPort = "localhost:" + defaultRPCPort
)
var (
defaultLndDir = btcutil.AppDataDir("lnd", false)
defaultTLSCertPath = filepath.Join(defaultLndDir, defaultTLSCertFilename)
)
func fatal(err error) {
fmt.Fprintf(os.Stderr, "[lncli] %v\n", err)
os.Exit(1)
}
func getWalletUnlockerClient(ctx *cli.Context) (lnrpc.WalletUnlockerClient, func()) {
conn := getClientConn(ctx, true)
cleanUp := func() {
conn.Close()
}
return lnrpc.NewWalletUnlockerClient(conn), cleanUp
}
func getClient(ctx *cli.Context) (lnrpc.LightningClient, func()) {
conn := getClientConn(ctx, false)
cleanUp := func() {
conn.Close()
}
return lnrpc.NewLightningClient(conn), cleanUp
}
func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn {
// First, we'll parse the args from the command.
tlsCertPath, macPath, err := extractPathArgs(ctx)
if err != nil {
fatal(err)
}
// Load the specified TLS certificate and build transport credentials
// with it.
creds, err := credentials.NewClientTLSFromFile(tlsCertPath, "")
if err != nil {
fatal(err)
}
// Create a dial options array.
opts := []grpc.DialOption{
grpc.WithTransportCredentials(creds),
}
// Only process macaroon credentials if --no-macaroons isn't set and
// if we're not skipping macaroon processing.
if !ctx.GlobalBool("no-macaroons") && !skipMacaroons {
// Load the specified macaroon file.
macBytes, err := ioutil.ReadFile(macPath)
if err != nil {
fatal(fmt.Errorf("unable to read macaroon path (check "+
"the network setting!): %v", err))
}
mac := &macaroon.Macaroon{}
if err = mac.UnmarshalBinary(macBytes); err != nil {
fatal(fmt.Errorf("unable to decode macaroon: %v", err))
}
macConstraints := []macaroons.Constraint{
// We add a time-based constraint to prevent replay of the
// macaroon. It's good for 60 seconds by default to make up for
// any discrepancy between client and server clocks, but leaking
// the macaroon before it becomes invalid makes it possible for
// an attacker to reuse the macaroon. In addition, the validity
// time of the macaroon is extended by the time the server clock
// is behind the client clock, or shortened by the time the
// server clock is ahead of the client clock (or invalid
// altogether if, in the latter case, this time is more than 60
// seconds).
// TODO(aakselrod): add better anti-replay protection.
macaroons.TimeoutConstraint(ctx.GlobalInt64("macaroontimeout")),
// Lock macaroon down to a specific IP address.
macaroons.IPLockConstraint(ctx.GlobalString("macaroonip")),
// ... Add more constraints if needed.
}
// Apply constraints to the macaroon.
constrainedMac, err := macaroons.AddConstraints(mac, macConstraints...)
if err != nil {
fatal(err)
}
// Now we append the macaroon credentials to the dial options.
cred := macaroons.NewMacaroonCredential(constrainedMac)
opts = append(opts, grpc.WithPerRPCCredentials(cred))
}
// We need to use a custom dialer so we can also connect to unix sockets
// and not just TCP addresses.
opts = append(
opts, grpc.WithDialer(
lncfg.ClientAddressDialer(defaultRPCPort),
),
)
conn, err := grpc.Dial(ctx.GlobalString("rpcserver"), opts...)
if err != nil {
fatal(fmt.Errorf("unable to connect to RPC server: %v", err))
}
return conn
}
// extractPathArgs parses the TLS certificate and macaroon paths from the
// command.
func extractPathArgs(ctx *cli.Context) (string, string, error) {
// We'll start off by parsing the active chain and network. These are
// needed to determine the correct path to the macaroon when not
// specified.
chain := strings.ToLower(ctx.GlobalString("chain"))
switch chain {
case "bitcoin", "litecoin":
default:
return "", "", fmt.Errorf("unknown chain: %v", chain)
}
network := strings.ToLower(ctx.GlobalString("network"))
switch network {
case "mainnet", "testnet", "regtest", "simnet":
default:
return "", "", fmt.Errorf("unknown network: %v", network)
}
// We'll now fetch the lnddir so we can make a decision on how to
// properly read the macaroons (if needed) and also the cert. This will
// either be the default, or will have been overwritten by the end
// user.
lndDir := cleanAndExpandPath(ctx.GlobalString("lnddir"))
// If the macaroon path as been manually provided, then we'll only
// target the specified file.
var macPath string
if ctx.GlobalString("macaroonpath") != "" {
macPath = cleanAndExpandPath(ctx.GlobalString("macaroonpath"))
} else {
// Otherwise, we'll go into the path:
// lnddir/data/chain/<chain>/<network> in order to fetch the
// macaroon that we need.
macPath = filepath.Join(
lndDir, defaultDataDir, defaultChainSubDir, chain,
network, defaultMacaroonFilename,
)
}
tlsCertPath := cleanAndExpandPath(ctx.GlobalString("tlscertpath"))
// If a custom lnd directory was set, we'll also check if custom paths
// for the TLS cert and macaroon file were set as well. If not, we'll
// override their paths so they can be found within the custom lnd
// directory set. This allows us to set a custom lnd directory, along
// with custom paths to the TLS cert and macaroon file.
if lndDir != defaultLndDir {
tlsCertPath = filepath.Join(lndDir, defaultTLSCertFilename)
}
return tlsCertPath, macPath, nil
}
func main() {
app := cli.NewApp()
app.Name = "lncli"
app.Version = build.Version()
app.Usage = "control plane for your Lightning Network Daemon (lnd)"
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "rpcserver",
Value: defaultRPCHostPort,
Usage: "host:port of ln daemon",
},
cli.StringFlag{
Name: "lnddir",
Value: defaultLndDir,
Usage: "path to lnd's base directory",
},
cli.StringFlag{
Name: "tlscertpath",
Value: defaultTLSCertPath,
Usage: "path to TLS certificate",
},
cli.StringFlag{
Name: "chain, c",
Usage: "the chain lnd is running on e.g. bitcoin",
Value: "bitcoin",
},
cli.StringFlag{
Name: "network, n",
Usage: "the network lnd is running on e.g. mainnet, " +
"testnet, etc.",
Value: "mainnet",
},
cli.BoolFlag{
Name: "no-macaroons",
Usage: "disable macaroon authentication",
},
cli.StringFlag{
Name: "macaroonpath",
Usage: "path to macaroon file",
},
cli.Int64Flag{
Name: "macaroontimeout",
Value: 60,
Usage: "anti-replay macaroon validity time in seconds",
},
cli.StringFlag{
Name: "macaroonip",
Usage: "if set, lock macaroon to specific IP address",
},
}
app.Commands = []cli.Command{
createCommand,
unlockCommand,
changePasswordCommand,
newAddressCommand,
sendManyCommand,
sendCoinsCommand,
listUnspentCommand,
connectCommand,
disconnectCommand,
openChannelCommand,
closeChannelCommand,
closeAllChannelsCommand,
abandonChannelCommand,
listPeersCommand,
walletBalanceCommand,
channelBalanceCommand,
getInfoCommand,
pendingChannelsCommand,
sendPaymentCommand,
payInvoiceCommand,
sendToRouteCommand,
addInvoiceCommand,
lookupInvoiceCommand,
listInvoicesCommand,
listChannelsCommand,
closedChannelsCommand,
listPaymentsCommand,
describeGraphCommand,
getChanInfoCommand,
getNodeInfoCommand,
queryRoutesCommand,
getNetworkInfoCommand,
debugLevelCommand,
decodePayReqCommand,
listChainTxnsCommand,
stopCommand,
signMessageCommand,
verifyMessageCommand,
feeReportCommand,
updateChannelPolicyCommand,
forwardingHistoryCommand,
}
// Add any extra autopilot commands determined by build flags.
app.Commands = append(app.Commands, autopilotCommands()...)
if err := app.Run(os.Args); err != nil {
fatal(err)
}
}
// cleanAndExpandPath expands environment variables and leading ~ in the
// passed path, cleans the result, and returns it.
// This function is taken from https://github.com/btcsuite/btcd
func cleanAndExpandPath(path string) string {
if path == "" {
return ""
}
// Expand initial ~ to OS specific home directory.
if strings.HasPrefix(path, "~") {
var homeDir string
user, err := user.Current()
if err == nil {
homeDir = user.HomeDir
} else {
homeDir = os.Getenv("HOME")
}
path = strings.Replace(path, "~", homeDir, 1)
}
// NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,
// but the variables can still be expanded via POSIX-style $VARIABLE.
return filepath.Clean(os.ExpandEnv(path))
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
final_project/machinetranslation/translator.py | import json
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import os
from dotenv import load_dotenv
from pathlib import Path
# Load the Watson Language Translator credentials (apikey and url) from a local
# .env file instead of hardcoding them in the source.
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
apikey = os.environ.get('apikey')
url = os.environ.get('url')
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url(url)
language_translator.set_disable_ssl_verification(False)
def english_to_french(english_text):
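    """Translate English text to French with Watson; returns "invalid!" for empty input."""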
if english_text == '':
return "invalid!"
translation = language_translator.translate(
text=english_text, model_id='en-fr').get_result()
result = json.dumps(translation, indent=2, ensure_ascii=False)
result_dict = json.loads(result)
french_text = result_dict['translations'][0]['translation']
return french_text
def french_to_english(french_text):
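    """Translate French text to English with Watson; returns "invalid!" for empty input."""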
if french_text == '':
return "invalid!"
translation = language_translator.translate(
text=french_text, model_id='fr-en').get_result()
result = json.dumps(translation, indent=2, ensure_ascii=False)
result_dict = json.loads(result)
english_text = result_dict['translations'][0]['translation']
return english_text
#print(english_to_french('Hello, how are you today?'))
#print(english_to_french('I love you'))
#print(french_to_english('Bonjour, comment vous รชtes aujourd\'hui?'))
#print(french_to_english('Je t\'aime'))
| []
| []
| [
"url",
"apikey"
]
| [] | ["url", "apikey"] | python | 2 | 0 | |
api/api.go | // Package api provides the HTTP server with wrappers for JSON responses. It
// validates data before passing it to the `db.Database`, which handles the
// query and serialization.
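// A GET request for /<cnpj> returns the matching company as JSON.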
package api
import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"strings"
"github.com/cuducos/go-cnpj"
"github.com/cuducos/minha-receita/db"
)
// errorMessage is a helper to serialize an error message to JSON.
type errorMessage struct {
Message string `json:"message"`
}
// messageResponse takes a text message and an HTTP status, wraps the message into a
// JSON output and writes it together with the proper headers to a response.
func messageResponse(w http.ResponseWriter, s int, m string) {
w.WriteHeader(s)
if m == "" {
return
}
b, err := json.Marshal(errorMessage{m})
if err != nil {
fmt.Fprintf(os.Stderr, "Could not wrap message in JSON: %s", m)
return
}
w.Write(b)
}
type api struct {
db db.Database
}
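// backwardCompatibilityHandler keeps the legacy POST form endpoint working by
// validating the CNPJ sent in the payload and redirecting to the GET route.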
func (app api) backwardCompatibilityHandler(w http.ResponseWriter, r *http.Request) error {
if r.Method != http.MethodPost {
		return fmt.Errorf("No backward compatibility with method %s", r.Method)
}
if err := r.ParseForm(); err != nil {
return fmt.Errorf("Invalid payload")
}
v := r.Form.Get("cnpj")
if v == "" {
return fmt.Errorf("No CNPJ sent in the payload")
}
v = cnpj.Unmask(v)
if !cnpj.IsValid(v) {
return fmt.Errorf("Invalid CNPJ")
}
http.Redirect(w, r, fmt.Sprintf("/%s", v), http.StatusSeeOther)
return nil
}
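// companyHandler answers GET requests for a company identified by its CNPJ,
// setting CORS headers and redirecting the root path to the documentation site.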
func (app api) companyHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-type", "application/json")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding")
if r.Method == http.MethodOptions {
w.WriteHeader(http.StatusOK)
return
}
if r.Method != http.MethodGet {
err := app.backwardCompatibilityHandler(w, r)
if err != nil {
messageResponse(w, http.StatusMethodNotAllowed, "Essa URL aceita apenas o mรฉtodo GET.")
}
return
}
v := r.URL.Path
if v == "/" {
http.Redirect(w, r, "https://docs.minhareceita.org", 302)
return
}
if !cnpj.IsValid(v) {
messageResponse(w, http.StatusBadRequest, fmt.Sprintf("CNPJ %s invรกlido.", cnpj.Mask(v[1:])))
return
}
c, err := app.db.GetCompany(cnpj.Unmask(v))
if err != nil {
messageResponse(w, http.StatusNotFound, fmt.Sprintf("CNPJ %s nรฃo encontrado.", cnpj.Mask(v)))
return
}
w.WriteHeader(http.StatusOK)
s, err := c.JSON()
if err != nil {
messageResponse(w, http.StatusInternalServerError, fmt.Sprintf("Nรฃo foi possรญvel retornar os dados de %s em JSON.", cnpj.Mask(v)))
return
}
io.WriteString(w, s)
}
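// healthHandler is a liveness probe that replies 200 OK to GET requests.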
func (app api) healthHandler(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
messageResponse(w, http.StatusMethodNotAllowed, "Essa URL aceita apenas o mรฉtodo GET.")
return
}
w.WriteHeader(http.StatusOK)
}
// Serve spins up the HTTP server.
func Serve(db db.Database) {
port := os.Getenv("PORT")
if port == "" {
log.Output(2, "No PORT environment variable found, using 8000.")
port = ":8000"
}
if !strings.HasPrefix(port, ":") {
port = ":" + port
}
nr := newRelicApp()
app := api{db: db}
http.HandleFunc(newRelicHandle(nr, "/", app.companyHandler))
http.HandleFunc(newRelicHandle(nr, "/healthz", app.healthHandler))
log.Fatal(http.ListenAndServe(port, nil))
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
doc/source/conf.py | #
# Aodh documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 27 11:38:59 2011.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT)
sys.path.insert(0, BASE_DIR)
# This is required for ReadTheDocs.org, but isn't a bad idea anyway.
os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named 'sphinx.ext.*')
# or your custom ones.
extensions = [
'openstackdocstheme',
'sphinx.ext.autodoc',
'wsmeext.sphinxext',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxcontrib.pecanwsme.rest',
'stevedore.sphinxext',
'oslo_config.sphinxconfiggen',
'oslo_config.sphinxext',
'oslo_policy.sphinxpolicygen',
'oslo_policy.sphinxext',
'sphinxcontrib.httpdomain',
]
config_generator_config_file = os.path.join(ROOT,
'aodh/cmd/aodh-config-generator.conf')
sample_config_basename = '_static/aodh'
policy_generator_config_file = (
'../../aodh/cmd/aodh-policy-generator.conf'
)
sample_policy_basename = '_static/aodh'
wsme_protocols = ['restjson', 'restxml']
todo_include_todos = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2012-2015, OpenStack Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**/#*', '**~', '**/#*#', '**/*alembic*']
# The reST default role (used for this markup: `text`)
# to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
primary_domain = 'py'
nitpicky = False
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ['.']
html_theme = 'openstackdocs'
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [openstackdocstheme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/aodh'
openstackdocs_pdf_link = True
openstackdocs_bug_project = 'aodh'
openstackdocs_bug_tag = ''
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Aodhdoc'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'doc-aodh.tex', u'Aodh Documentation',
u'OpenStack Foundation', 'manual'),
]
latex_elements = {
'makeindex': '',
'printindex': '',
'preamble': r'\setcounter{tocdepth}{3}',
'maxlistdepth': '10',
}
# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
latex_use_xindy = False
# Disable smartquotes, they don't work in latex
smartquotes_excludes = {'builders': ['latex']}
latex_domain_indices = False
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'aodh', u'Aodh Documentation',
[u'OpenStack'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Aodh', u'Aodh Documentation', u'OpenStack',
'Aodh', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output --------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Aodh'
epub_author = u'OpenStack'
epub_publisher = u'OpenStack'
epub_copyright = u'2012-2015, OpenStack'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# NOTE(dhellmann): pbr used to set this option but now that we are
# using Sphinx>=1.6.2 it does not so we have to set it ourselves.
suppress_warnings = [
'app.add_directive', 'app.add_role',
'app.add_generic_role', 'app.add_node',
'image.nonlocal_uri',
]
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
checkerista/.env/Lib/site-packages/django/core/management/base.py | """
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
import os
import sys
from argparse import ArgumentParser, HelpFormatter
from io import TextIOBase
import django
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style, no_style
from django.db import DEFAULT_DB_ALIAS, connections
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class SystemCheckError(CommandError):
"""
The system check framework detected unrecoverable errors.
"""
pass
class CommandParser(ArgumentParser):
"""
Customized ArgumentParser class to improve some error messages and prevent
    SystemExit on several occasions, as SystemExit is unacceptable when a
command is called programmatically.
"""
def __init__(self, *, missing_args_message=None, called_from_command_line=None, **kwargs):
self.missing_args_message = missing_args_message
self.called_from_command_line = called_from_command_line
super().__init__(**kwargs)
def parse_args(self, args=None, namespace=None):
# Catch missing argument for a better error message
if (self.missing_args_message and
not (args or any(not arg.startswith('-') for arg in args))):
self.error(self.missing_args_message)
return super().parse_args(args, namespace)
def error(self, message):
if self.called_from_command_line:
super().error(message)
else:
raise CommandError("Error: %s" % message)
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
def no_translations(handle_func):
"""Decorator that forces a command to run with translations deactivated."""
def wrapped(*args, **kwargs):
from django.utils import translation
saved_locale = translation.get_language()
translation.deactivate_all()
try:
res = handle_func(*args, **kwargs)
finally:
if saved_locale is not None:
translation.activate(saved_locale)
return res
return wrapped
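# Usage sketch (illustrative, not part of this module): apply the decorator to
# a command's handle() so the command always runs with translations
# deactivated, e.g. for output that must be locale-independent:
#
#     class Command(BaseCommand):
#         @no_translations
#         def handle(self, *args, **options):
#             ...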
class DjangoHelpFormatter(HelpFormatter):
"""
Customized formatter so that command-specific arguments appear in the
--help output before arguments common to all commands.
"""
show_last = {
'--version', '--verbosity', '--traceback', '--settings', '--pythonpath',
'--no-color', '--force-color', '--skip-checks',
}
def _reordered_actions(self, actions):
return sorted(
actions,
key=lambda a: set(a.option_strings) & self.show_last != set()
)
def add_usage(self, usage, actions, *args, **kwargs):
super().add_usage(usage, self._reordered_actions(actions), *args, **kwargs)
def add_arguments(self, actions):
super().add_arguments(self._reordered_actions(actions))
class OutputWrapper(TextIOBase):
"""
Wrapper around stdout/stderr
"""
@property
def style_func(self):
return self._style_func
@style_func.setter
def style_func(self, style_func):
if style_func and self.isatty():
self._style_func = style_func
else:
self._style_func = lambda x: x
def __init__(self, out, ending='\n'):
self._out = out
self.style_func = None
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def isatty(self):
return hasattr(self._out, 'isatty') and self._out.isatty()
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = style_func or self.style_func
self._out.write(style_func(msg))
class BaseCommand:
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``help``
A short description of the command, which will be printed in
help messages.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_migrations_checks``
A boolean; if ``True``, the command prints a warning if the set of
migrations on disk don't match the migrations in the database.
``requires_system_checks``
A boolean; if ``True``, entire Django project will be checked for errors
prior to executing the command. Default value is ``True``.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``stealth_options``
A tuple of any options the command uses which aren't defined by the
argument parser.
"""
# Metadata about this command.
help = ''
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
requires_migrations_checks = False
requires_system_checks = True
# Arguments, common to all commands, which aren't defined by the argument
# parser.
base_stealth_options = ('stderr', 'stdout')
# Command-specific options not defined by the argument parser.
stealth_options = ()
def __init__(self, stdout=None, stderr=None, no_color=False, force_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color and force_color:
raise CommandError("'no_color' and 'force_color' can't be used together.")
if no_color:
self.style = no_style()
else:
self.style = color_style(force_color)
self.stderr.style_func = self.style.ERROR
def get_version(self):
"""
Return the Django version, which should be correct for all built-in
Django commands. User-supplied commands can override this method to
return their own version.
"""
return django.get_version()
def create_parser(self, prog_name, subcommand, **kwargs):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
parser = CommandParser(
prog='%s %s' % (os.path.basename(prog_name), subcommand),
description=self.help or None,
formatter_class=DjangoHelpFormatter,
missing_args_message=getattr(self, 'missing_args_message', None),
called_from_command_line=getattr(self, '_called_from_command_line', None),
**kwargs
)
parser.add_argument('--version', action='version', version=self.get_version())
parser.add_argument(
'-v', '--verbosity', default=1,
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output',
)
parser.add_argument(
'--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument(
'--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".',
)
parser.add_argument('--traceback', action='store_true', help='Raise on CommandError exceptions')
parser.add_argument(
'--no-color', action='store_true',
help="Don't colorize the command output.",
)
parser.add_argument(
'--force-color', action='store_true',
help='Force colorization of the command output.',
)
if self.requires_system_checks:
parser.add_argument(
'--skip-checks', action='store_true',
help='Skip system checks.',
)
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
finally:
try:
connections.close_all()
except ImproperlyConfigured:
# Ignore if connections aren't setup at this point (e.g. no
# configured settings).
pass
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by the ``requires_system_checks`` attribute, except if
force-skipped).
"""
if options['force_color'] and options['no_color']:
raise CommandError("The --no-color and --force-color options can't be used together.")
if options['force_color']:
self.style = color_style(force_color=True)
elif options['no_color']:
self.style = no_style()
self.stderr.style_func = None
if options.get('stdout'):
self.stdout = OutputWrapper(options['stdout'])
if options.get('stderr'):
self.stderr = OutputWrapper(options['stderr'])
if self.requires_system_checks and not options['skip_checks']:
self.check()
if self.requires_migrations_checks:
self.check_migrations()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
output = '%s\n%s\n%s' % (
self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()),
output,
self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()),
)
self.stdout.write(output)
return output
def _run_checks(self, **kwargs):
return checks.run_checks(**kwargs)
def check(self, app_configs=None, tags=None, display_num_errors=False,
include_deployment_checks=False, fail_level=checks.ERROR):
"""
Use the system check framework to validate entire Django project.
Raise CommandError for any serious message (error or critical errors).
If there are only light messages (like warnings), print them to stderr
and don't raise an exception.
"""
all_issues = self._run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()]
criticals = [e for e in all_issues if checks.CRITICAL <= e.level and not e.is_silenced()]
sorted_issues = [
(criticals, 'CRITICALS'),
(errors, 'ERRORS'),
(warnings, 'WARNINGS'),
(infos, 'INFOS'),
(debugs, 'DEBUGS'),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
self.style.ERROR(str(e))
if e.is_serious()
else self.style.WARNING(str(e))
for e in issues)
formatted = "\n".join(sorted(formatted))
body += '\n%s:\n%s\n' % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += '\n'
footer += "System check identified %s (%s silenced)." % (
"no issues" if visible_issue_count == 0 else
"1 issue" if visible_issue_count == 1 else
"%s issues" % visible_issue_count,
len(all_issues) - visible_issue_count,
)
if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def check_migrations(self):
"""
Print a warning if the set of migrations on disk don't match the
migrations in the database.
"""
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
apps_waiting_migration = sorted({migration.app_label for migration, backwards in plan})
self.stdout.write(
self.style.NOTICE(
"\nYou have %(unapplied_migration_count)s unapplied migration(s). "
"Your project may not work properly until you apply the "
"migrations for app(s): %(apps_waiting_migration)s." % {
"unapplied_migration_count": len(plan),
"apps_waiting_migration": ", ".join(apps_waiting_migration),
}
)
)
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application labels
as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app_config()``, which will be called once for each application.
"""
missing_args_message = "Enter at least one application label."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+', help='One or more application label.')
def handle(self, *app_labels, **options):
from django.apps import apps
try:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
except (LookupError, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app_config in app_configs:
app_output = self.handle_app_config(app_config, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app_config(self, app_config, **options):
"""
Perform the command's actions for app_config, an AppConfig instance
corresponding to an application label given on the command line.
"""
raise NotImplementedError(
"Subclasses of AppCommand must provide"
"a handle_app_config() method.")
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
label = 'label'
missing_args_message = "Enter at least one %s." % label
def add_arguments(self, parser):
parser.add_argument('args', metavar=self.label, nargs='+')
def handle(self, *labels, **options):
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
bindinfo/bind_test.go | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bindinfo_test
import (
"context"
"crypto/tls"
"flag"
"fmt"
"os"
"strconv"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/parser"
"github.com/pingcap/parser/auth"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/bindinfo"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/metrics"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/cluster"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/stmtsummary"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testleak"
dto "github.com/prometheus/client_model/go"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
autoid.SetStep(5000)
TestingT(t)
}
var _ = Suite(&testSuite{})
type testSuite struct {
cluster cluster.Cluster
store kv.Storage
domain *domain.Domain
*parser.Parser
}
type mockSessionManager struct {
PS []*util.ProcessInfo
}
func (msm *mockSessionManager) ShowProcessList() map[uint64]*util.ProcessInfo {
ret := make(map[uint64]*util.ProcessInfo)
for _, item := range msm.PS {
ret[item.ID] = item
}
return ret
}
func (msm *mockSessionManager) GetProcessInfo(id uint64) (*util.ProcessInfo, bool) {
for _, item := range msm.PS {
if item.ID == id {
return item, true
}
}
return &util.ProcessInfo{}, false
}
func (msm *mockSessionManager) Kill(cid uint64, query bool) {
}
func (msm *mockSessionManager) KillAllConnections() {
}
func (msm *mockSessionManager) UpdateTLSConfig(cfg *tls.Config) {
}
func (msm *mockSessionManager) ServerID() uint64 {
return 1
}
var mockTikv = flag.Bool("mockTikv", true, "use mock tikv store in bind test")
func (s *testSuite) SetUpSuite(c *C) {
testleak.BeforeTest()
s.Parser = parser.New()
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
store, err := mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c cluster.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cluster = c
}),
)
c.Assert(err, IsNil)
s.store = store
session.SetSchemaLease(0)
session.DisableStats4Test()
}
bindinfo.Lease = 0
d, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
d.SetStatsUpdating(true)
s.domain = d
}
func (s *testSuite) TearDownSuite(c *C) {
s.domain.Close()
s.store.Close()
testleak.AfterTest(c)()
}
func (s *testSuite) TearDownTest(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
r := tk.MustQuery("show tables")
for _, tb := range r.Rows() {
tableName := tb[0]
tk.MustExec(fmt.Sprintf("drop table %v", tableName))
}
}
func (s *testSuite) cleanBindingEnv(tk *testkit.TestKit) {
tk.MustExec("truncate table mysql.bind_info")
s.domain.BindHandle().Clear()
}
func (s *testSuite) TestBindParse(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("create table t(i int)")
tk.MustExec("create index index_t on t(i)")
originSQL := "select * from t"
bindSQL := "select * from t use index(index_t)"
defaultDb := "test"
status := "using"
charset := "utf8mb4"
collation := "utf8mb4_bin"
source := bindinfo.Manual
sql := fmt.Sprintf(`INSERT INTO mysql.bind_info(original_sql,bind_sql,default_db,status,create_time,update_time,charset,collation,source) VALUES ('%s', '%s', '%s', '%s', NOW(), NOW(),'%s', '%s', '%s')`,
originSQL, bindSQL, defaultDb, status, charset, collation, source)
tk.MustExec(sql)
bindHandle := bindinfo.NewBindHandle(tk.Se)
err := bindHandle.Update(true)
c.Check(err, IsNil)
c.Check(bindHandle.Size(), Equals, 1)
sql, hash := parser.NormalizeDigest("select * from t")
bindData := bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t")
bind := bindData.Bindings[0]
c.Check(bind.BindSQL, Equals, "select * from t use index(index_t)")
c.Check(bindData.Db, Equals, "test")
c.Check(bind.Status, Equals, "using")
c.Check(bind.Charset, Equals, "utf8mb4")
c.Check(bind.Collation, Equals, "utf8mb4_bin")
c.Check(bind.CreateTime, NotNil)
c.Check(bind.UpdateTime, NotNil)
dur, err := bind.SinceUpdateTime()
c.Assert(err, IsNil)
c.Assert(int64(dur), GreaterEqual, int64(0))
// Test fields with quotes or slashes.
sql = `CREATE GLOBAL BINDING FOR select * from t where i BETWEEN "a" and "b" USING select * from t use index(index_t) where i BETWEEN "a\nb\rc\td\0e" and 'x'`
tk.MustExec(sql)
tk.MustExec(`DROP global binding for select * from t use index(idx) where i BETWEEN "a\nb\rc\td\0e" and "x"`)
// Test SetOprStmt.
tk.MustExec(`create binding for select * from t union all select * from t using select * from t use index(index_t) union all select * from t use index()`)
tk.MustExec(`drop binding for select * from t union all select * from t using select * from t use index(index_t) union all select * from t use index()`)
tk.MustExec(`create binding for select * from t INTERSECT select * from t using select * from t use index(index_t) INTERSECT select * from t use index()`)
tk.MustExec(`drop binding for select * from t INTERSECT select * from t using select * from t use index(index_t) INTERSECT select * from t use index()`)
tk.MustExec(`create binding for select * from t EXCEPT select * from t using select * from t use index(index_t) EXCEPT select * from t use index()`)
tk.MustExec(`drop binding for select * from t EXCEPT select * from t using select * from t use index(index_t) EXCEPT select * from t use index()`)
tk.MustExec(`create binding for (select * from t) union all (select * from t) using (select * from t use index(index_t)) union all (select * from t use index())`)
tk.MustExec(`drop binding for (select * from t) union all (select * from t) using (select * from t use index(index_t)) union all (select * from t use index())`)
// Test Update / Delete.
tk.MustExec("create table t1(a int, b int, c int, key(b), key(c))")
tk.MustExec("create table t2(a int, b int, c int, key(b), key(c))")
tk.MustExec("create binding for delete from t1 where b = 1 and c > 1 using delete /*+ use_index(t1, c) */ from t1 where b = 1 and c > 1")
tk.MustExec("drop binding for delete from t1 where b = 1 and c > 1 using delete /*+ use_index(t1, c) */ from t1 where b = 1 and c > 1")
tk.MustExec("create binding for delete t1, t2 from t1 inner join t2 on t1.b = t2.b where t1.c = 1 using delete /*+ hash_join(t1, t2), use_index(t1, c) */ t1, t2 from t1 inner join t2 on t1.b = t2.b where t1.c = 1")
tk.MustExec("drop binding for delete t1, t2 from t1 inner join t2 on t1.b = t2.b where t1.c = 1 using delete /*+ hash_join(t1, t2), use_index(t1, c) */ t1, t2 from t1 inner join t2 on t1.b = t2.b where t1.c = 1")
tk.MustExec("create binding for update t1 set a = 1 where b = 1 and c > 1 using update /*+ use_index(t1, c) */ t1 set a = 1 where b = 1 and c > 1")
tk.MustExec("drop binding for update t1 set a = 1 where b = 1 and c > 1 using update /*+ use_index(t1, c) */ t1 set a = 1 where b = 1 and c > 1")
tk.MustExec("create binding for update t1, t2 set t1.a = 1 where t1.b = t2.b using update /*+ inl_join(t1) */ t1, t2 set t1.a = 1 where t1.b = t2.b")
tk.MustExec("drop binding for update t1, t2 set t1.a = 1 where t1.b = t2.b using update /*+ inl_join(t1) */ t1, t2 set t1.a = 1 where t1.b = t2.b")
// Test Insert / Replace.
tk.MustExec("create binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1 using insert into t1 select /*+ use_index(t2,c) */ * from t2 where t2.b = 1 and t2.c > 1")
tk.MustExec("drop binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1 using insert into t1 select /*+ use_index(t2,c) */ * from t2 where t2.b = 1 and t2.c > 1")
tk.MustExec("create binding for replace into t1 select * from t2 where t2.b = 1 and t2.c > 1 using replace into t1 select /*+ use_index(t2,c) */ * from t2 where t2.b = 1 and t2.c > 1")
tk.MustExec("drop binding for replace into t1 select * from t2 where t2.b = 1 and t2.c > 1 using replace into t1 select /*+ use_index(t2,c) */ * from t2 where t2.b = 1 and t2.c > 1")
err = tk.ExecToErr("create binding for insert into t1 values(1,1,1) using insert into t1 values(1,1,1)")
c.Assert(err.Error(), Equals, "create binding only supports INSERT / REPLACE INTO SELECT")
err = tk.ExecToErr("create binding for replace into t1 values(1,1,1) using replace into t1 values(1,1,1)")
c.Assert(err.Error(), Equals, "create binding only supports INSERT / REPLACE INTO SELECT")
// Test errors.
tk.MustExec(`drop table if exists t1`)
tk.MustExec("create table t1(i int, s varchar(20))")
_, err = tk.Exec("create global binding for select * from t using select * from t1 use index for join(index_t)")
c.Assert(err, NotNil, Commentf("err %v", err))
}
var testSQLs = []struct {
createSQL string
overlaySQL string
querySQL string
originSQL string
bindSQL string
dropSQL string
memoryUsage float64
}{
{
createSQL: "binding for select * from t where i>100 using select * from t use index(index_t) where i>100",
overlaySQL: "binding for select * from t where i>99 using select * from t use index(index_t) where i>99",
querySQL: "select * from t where i > 30.0",
originSQL: "select * from t where i > ?",
bindSQL: "select * from t use index(index_t) where i>99",
dropSQL: "binding for select * from t where i>100",
memoryUsage: float64(97),
},
{
createSQL: "binding for select * from t union all select * from t using select * from t use index(index_t) union all select * from t use index()",
overlaySQL: "",
querySQL: "select * from t union all select * from t",
originSQL: "select * from t union all select * from t",
bindSQL: "select * from t use index(index_t) union all select * from t use index()",
dropSQL: "binding for select * from t union all select * from t",
memoryUsage: float64(138),
},
{
createSQL: "binding for (select * from t) union all (select * from t) using (select * from t use index(index_t)) union all (select * from t use index())",
overlaySQL: "",
querySQL: "(select * from t) union all (select * from t)",
originSQL: "( select * from t ) union all ( select * from t )",
bindSQL: "(select * from t use index(index_t)) union all (select * from t use index())",
dropSQL: "binding for (select * from t) union all (select * from t)",
memoryUsage: float64(150),
},
{
createSQL: "binding for select * from t intersect select * from t using select * from t use index(index_t) intersect select * from t use index()",
overlaySQL: "",
querySQL: "select * from t intersect select * from t",
originSQL: "select * from t intersect select * from t",
bindSQL: "select * from t use index(index_t) intersect select * from t use index()",
dropSQL: "binding for select * from t intersect select * from t",
memoryUsage: float64(138),
},
{
createSQL: "binding for select * from t except select * from t using select * from t use index(index_t) except select * from t use index()",
overlaySQL: "",
querySQL: "select * from t except select * from t",
originSQL: "select * from t except select * from t",
bindSQL: "select * from t use index(index_t) except select * from t use index()",
dropSQL: "binding for select * from t except select * from t",
memoryUsage: float64(132),
},
{
createSQL: "binding for delete from t where i = 1 using delete /*+ use_index(t,index_t) */ from t where i = 1",
overlaySQL: "",
querySQL: "delete from t where i = 2",
originSQL: "delete from t where i = ?",
bindSQL: "delete /*+ use_index(t,index_t) */ from t where i = 1",
dropSQL: "binding for delete from t where i = 1",
memoryUsage: float64(103),
},
{
createSQL: "binding for delete t, t1 from t inner join t1 on t.s = t1.s where t.i = 1 using delete /*+ use_index(t,index_t), hash_join(t,t1) */ t, t1 from t inner join t1 on t.s = t1.s where t.i = 1",
overlaySQL: "",
querySQL: "delete t, t1 from t inner join t1 on t.s = t1.s where t.i = 2",
originSQL: "delete t , t1 from t inner join t1 on t . s = t1 . s where t . i = ?",
bindSQL: "delete /*+ use_index(t,index_t), hash_join(t,t1) */ t, t1 from t inner join t1 on t.s = t1.s where t.i = 1",
dropSQL: "binding for delete t, t1 from t inner join t1 on t.s = t1.s where t.i = 1",
memoryUsage: float64(199),
},
{
createSQL: "binding for update t set s = 'a' where i = 1 using update /*+ use_index(t,index_t) */ t set s = 'a' where i = 1",
overlaySQL: "",
querySQL: "update t set s='b' where i=2",
originSQL: "update t set s = ? where i = ?",
bindSQL: "update /*+ use_index(t,index_t) */ t set s = 'a' where i = 1",
dropSQL: "binding for update t set s = 'a' where i = 1",
memoryUsage: float64(115),
},
{
createSQL: "binding for update t, t1 set t.s = 'a' where t.i = t1.i using update /*+ inl_join(t1) */ t, t1 set t.s = 'a' where t.i = t1.i",
overlaySQL: "",
querySQL: "update t , t1 set t.s='b' where t.i=t1.i",
originSQL: "update t , t1 set t . s = ? where t . i = t1 . i",
bindSQL: "update /*+ inl_join(t1) */ t, t1 set t.s = 'a' where t.i = t1.i",
dropSQL: "binding for update t, t1 set t.s = 'a' where t.i = t1.i",
memoryUsage: float64(136),
},
{
createSQL: "binding for insert into t1 select * from t where t.i = 1 using insert into t1 select /*+ use_index(t,index_t) */ * from t where t.i = 1",
overlaySQL: "",
querySQL: "insert into t1 select * from t where t.i = 2",
originSQL: "insert into t1 select * from t where t . i = ?",
bindSQL: "insert into t1 select /*+ use_index(t,index_t) */ * from t where t.i = 1",
dropSQL: "binding for insert into t1 select * from t where t.i = 1",
memoryUsage: float64(143),
},
{
createSQL: "binding for replace into t1 select * from t where t.i = 1 using replace into t1 select /*+ use_index(t,index_t) */ * from t where t.i = 1",
overlaySQL: "",
querySQL: "replace into t1 select * from t where t.i = 2",
originSQL: "replace into t1 select * from t where t . i = ?",
bindSQL: "replace into t1 select /*+ use_index(t,index_t) */ * from t where t.i = 1",
dropSQL: "binding for replace into t1 select * from t where t.i = 1",
memoryUsage: float64(145),
},
}
func (s *testSuite) TestGlobalBinding(c *C) {
tk := testkit.NewTestKit(c, s.store)
for _, testSQL := range testSQLs {
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(i int, s varchar(20))")
tk.MustExec("create table t1(i int, s varchar(20))")
tk.MustExec("create index index_t on t(i,s)")
metrics.BindTotalGauge.Reset()
metrics.BindMemoryUsage.Reset()
_, err := tk.Exec("create global " + testSQL.createSQL)
c.Assert(err, IsNil, Commentf("err %v", err))
if testSQL.overlaySQL != "" {
_, err = tk.Exec("create global " + testSQL.overlaySQL)
c.Assert(err, IsNil)
}
pb := &dto.Metric{}
metrics.BindTotalGauge.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
c.Assert(pb.GetGauge().GetValue(), Equals, float64(1))
metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
c.Assert(pb.GetGauge().GetValue(), Equals, testSQL.memoryUsage)
sql, hash := parser.NormalizeDigest(testSQL.querySQL)
bindData := s.domain.BindHandle().GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, testSQL.originSQL)
bind := bindData.Bindings[0]
c.Check(bind.BindSQL, Equals, testSQL.bindSQL)
c.Check(bindData.Db, Equals, "test")
c.Check(bind.Status, Equals, "using")
c.Check(bind.Charset, NotNil)
c.Check(bind.Collation, NotNil)
c.Check(bind.CreateTime, NotNil)
c.Check(bind.UpdateTime, NotNil)
rs, err := tk.Exec("show global bindings")
c.Assert(err, IsNil)
chk := rs.NewChunk()
err = rs.Next(context.TODO(), chk)
c.Check(err, IsNil)
c.Check(chk.NumRows(), Equals, 1)
row := chk.GetRow(0)
c.Check(row.GetString(0), Equals, testSQL.originSQL)
c.Check(row.GetString(1), Equals, testSQL.bindSQL)
c.Check(row.GetString(2), Equals, "test")
c.Check(row.GetString(3), Equals, "using")
c.Check(row.GetTime(4), NotNil)
c.Check(row.GetTime(5), NotNil)
c.Check(row.GetString(6), NotNil)
c.Check(row.GetString(7), NotNil)
bindHandle := bindinfo.NewBindHandle(tk.Se)
err = bindHandle.Update(true)
c.Check(err, IsNil)
c.Check(bindHandle.Size(), Equals, 1)
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, testSQL.originSQL)
bind = bindData.Bindings[0]
c.Check(bind.BindSQL, Equals, testSQL.bindSQL)
c.Check(bindData.Db, Equals, "test")
c.Check(bind.Status, Equals, "using")
c.Check(bind.Charset, NotNil)
c.Check(bind.Collation, NotNil)
c.Check(bind.CreateTime, NotNil)
c.Check(bind.UpdateTime, NotNil)
_, err = tk.Exec("drop global " + testSQL.dropSQL)
c.Check(err, IsNil)
bindData = s.domain.BindHandle().GetBindRecord(hash, sql, "test")
c.Check(bindData, IsNil)
metrics.BindTotalGauge.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
c.Assert(pb.GetGauge().GetValue(), Equals, float64(0))
metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeGlobal, bindinfo.Using).Write(pb)
// From newly created global bind handle.
c.Assert(pb.GetGauge().GetValue(), Equals, testSQL.memoryUsage)
bindHandle = bindinfo.NewBindHandle(tk.Se)
err = bindHandle.Update(true)
c.Check(err, IsNil)
c.Check(bindHandle.Size(), Equals, 0)
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, IsNil)
rs, err = tk.Exec("show global bindings")
c.Assert(err, IsNil)
chk = rs.NewChunk()
err = rs.Next(context.TODO(), chk)
c.Check(err, IsNil)
c.Check(chk.NumRows(), Equals, 0)
_, err = tk.Exec("delete from mysql.bind_info")
c.Assert(err, IsNil)
}
}
func (s *testSuite) TestSessionBinding(c *C) {
tk := testkit.NewTestKit(c, s.store)
for _, testSQL := range testSQLs {
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(i int, s varchar(20))")
tk.MustExec("create table t1(i int, s varchar(20))")
tk.MustExec("create index index_t on t(i,s)")
metrics.BindTotalGauge.Reset()
metrics.BindMemoryUsage.Reset()
_, err := tk.Exec("create session " + testSQL.createSQL)
c.Assert(err, IsNil, Commentf("err %v", err))
if testSQL.overlaySQL != "" {
_, err = tk.Exec("create session " + testSQL.overlaySQL)
c.Assert(err, IsNil)
}
pb := &dto.Metric{}
metrics.BindTotalGauge.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
c.Assert(pb.GetGauge().GetValue(), Equals, float64(1))
metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
c.Assert(pb.GetGauge().GetValue(), Equals, testSQL.memoryUsage)
handle := tk.Se.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle)
bindData := handle.GetBindRecord(testSQL.originSQL, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, testSQL.originSQL)
bind := bindData.Bindings[0]
c.Check(bind.BindSQL, Equals, testSQL.bindSQL)
c.Check(bindData.Db, Equals, "test")
c.Check(bind.Status, Equals, "using")
c.Check(bind.Charset, NotNil)
c.Check(bind.Collation, NotNil)
c.Check(bind.CreateTime, NotNil)
c.Check(bind.UpdateTime, NotNil)
rs, err := tk.Exec("show global bindings")
c.Assert(err, IsNil)
chk := rs.NewChunk()
err = rs.Next(context.TODO(), chk)
c.Check(err, IsNil)
c.Check(chk.NumRows(), Equals, 0)
rs, err = tk.Exec("show session bindings")
c.Assert(err, IsNil)
chk = rs.NewChunk()
err = rs.Next(context.TODO(), chk)
c.Check(err, IsNil)
c.Check(chk.NumRows(), Equals, 1)
row := chk.GetRow(0)
c.Check(row.GetString(0), Equals, testSQL.originSQL)
c.Check(row.GetString(1), Equals, testSQL.bindSQL)
c.Check(row.GetString(2), Equals, "test")
c.Check(row.GetString(3), Equals, "using")
c.Check(row.GetTime(4), NotNil)
c.Check(row.GetTime(5), NotNil)
c.Check(row.GetString(6), NotNil)
c.Check(row.GetString(7), NotNil)
_, err = tk.Exec("drop session " + testSQL.dropSQL)
c.Assert(err, IsNil)
bindData = handle.GetBindRecord(testSQL.originSQL, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, testSQL.originSQL)
c.Check(len(bindData.Bindings), Equals, 0)
metrics.BindTotalGauge.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
c.Assert(pb.GetGauge().GetValue(), Equals, float64(0))
metrics.BindMemoryUsage.WithLabelValues(metrics.ScopeSession, bindinfo.Using).Write(pb)
c.Assert(pb.GetGauge().GetValue(), Equals, float64(0))
}
}
func (s *testSuite) TestGlobalAndSessionBindingBothExist(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t1(id int)")
tk.MustExec("create table t2(id int)")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "HashJoin"), IsTrue)
c.Assert(tk.HasPlan("SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id", "MergeJoin"), IsTrue)
tk.MustExec("create global binding for SELECT * from t1,t2 where t1.id = t2.id using SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id")
// Test bindingUsage, which indicates how many times the binding is used.
metrics.BindUsageCounter.Reset()
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "MergeJoin"), IsTrue)
pb := &dto.Metric{}
metrics.BindUsageCounter.WithLabelValues(metrics.ScopeGlobal).Write(pb)
c.Assert(pb.GetCounter().GetValue(), Equals, float64(1))
// Test 'tidb_use_plan_baselines'
tk.MustExec("set @@tidb_use_plan_baselines = 0")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "HashJoin"), IsTrue)
tk.MustExec("set @@tidb_use_plan_baselines = 1")
// Test 'drop global binding'
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "MergeJoin"), IsTrue)
tk.MustExec("drop global binding for SELECT * from t1,t2 where t1.id = t2.id")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "HashJoin"), IsTrue)
// Test the case when global and session binding both exist
// PART1 : session binding should totally cover global binding
// use merge join as session binding here since the optimizer will choose hash join for this stmt in default
tk.MustExec("create global binding for SELECT * from t1,t2 where t1.id = t2.id using SELECT /*+ TIDB_HJ(t1, t2) */ * from t1,t2 where t1.id = t2.id")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "HashJoin"), IsTrue)
tk.MustExec("create binding for SELECT * from t1,t2 where t1.id = t2.id using SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "MergeJoin"), IsTrue)
tk.MustExec("drop global binding for SELECT * from t1,t2 where t1.id = t2.id")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "MergeJoin"), IsTrue)
// PART2 : the dropped session binding should continue to block the effect of global binding
tk.MustExec("create global binding for SELECT * from t1,t2 where t1.id = t2.id using SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id")
tk.MustExec("drop binding for SELECT * from t1,t2 where t1.id = t2.id")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "HashJoin"), IsTrue)
tk.MustExec("drop global binding for SELECT * from t1,t2 where t1.id = t2.id")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "HashJoin"), IsTrue)
}
func (s *testSuite) TestExplain(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t1(id int)")
tk.MustExec("create table t2(id int)")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "HashJoin"), IsTrue)
c.Assert(tk.HasPlan("SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id", "MergeJoin"), IsTrue)
tk.MustExec("create global binding for SELECT * from t1,t2 where t1.id = t2.id using SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "MergeJoin"), IsTrue)
tk.MustExec("drop global binding for SELECT * from t1,t2 where t1.id = t2.id")
// Add test for SetOprStmt
tk.MustExec("create index index_id on t1(id)")
c.Assert(tk.HasPlan("SELECT * from t1 union SELECT * from t1", "IndexReader"), IsFalse)
c.Assert(tk.HasPlan("SELECT * from t1 use index(index_id) union SELECT * from t1", "IndexReader"), IsTrue)
tk.MustExec("create global binding for SELECT * from t1 union SELECT * from t1 using SELECT * from t1 use index(index_id) union SELECT * from t1")
c.Assert(tk.HasPlan("SELECT * from t1 union SELECT * from t1", "IndexReader"), IsTrue)
tk.MustExec("drop global binding for SELECT * from t1 union SELECT * from t1")
}
// TestBindingSymbolList tests sql with "?, ?, ?, ?", fixes #13871
func (s *testSuite) TestBindingSymbolList(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, INDEX ia (a), INDEX ib (b));")
tk.MustExec("insert into t value(1, 1);")
// before binding
tk.MustQuery("select a, b from t where a = 3 limit 1, 100")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:ia")
c.Assert(tk.MustUseIndex("select a, b from t where a = 3 limit 1, 100", "ia(a)"), IsTrue)
tk.MustExec(`create global binding for select a, b from t where a = 1 limit 0, 1 using select a, b from t use index (ib) where a = 1 limit 0, 1`)
// after binding
tk.MustQuery("select a, b from t where a = 3 limit 1, 100")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:ib")
c.Assert(tk.MustUseIndex("select a, b from t where a = 3 limit 1, 100", "ib(b)"), IsTrue)
// Normalize
sql, hash := parser.NormalizeDigest("select a, b from t where a = 1 limit 0, 1")
bindData := s.domain.BindHandle().GetBindRecord(hash, sql, "test")
c.Assert(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select a , b from t where a = ? limit ...")
bind := bindData.Bindings[0]
c.Check(bind.BindSQL, Equals, "select a, b from t use index (ib) where a = 1 limit 0, 1")
c.Check(bindData.Db, Equals, "test")
c.Check(bind.Status, Equals, "using")
c.Check(bind.Charset, NotNil)
c.Check(bind.Collation, NotNil)
c.Check(bind.CreateTime, NotNil)
c.Check(bind.UpdateTime, NotNil)
}
func (s *testSuite) TestDMLSQLBind(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("create table t1(a int, b int, c int, key idx_b(b), key idx_c(c))")
tk.MustExec("create table t2(a int, b int, c int, key idx_b(b), key idx_c(c))")
tk.MustExec("delete from t1 where b = 1 and c > 1")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t1:idx_b")
c.Assert(tk.MustUseIndex("delete from t1 where b = 1 and c > 1", "idx_b(b)"), IsTrue)
tk.MustExec("create global binding for delete from t1 where b = 1 and c > 1 using delete /*+ use_index(t1,idx_c) */ from t1 where b = 1 and c > 1")
tk.MustExec("delete from t1 where b = 1 and c > 1")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t1:idx_c")
c.Assert(tk.MustUseIndex("delete from t1 where b = 1 and c > 1", "idx_c(c)"), IsTrue)
c.Assert(tk.HasPlan("delete t1, t2 from t1 inner join t2 on t1.b = t2.b", "HashJoin"), IsTrue)
tk.MustExec("create global binding for delete t1, t2 from t1 inner join t2 on t1.b = t2.b using delete /*+ inl_join(t1) */ t1, t2 from t1 inner join t2 on t1.b = t2.b")
c.Assert(tk.HasPlan("delete t1, t2 from t1 inner join t2 on t1.b = t2.b", "IndexJoin"), IsTrue)
tk.MustExec("update t1 set a = 1 where b = 1 and c > 1")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t1:idx_b")
c.Assert(tk.MustUseIndex("update t1 set a = 1 where b = 1 and c > 1", "idx_b(b)"), IsTrue)
tk.MustExec("create global binding for update t1 set a = 1 where b = 1 and c > 1 using update /*+ use_index(t1,idx_c) */ t1 set a = 1 where b = 1 and c > 1")
tk.MustExec("delete from t1 where b = 1 and c > 1")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t1:idx_c")
c.Assert(tk.MustUseIndex("update t1 set a = 1 where b = 1 and c > 1", "idx_c(c)"), IsTrue)
c.Assert(tk.HasPlan("update t1, t2 set t1.a = 1 where t1.b = t2.b", "HashJoin"), IsTrue)
tk.MustExec("create global binding for update t1, t2 set t1.a = 1 where t1.b = t2.b using update /*+ inl_join(t1) */ t1, t2 set t1.a = 1 where t1.b = t2.b")
c.Assert(tk.HasPlan("update t1, t2 set t1.a = 1 where t1.b = t2.b", "IndexJoin"), IsTrue)
tk.MustExec("insert into t1 select * from t2 where t2.b = 2 and t2.c > 2")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t2:idx_b")
c.Assert(tk.MustUseIndex("insert into t1 select * from t2 where t2.b = 2 and t2.c > 2", "idx_b(b)"), IsTrue)
tk.MustExec("create global binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1 using insert /*+ use_index(t2,idx_c) */ into t1 select * from t2 where t2.b = 1 and t2.c > 1")
tk.MustExec("insert into t1 select * from t2 where t2.b = 2 and t2.c > 2")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t2:idx_b")
c.Assert(tk.MustUseIndex("insert into t1 select * from t2 where t2.b = 2 and t2.c > 2", "idx_b(b)"), IsTrue)
tk.MustExec("drop global binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1")
tk.MustExec("create global binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1 using insert into t1 select /*+ use_index(t2,idx_c) */ * from t2 where t2.b = 1 and t2.c > 1")
tk.MustExec("insert into t1 select * from t2 where t2.b = 2 and t2.c > 2")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t2:idx_c")
c.Assert(tk.MustUseIndex("insert into t1 select * from t2 where t2.b = 2 and t2.c > 2", "idx_c(c)"), IsTrue)
tk.MustExec("replace into t1 select * from t2 where t2.b = 2 and t2.c > 2")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t2:idx_b")
c.Assert(tk.MustUseIndex("replace into t1 select * from t2 where t2.b = 2 and t2.c > 2", "idx_b(b)"), IsTrue)
tk.MustExec("create global binding for replace into t1 select * from t2 where t2.b = 1 and t2.c > 1 using replace into t1 select /*+ use_index(t2,idx_c) */ * from t2 where t2.b = 1 and t2.c > 1")
tk.MustExec("replace into t1 select * from t2 where t2.b = 2 and t2.c > 2")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t2:idx_c")
c.Assert(tk.MustUseIndex("replace into t1 select * from t2 where t2.b = 2 and t2.c > 2", "idx_c(c)"), IsTrue)
}
func (s *testSuite) TestBestPlanInBaselines(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, INDEX ia (a), INDEX ib (b));")
tk.MustExec("insert into t value(1, 1);")
// before binding
tk.MustQuery("select a, b from t where a = 3 limit 1, 100")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:ia")
c.Assert(tk.MustUseIndex("select a, b from t where a = 3 limit 1, 100", "ia(a)"), IsTrue)
tk.MustQuery("select a, b from t where b = 3 limit 1, 100")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:ib")
c.Assert(tk.MustUseIndex("select a, b from t where b = 3 limit 1, 100", "ib(b)"), IsTrue)
tk.MustExec(`create global binding for select a, b from t where a = 1 limit 0, 1 using select /*+ use_index(@sel_1 test.t, ia) */ a, b from t where a = 1 limit 0, 1`)
tk.MustExec(`create global binding for select a, b from t where b = 1 limit 0, 1 using select /*+ use_index(@sel_1 test.t, ib) */ a, b from t where b = 1 limit 0, 1`)
sql, hash := parser.NormalizeDigest("select a, b from t where a = 1 limit 0, 1")
bindData := s.domain.BindHandle().GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select a , b from t where a = ? limit ...")
bind := bindData.Bindings[0]
c.Check(bind.BindSQL, Equals, "select /*+ use_index(@sel_1 test.t, ia) */ a, b from t where a = 1 limit 0, 1")
c.Check(bindData.Db, Equals, "test")
c.Check(bind.Status, Equals, "using")
tk.MustQuery("select a, b from t where a = 3 limit 1, 10")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:ia")
c.Assert(tk.MustUseIndex("select a, b from t where a = 3 limit 1, 100", "ia(a)"), IsTrue)
tk.MustQuery("select a, b from t where b = 3 limit 1, 100")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:ib")
c.Assert(tk.MustUseIndex("select a, b from t where b = 3 limit 1, 100", "ib(b)"), IsTrue)
}
func (s *testSuite) TestErrorBind(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustGetErrMsg("create global binding for select * from t using select * from t", "[schema:1146]Table 'test.t' doesn't exist")
tk.MustExec("drop table if exists t")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t(i int, s varchar(20))")
tk.MustExec("create table t1(i int, s varchar(20))")
tk.MustExec("create index index_t on t(i,s)")
_, err := tk.Exec("create global binding for select * from t where i>100 using select * from t use index(index_t) where i>100")
c.Assert(err, IsNil, Commentf("err %v", err))
sql, hash := parser.NormalizeDigest("select * from t where i > ?")
bindData := s.domain.BindHandle().GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where i > ?")
bind := bindData.Bindings[0]
c.Check(bind.BindSQL, Equals, "select * from t use index(index_t) where i>100")
c.Check(bindData.Db, Equals, "test")
c.Check(bind.Status, Equals, "using")
c.Check(bind.Charset, NotNil)
c.Check(bind.Collation, NotNil)
c.Check(bind.CreateTime, NotNil)
c.Check(bind.UpdateTime, NotNil)
tk.MustExec("drop index index_t on t")
_, err = tk.Exec("select * from t where i > 10")
c.Check(err, IsNil)
s.domain.BindHandle().DropInvalidBindRecord()
rs, err := tk.Exec("show global bindings")
c.Assert(err, IsNil)
chk := rs.NewChunk()
err = rs.Next(context.TODO(), chk)
c.Check(err, IsNil)
c.Check(chk.NumRows(), Equals, 0)
}
func (s *testSuite) TestPreparedStmt(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
orgEnable := plannercore.PreparedPlanCacheEnabled()
defer func() {
plannercore.SetPreparedPlanCache(orgEnable)
}()
plannercore.SetPreparedPlanCache(false) // The plan cache must be disabled, or IndexNames would already be 1 on the first test.
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec(`prepare stmt1 from 'select * from t'`)
tk.MustExec("execute stmt1")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 0)
tk.MustExec("create binding for select * from t using select * from t use index(idx)")
tk.MustExec("execute stmt1")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 1)
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx")
tk.MustExec("drop binding for select * from t")
tk.MustExec("execute stmt1")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 0)
tk.MustExec("drop table t")
tk.MustExec("create table t(a int, b int, c int, index idx_b(b), index idx_c(c))")
tk.MustExec("set @p = 1")
tk.MustExec("prepare stmt from 'delete from t where b = ? and c > ?'")
tk.MustExec("execute stmt using @p,@p")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 1)
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_b")
tk.MustExec("create binding for delete from t where b = 2 and c > 2 using delete /*+ use_index(t,idx_c) */ from t where b = 2 and c > 2")
tk.MustExec("execute stmt using @p,@p")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 1)
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_c")
tk.MustExec("prepare stmt from 'update t set a = 1 where b = ? and c > ?'")
tk.MustExec("execute stmt using @p,@p")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 1)
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_b")
tk.MustExec("create binding for update t set a = 2 where b = 2 and c > 2 using update /*+ use_index(t,idx_c) */ t set a = 2 where b = 2 and c > 2")
tk.MustExec("execute stmt using @p,@p")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 1)
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_c")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 like t")
tk.MustExec("prepare stmt from 'insert into t1 select * from t where t.b = ? and t.c > ?'")
tk.MustExec("execute stmt using @p,@p")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 1)
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_b")
tk.MustExec("create binding for insert into t1 select * from t where t.b = 2 and t.c > 2 using insert into t1 select /*+ use_index(t,idx_c) */ * from t where t.b = 2 and t.c > 2")
tk.MustExec("execute stmt using @p,@p")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 1)
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_c")
tk.MustExec("prepare stmt from 'replace into t1 select * from t where t.b = ? and t.c > ?'")
tk.MustExec("execute stmt using @p,@p")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 1)
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_b")
tk.MustExec("create binding for replace into t1 select * from t where t.b = 2 and t.c > 2 using replace into t1 select /*+ use_index(t,idx_c) */ * from t where t.b = 2 and t.c > 2")
tk.MustExec("execute stmt using @p,@p")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 1)
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_c")
}
func (s *testSuite) TestDMLCapturePlanBaseline(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
stmtsummary.StmtSummaryByDigestMap.Clear()
tk.MustExec(" set @@tidb_capture_plan_baselines = on")
defer func() {
tk.MustExec(" set @@tidb_capture_plan_baselines = off")
}()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, c int, key idx_b(b), key idx_c(c))")
tk.MustExec("create table t1 like t")
s.domain.BindHandle().CaptureBaselines()
tk.MustQuery("show global bindings").Check(testkit.Rows())
tk.MustExec("delete from t where b = 1 and c > 1")
tk.MustExec("delete from t where b = 1 and c > 1")
tk.MustExec("update t set a = 1 where b = 1 and c > 1")
tk.MustExec("update t set a = 1 where b = 1 and c > 1")
tk.MustExec("insert into t1 select * from t where t.b = 1 and t.c > 1")
tk.MustExec("insert into t1 select * from t where t.b = 1 and t.c > 1")
tk.MustExec("replace into t1 select * from t where t.b = 1 and t.c > 1")
tk.MustExec("replace into t1 select * from t where t.b = 1 and t.c > 1")
tk.MustExec("insert into t1 values(1,1,1)")
tk.MustExec("insert into t1 values(1,1,1)")
tk.MustExec("replace into t1 values(1,1,1)")
tk.MustExec("replace into t1 values(1,1,1)")
tk.MustExec("admin capture bindings")
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 0)
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("delete from t where b = 1 and c > 1")
tk.MustExec("delete from t where b = 1 and c > 1")
tk.MustExec("update t set a = 1 where b = 1 and c > 1")
tk.MustExec("update t set a = 1 where b = 1 and c > 1")
tk.MustExec("insert into t1 select * from t where t.b = 1 and t.c > 1")
tk.MustExec("insert into t1 select * from t where t.b = 1 and t.c > 1")
tk.MustExec("replace into t1 select * from t where t.b = 1 and t.c > 1")
tk.MustExec("replace into t1 select * from t where t.b = 1 and t.c > 1")
tk.MustExec("insert into t1 values(1,1,1)")
tk.MustExec("insert into t1 values(1,1,1)")
tk.MustExec("replace into t1 values(1,1,1)")
tk.MustExec("replace into t1 values(1,1,1)")
tk.MustExec("admin capture bindings")
rows = tk.MustQuery("show global bindings").Sort().Rows()
c.Assert(len(rows), Equals, 4)
c.Assert(rows[0][0], Equals, "delete from t where b = ? and c > ?")
c.Assert(rows[0][1], Equals, "DELETE /*+ use_index(@`del_1` `test`.`t` `idx_b`)*/ FROM `t` WHERE `b`=1 AND `c`>1")
c.Assert(rows[1][0], Equals, "insert into t1 select * from t where t . b = ? and t . c > ?")
c.Assert(rows[1][1], Equals, "INSERT INTO `t1` SELECT /*+ use_index(@`sel_1` `test`.`t` `idx_b`)*/ * FROM `t` WHERE `t`.`b`=1 AND `t`.`c`>1")
c.Assert(rows[2][0], Equals, "replace into t1 select * from t where t . b = ? and t . c > ?")
c.Assert(rows[2][1], Equals, "REPLACE INTO `t1` SELECT /*+ use_index(@`sel_1` `test`.`t` `idx_b`)*/ * FROM `t` WHERE `t`.`b`=1 AND `t`.`c`>1")
c.Assert(rows[3][0], Equals, "update t set a = ? where b = ? and c > ?")
c.Assert(rows[3][1], Equals, "UPDATE /*+ use_index(@`upd_1` `test`.`t` `idx_b`)*/ `t` SET `a`=1 WHERE `b`=1 AND `c`>1")
}
func (s *testSuite) TestCapturePlanBaseline(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
stmtsummary.StmtSummaryByDigestMap.Clear()
tk.MustExec(" set @@tidb_capture_plan_baselines = on")
defer func() {
tk.MustExec(" set @@tidb_capture_plan_baselines = off")
}()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
s.domain.BindHandle().CaptureBaselines()
tk.MustQuery("show global bindings").Check(testkit.Rows())
tk.MustExec("select count(*) from t where a > 10")
tk.MustExec("select count(*) from t where a > 10")
tk.MustExec("admin capture bindings")
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 0)
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("select * from t where a > 10")
tk.MustExec("select * from t where a > 10")
tk.MustExec("admin capture bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][0], Equals, "select * from t where a > ?")
c.Assert(rows[0][1], Equals, "SELECT /*+ use_index(@`sel_1` `test`.`t` )*/ * FROM `t` WHERE `a`>10")
}
func (s *testSuite) TestCaptureDBCaseSensitivity(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
stmtsummary.StmtSummaryByDigestMap.Clear()
tk.MustExec("drop database if exists SPM")
tk.MustExec("create database SPM")
tk.MustExec("use SPM")
tk.MustExec("create table t(a int, b int, key(b))")
tk.MustExec("create global binding for select * from t using select /*+ use_index(t) */ * from t")
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("select /*+ use_index(t,b) */ * from t")
tk.MustExec("select /*+ use_index(t,b) */ * from t")
tk.MustExec("admin capture bindings")
// The capture should ignore case sensitivity of the DB name when checking whether a binding already exists,
// so no new binding is captured.
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Equals, "select /*+ use_index(t) */ * from t")
c.Assert(rows[0][8], Equals, "manual")
}
func (s *testSuite) TestCaptureBaselinesDefaultDB(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
stmtsummary.StmtSummaryByDigestMap.Clear()
tk.MustExec(" set @@tidb_capture_plan_baselines = on")
defer func() {
tk.MustExec(" set @@tidb_capture_plan_baselines = off")
}()
tk.MustExec("use test")
tk.MustExec("drop database if exists spm")
tk.MustExec("create database spm")
tk.MustExec("create table spm.t(a int, index idx_a(a))")
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("select * from spm.t ignore index(idx_a) where a > 10")
tk.MustExec("select * from spm.t ignore index(idx_a) where a > 10")
tk.MustExec("admin capture bindings")
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
// Default DB should be "" when all tables have an explicit database name.
c.Assert(rows[0][2], Equals, "")
c.Assert(rows[0][3], Equals, "using")
tk.MustExec("use spm")
tk.MustExec("select * from spm.t where a > 10")
// Should use TableScan because of the "ignore index" binding.
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 0)
}
func (s *testSuite) TestDropSingleBindings(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, c int, index idx_a(a), index idx_b(b))")
// Test drop session bindings.
tk.MustExec("create binding for select * from t using select * from t use index(idx_a)")
tk.MustExec("create binding for select * from t using select * from t use index(idx_b)")
rows := tk.MustQuery("show bindings").Rows()
// The size of bindings is one, because for one normalized SQL,
// `create binding` replaces all of the original bindings.
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Equals, "select * from t use index(idx_b)")
tk.MustExec("drop binding for select * from t using select * from t use index(idx_a)")
rows = tk.MustQuery("show bindings").Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Equals, "select * from t use index(idx_b)")
tk.MustExec("drop table t")
tk.MustExec("drop binding for select * from t using select * from t use index(idx_b)")
rows = tk.MustQuery("show bindings").Rows()
c.Assert(len(rows), Equals, 0)
tk.MustExec("create table t(a int, b int, c int, index idx_a(a), index idx_b(b))")
// Test drop global bindings.
tk.MustExec("create global binding for select * from t using select * from t use index(idx_a)")
tk.MustExec("create global binding for select * from t using select * from t use index(idx_b)")
rows = tk.MustQuery("show global bindings").Rows()
// The size of bindings is one, because for one normalized SQL,
// `create binding` replaces all of the original bindings.
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Equals, "select * from t use index(idx_b)")
tk.MustExec("drop global binding for select * from t using select * from t use index(idx_a)")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Equals, "select * from t use index(idx_b)")
tk.MustExec("drop table t")
tk.MustExec("drop global binding for select * from t using select * from t use index(idx_b)")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 0)
}
func (s *testSuite) TestDMLEvolveBaselines(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, c int, index idx_b(b), index idx_c(c))")
tk.MustExec("insert into t values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5)")
tk.MustExec("analyze table t")
tk.MustExec("set @@tidb_evolve_plan_baselines=1")
tk.MustExec("create global binding for delete from t where b = 1 and c > 1 using delete /*+ use_index(t,idx_c) */ from t where b = 1 and c > 1")
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
tk.MustExec("delete /*+ use_index(t,idx_b) */ from t where b = 2 and c > 1")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_c")
tk.MustExec("admin flush bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
tk.MustExec("admin evolve bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
tk.MustExec("create global binding for update t set a = 1 where b = 1 and c > 1 using update /*+ use_index(t,idx_c) */ t set a = 1 where b = 1 and c > 1")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 2)
tk.MustExec("update /*+ use_index(t,idx_b) */ t set a = 2 where b = 2 and c > 1")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_c")
tk.MustExec("admin flush bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 2)
tk.MustExec("admin evolve bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 2)
tk.MustExec("create table t1 like t")
tk.MustExec("create global binding for insert into t1 select * from t where t.b = 1 and t.c > 1 using insert into t1 select /*+ use_index(t,idx_c) */ * from t where t.b = 1 and t.c > 1")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 3)
tk.MustExec("insert into t1 select /*+ use_index(t,idx_b) */ * from t where t.b = 2 and t.c > 2")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_c")
tk.MustExec("admin flush bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 3)
tk.MustExec("admin evolve bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 3)
tk.MustExec("create global binding for replace into t1 select * from t where t.b = 1 and t.c > 1 using replace into t1 select /*+ use_index(t,idx_c) */ * from t where t.b = 1 and t.c > 1")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 4)
tk.MustExec("replace into t1 select /*+ use_index(t,idx_b) */ * from t where t.b = 2 and t.c > 2")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_c")
tk.MustExec("admin flush bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 4)
tk.MustExec("admin evolve bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 4)
}
func (s *testSuite) TestAddEvolveTasks(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, c int, index idx_a(a), index idx_b(b), index idx_c(c))")
tk.MustExec("insert into t values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5)")
tk.MustExec("analyze table t")
tk.MustExec("create global binding for select * from t where a >= 1 and b >= 1 and c = 0 using select * from t use index(idx_a) where a >= 1 and b >= 1 and c = 0")
tk.MustExec("set @@tidb_evolve_plan_baselines=1")
// It cannot choose the table path even though it has the lowest cost.
tk.MustQuery("select * from t where a >= 4 and b >= 1 and c = 0")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_a")
tk.MustExec("admin flush bindings")
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 2)
c.Assert(rows[1][1], Equals, "SELECT /*+ use_index(@`sel_1` `test`.`t` )*/ * FROM `test`.`t` WHERE `a`>=4 AND `b`>=1 AND `c`=0")
c.Assert(rows[1][3], Equals, "pending verify")
tk.MustExec("admin evolve bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 2)
c.Assert(rows[1][1], Equals, "SELECT /*+ use_index(@`sel_1` `test`.`t` )*/ * FROM `test`.`t` WHERE `a`>=4 AND `b`>=1 AND `c`=0")
status := rows[1][3].(string)
c.Assert(status == "using" || status == "rejected", IsTrue)
}
func (s *testSuite) TestRuntimeHintsInEvolveTasks(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("set @@tidb_evolve_plan_baselines=1")
tk.MustExec("create table t(a int, b int, c int, index idx_a(a), index idx_b(b), index idx_c(c))")
// Runtime hints that are not contained in the original binding should be ignored.
tk.MustExec("create global binding for select * from t where a >= 1 and b >= 1 and c = 0 using select * from t use index(idx_a) where a >= 1 and b >= 1 and c = 0")
tk.MustQuery("select /*+ MAX_EXECUTION_TIME(5000) */* from t where a >= 4 and b >= 1 and c = 0")
tk.MustExec("admin flush bindings")
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 2)
c.Assert(rows[1][1], Equals, "SELECT /*+ use_index(@`sel_1` `test`.`t` `idx_c`)*/ * FROM `test`.`t` WHERE `a`>=4 AND `b`>=1 AND `c`=0") // MAX_EXECUTION_TIME is ignored
s.cleanBindingEnv(tk)
tk.MustExec("create global binding for select * from t where a >= 1 and b >= 1 and c = 0 using select /*+ MAX_EXECUTION_TIME(5000) */* from t use index(idx_a) where a >= 1 and b >= 1 and c = 0")
tk.MustQuery("select /*+ MAX_EXECUTION_TIME(5000) */* from t where a >= 4 and b >= 1 and c = 0")
tk.MustExec("admin flush bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 2)
c.Assert(rows[1][1], Equals, "SELECT /*+ use_index(@`sel_1` `test`.`t` `idx_c`), max_execution_time(5000)*/ * FROM `test`.`t` WHERE `a`>=4 AND `b`>=1 AND `c`=0")
}
func (s *testSuite) TestBindingCache(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("create global binding for select * from t using select * from t use index(idx);")
tk.MustExec("create database tmp")
tk.MustExec("use tmp")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("create global binding for select * from t using select * from t use index(idx);")
c.Assert(s.domain.BindHandle().Update(false), IsNil)
c.Assert(s.domain.BindHandle().Update(false), IsNil)
res := tk.MustQuery("show global bindings")
c.Assert(len(res.Rows()), Equals, 2)
tk.MustExec("drop global binding for select * from t;")
c.Assert(s.domain.BindHandle().Update(false), IsNil)
c.Assert(len(s.domain.BindHandle().GetAllBindRecord()), Equals, 1)
}
func (s *testSuite) TestDefaultSessionVars(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustQuery(`show variables like "%baselines%"`).Sort().Check(testkit.Rows(
"tidb_capture_plan_baselines OFF",
"tidb_evolve_plan_baselines OFF",
"tidb_use_plan_baselines ON"))
tk.MustQuery(`show global variables like "%baselines%"`).Sort().Check(testkit.Rows(
"tidb_capture_plan_baselines OFF",
"tidb_evolve_plan_baselines OFF",
"tidb_use_plan_baselines ON"))
}
func (s *testSuite) TestCaptureBaselinesScope(c *C) {
tk1 := testkit.NewTestKit(c, s.store)
tk2 := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk1)
tk1.MustQuery(`show session variables like "tidb_capture_plan_baselines"`).Check(testkit.Rows(
"tidb_capture_plan_baselines OFF",
))
tk1.MustQuery(`show global variables like "tidb_capture_plan_baselines"`).Check(testkit.Rows(
"tidb_capture_plan_baselines OFF",
))
tk1.MustQuery(`select @@session.tidb_capture_plan_baselines`).Check(testkit.Rows(
"0",
))
tk1.MustQuery(`select @@global.tidb_capture_plan_baselines`).Check(testkit.Rows(
"0",
))
tk1.MustExec("set @@session.tidb_capture_plan_baselines = on")
defer func() {
tk1.MustExec(" set @@session.tidb_capture_plan_baselines = off")
}()
tk1.MustQuery(`show session variables like "tidb_capture_plan_baselines"`).Check(testkit.Rows(
"tidb_capture_plan_baselines ON",
))
tk1.MustQuery(`show global variables like "tidb_capture_plan_baselines"`).Check(testkit.Rows(
"tidb_capture_plan_baselines OFF",
))
tk1.MustQuery(`select @@session.tidb_capture_plan_baselines`).Check(testkit.Rows(
"1",
))
tk1.MustQuery(`select @@global.tidb_capture_plan_baselines`).Check(testkit.Rows(
"0",
))
tk2.MustQuery(`show session variables like "tidb_capture_plan_baselines"`).Check(testkit.Rows(
"tidb_capture_plan_baselines ON",
))
tk2.MustQuery(`show global variables like "tidb_capture_plan_baselines"`).Check(testkit.Rows(
"tidb_capture_plan_baselines OFF",
))
tk2.MustQuery(`select @@session.tidb_capture_plan_baselines`).Check(testkit.Rows(
"1",
))
tk2.MustQuery(`select @@global.tidb_capture_plan_baselines`).Check(testkit.Rows(
"0",
))
}
func (s *testSuite) TestDuplicateBindings(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("create global binding for select * from t using select * from t use index(idx);")
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
createTime := rows[0][4]
time.Sleep(1000000)
tk.MustExec("create global binding for select * from t using select * from t use index(idx);")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(createTime == rows[0][4], Equals, false)
tk.MustExec("create session binding for select * from t using select * from t use index(idx);")
rows = tk.MustQuery("show session bindings").Rows()
c.Assert(len(rows), Equals, 1)
createTime = rows[0][4]
time.Sleep(1000000)
tk.MustExec("create session binding for select * from t using select * from t use index(idx);")
rows = tk.MustQuery("show session bindings").Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(createTime == rows[0][4], Equals, false)
}
func (s *testSuite) TestStmtHints(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("create global binding for select * from t using select /*+ MAX_EXECUTION_TIME(100), MEMORY_QUOTA(1 GB) */ * from t use index(idx)")
tk.MustQuery("select * from t")
c.Assert(tk.Se.GetSessionVars().StmtCtx.MemQuotaQuery, Equals, int64(1073741824))
c.Assert(tk.Se.GetSessionVars().StmtCtx.MaxExecutionTime, Equals, uint64(100))
tk.MustQuery("select a, b from t")
c.Assert(tk.Se.GetSessionVars().StmtCtx.MemQuotaQuery, Equals, int64(0))
c.Assert(tk.Se.GetSessionVars().StmtCtx.MaxExecutionTime, Equals, uint64(0))
}
func (s *testSuite) TestReloadBindings(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("create global binding for select * from t using select * from t use index(idx)")
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
rows = tk.MustQuery("select * from mysql.bind_info").Rows()
c.Assert(len(rows), Equals, 1)
tk.MustExec("truncate table mysql.bind_info")
c.Assert(s.domain.BindHandle().Update(false), IsNil)
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(s.domain.BindHandle().Update(true), IsNil)
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
tk.MustExec("admin reload bindings")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 0)
}
func (s *testSuite) TestDefaultDB(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("create global binding for select * from test.t using select * from test.t use index(idx)")
tk.MustExec("use mysql")
tk.MustQuery("select * from test.t")
// Even in another database, we could still use the bindings.
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx")
tk.MustExec("drop global binding for select * from test.t")
tk.MustQuery("show global bindings").Check(testkit.Rows())
tk.MustExec("use test")
tk.MustExec("create session binding for select * from test.t using select * from test.t use index(idx)")
tk.MustExec("use mysql")
tk.MustQuery("select * from test.t")
// Even in another database, we could still use the bindings.
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx")
tk.MustExec("drop session binding for select * from test.t")
tk.MustQuery("show session bindings").Check(testkit.Rows())
}
func (s *testSuite) TestEvolveInvalidBindings(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx_a(a))")
tk.MustExec("create global binding for select * from t where a > 10 using select /*+ USE_INDEX(t) */ * from t where a > 10")
// Manufacture a rejected binding by hacking mysql.bind_info.
tk.MustExec("insert into mysql.bind_info values('select * from t where a > ?', 'select /*+ USE_INDEX(t,idx_a) */ * from t where a > 10', 'test', 'rejected', '2000-01-01 09:00:00', '2000-01-01 09:00:00', '', '','" +
bindinfo.Manual + "')")
tk.MustQuery("select bind_sql, status from mysql.bind_info").Sort().Check(testkit.Rows(
"select /*+ USE_INDEX(t) */ * from t where a > 10 using",
"select /*+ USE_INDEX(t,idx_a) */ * from t where a > 10 rejected",
))
// Reload cache from mysql.bind_info.
s.domain.BindHandle().Clear()
c.Assert(s.domain.BindHandle().Update(true), IsNil)
tk.MustExec("alter table t drop index idx_a")
tk.MustExec("admin evolve bindings")
c.Assert(s.domain.BindHandle().Update(false), IsNil)
rows := tk.MustQuery("show global bindings").Sort().Rows()
c.Assert(len(rows), Equals, 2)
// Make sure this "using" binding is not overridden.
c.Assert(rows[0][1], Equals, "select /*+ USE_INDEX(t) */ * from t where a > 10")
status := rows[0][3].(string)
c.Assert(status == "using", IsTrue)
c.Assert(rows[1][1], Equals, "select /*+ USE_INDEX(t,idx_a) */ * from t where a > 10")
status = rows[1][3].(string)
c.Assert(status == "using" || status == "rejected", IsTrue)
}
func (s *testSuite) TestOutdatedInfoSchema(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("create global binding for select * from t using select * from t use index(idx)")
c.Assert(s.domain.BindHandle().Update(false), IsNil)
tk.MustExec("truncate table mysql.bind_info")
tk.MustExec("create global binding for select * from t using select * from t use index(idx)")
}
func (s *testSuite) TestPrivileges(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustExec("create global binding for select * from t using select * from t use index(idx)")
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
tk.MustExec("create user test@'%'")
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "test", Hostname: "%"}, nil, nil), IsTrue)
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 0)
}
func (s *testSuite) TestHintsSetEvolveTask(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, index idx_a(a))")
tk.MustExec("create global binding for select * from t where a > 10 using select * from t ignore index(idx_a) where a > 10")
tk.MustExec("set @@tidb_evolve_plan_baselines=1")
tk.MustQuery("select * from t use index(idx_a) where a > 0")
bindHandle := s.domain.BindHandle()
bindHandle.SaveEvolveTasksToStore()
// Verify that the added Binding for evolution contains a valid ID and Hint; otherwise a panic may happen.
sql, hash := parser.NormalizeDigest("select * from t where a > ?")
bindData := bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 2)
bind := bindData.Bindings[1]
c.Assert(bind.Status, Equals, bindinfo.PendingVerify)
c.Assert(bind.ID, Not(Equals), "")
c.Assert(bind.Hint, NotNil)
}
func (s *testSuite) TestHintsSetID(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, index idx_a(a))")
tk.MustExec("create global binding for select * from t where a > 10 using select /*+ use_index(test.t, idx_a) */ * from t where a > 10")
bindHandle := s.domain.BindHandle()
// Verify that the added Binding contains an ID with the restored query block.
sql, hash := parser.NormalizeDigest("select * from t where a > ?")
bindData := bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 1)
bind := bindData.Bindings[0]
c.Assert(bind.ID, Equals, "use_index(@`sel_1` `test`.`t` `idx_a`)")
s.cleanBindingEnv(tk)
tk.MustExec("create global binding for select * from t where a > 10 using select /*+ use_index(t, idx_a) */ * from t where a > 10")
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 1)
bind = bindData.Bindings[0]
c.Assert(bind.ID, Equals, "use_index(@`sel_1` `test`.`t` `idx_a`)")
s.cleanBindingEnv(tk)
tk.MustExec("create global binding for select * from t where a > 10 using select /*+ use_index(@sel_1 t, idx_a) */ * from t where a > 10")
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 1)
bind = bindData.Bindings[0]
c.Assert(bind.ID, Equals, "use_index(@`sel_1` `test`.`t` `idx_a`)")
s.cleanBindingEnv(tk)
tk.MustExec("create global binding for select * from t where a > 10 using select /*+ use_index(@qb1 t, idx_a) qb_name(qb1) */ * from t where a > 10")
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 1)
bind = bindData.Bindings[0]
c.Assert(bind.ID, Equals, "use_index(@`sel_1` `test`.`t` `idx_a`)")
s.cleanBindingEnv(tk)
tk.MustExec("create global binding for select * from t where a > 10 using select /*+ use_index(T, IDX_A) */ * from t where a > 10")
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 1)
bind = bindData.Bindings[0]
c.Assert(bind.ID, Equals, "use_index(@`sel_1` `test`.`t` `idx_a`)")
s.cleanBindingEnv(tk)
err := tk.ExecToErr("create global binding for select * from t using select /*+ non_exist_hint() */ * from t")
c.Assert(terror.ErrorEqual(err, parser.ErrWarnOptimizerHintParseError), IsTrue)
tk.MustExec("create global binding for select * from t where a > 10 using select * from t where a > 10")
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 1)
bind = bindData.Bindings[0]
c.Assert(bind.ID, Equals, "")
}
func (s *testSuite) TestCapturePlanBaselineIgnoreTiFlash(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
stmtsummary.StmtSummaryByDigestMap.Clear()
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, key(a), key(b))")
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("select * from t")
tk.MustExec("select * from t")
// Create virtual tiflash replica info.
dom := domain.GetDomain(tk.Se)
is := dom.InfoSchema()
db, exists := is.SchemaByName(model.NewCIStr("test"))
c.Assert(exists, IsTrue)
for _, tblInfo := range db.Tables {
if tblInfo.Name.L == "t" {
tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{
Count: 1,
Available: true,
}
}
}
// Here the plan is the TiFlash plan.
rows := tk.MustQuery("explain select * from t").Rows()
c.Assert(fmt.Sprintf("%v", rows[len(rows)-1][2]), Equals, "cop[tiflash]")
tk.MustQuery("show global bindings").Check(testkit.Rows())
tk.MustExec("admin capture bindings")
// The captured binding should not use the TiFlash plan even though we have a TiFlash replica.
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][0], Equals, "select * from t")
c.Assert(rows[0][1], Equals, "SELECT /*+ use_index(@`sel_1` `test`.`t` )*/ * FROM `t`")
}
func (s *testSuite) TestNotEvolvePlanForReadStorageHint(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx_a(a), index idx_b(b))")
tk.MustExec("insert into t values (1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), (9,9), (10,10)")
tk.MustExec("analyze table t")
// Create virtual tiflash replica info.
dom := domain.GetDomain(tk.Se)
is := dom.InfoSchema()
db, exists := is.SchemaByName(model.NewCIStr("test"))
c.Assert(exists, IsTrue)
for _, tblInfo := range db.Tables {
if tblInfo.Name.L == "t" {
tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{
Count: 1,
Available: true,
}
}
}
// Make sure the best plan for the SQL uses the TiKV index.
tk.MustExec("set @@session.tidb_executor_concurrency = 4;")
rows := tk.MustQuery("explain select * from t where a >= 11 and b >= 11").Rows()
c.Assert(fmt.Sprintf("%v", rows[len(rows)-1][2]), Equals, "cop[tikv]")
tk.MustExec("create global binding for select * from t where a >= 1 and b >= 1 using select /*+ read_from_storage(tiflash[t]) */ * from t where a >= 1 and b >= 1")
tk.MustExec("set @@tidb_evolve_plan_baselines=1")
// Even if the TiKV index has a lower cost, it chooses TiFlash.
rows = tk.MustQuery("explain select * from t where a >= 11 and b >= 11").Rows()
c.Assert(fmt.Sprintf("%v", rows[len(rows)-1][2]), Equals, "cop[tiflash]")
tk.MustExec("admin flush bindings")
rows = tk.MustQuery("show global bindings").Rows()
// No evolve task is generated, because the original binding is a read_from_storage binding.
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][1], Equals, "select /*+ read_from_storage(tiflash[t]) */ * from t where a >= 1 and b >= 1")
c.Assert(rows[0][3], Equals, "using")
}
func (s *testSuite) TestBindingWithIsolationRead(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx_a(a), index idx_b(b))")
tk.MustExec("insert into t values (1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8), (9,9), (10,10)")
tk.MustExec("analyze table t")
// Create virtual tiflash replica info.
dom := domain.GetDomain(tk.Se)
is := dom.InfoSchema()
db, exists := is.SchemaByName(model.NewCIStr("test"))
c.Assert(exists, IsTrue)
for _, tblInfo := range db.Tables {
if tblInfo.Name.L == "t" {
tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{
Count: 1,
Available: true,
}
}
}
tk.MustExec("create global binding for select * from t where a >= 1 and b >= 1 using select * from t use index(idx_a) where a >= 1 and b >= 1")
tk.MustExec("set @@tidb_use_plan_baselines = 1")
rows := tk.MustQuery("explain select * from t where a >= 11 and b >= 11").Rows()
c.Assert(rows[len(rows)-1][2], Equals, "cop[tikv]")
// Even though we build a binding that uses the TiKV index, once isolation read is restricted to TiFlash, the plan chooses TiFlash instead of the TiKV index.
tk.MustExec("set @@tidb_isolation_read_engines = \"tiflash\"")
rows = tk.MustQuery("explain select * from t where a >= 11 and b >= 11").Rows()
c.Assert(rows[len(rows)-1][2], Equals, "cop[tiflash]")
}
func (s *testSuite) TestReCreateBindAfterEvolvePlan(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, c int, index idx_a(a), index idx_b(b), index idx_c(c))")
tk.MustExec("insert into t values (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5)")
tk.MustExec("analyze table t")
tk.MustExec("create global binding for select * from t where a >= 1 and b >= 1 using select * from t use index(idx_a) where a >= 1 and b >= 1")
tk.MustExec("set @@tidb_evolve_plan_baselines=1")
// It cannot choose the table path even though it has the lowest cost.
tk.MustQuery("select * from t where a >= 0 and b >= 0")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_a")
tk.MustExec("admin flush bindings")
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 2)
c.Assert(rows[1][1], Equals, "SELECT /*+ use_index(@`sel_1` `test`.`t` )*/ * FROM `test`.`t` WHERE `a`>=0 AND `b`>=0")
c.Assert(rows[1][3], Equals, "pending verify")
tk.MustExec("create global binding for select * from t where a >= 1 and b >= 1 using select * from t use index(idx_b) where a >= 1 and b >= 1")
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
tk.MustQuery("select * from t where a >= 4 and b >= 1")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_b")
}
func (s *testSuite) TestInvisibleIndex(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, unique idx_a(a), index idx_b(b) invisible)")
tk.MustGetErrMsg(
"create global binding for select * from t using select * from t use index(idx_b) ",
"[planner:1176]Key 'idx_b' doesn't exist in table 't'")
// Create a binding using the visible index.
tk.MustExec("create global binding for select * from t using select * from t use index(idx_a) ")
tk.MustQuery("select * from t")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_a")
c.Assert(tk.MustUseIndex("select * from t", "idx_a(a)"), IsTrue)
tk.MustExec(`prepare stmt1 from 'select * from t'`)
tk.MustExec("execute stmt1")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 1)
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_a")
// And then make this index invisible
tk.MustExec("alter table t alter index idx_a invisible")
tk.MustQuery("select * from t")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 0)
tk.MustExec("execute stmt1")
c.Assert(len(tk.Se.GetSessionVars().StmtCtx.IndexNames), Equals, 0)
tk.MustExec("drop binding for select * from t")
}
func (s *testSuite) TestBindingSource(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, index idx_a(a))")
// Test the Source field for a binding created via SQL.
tk.MustExec("create global binding for select * from t where a > 10 using select * from t ignore index(idx_a) where a > 10")
bindHandle := s.domain.BindHandle()
sql, hash := parser.NormalizeDigest("select * from t where a > ?")
bindData := bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 1)
bind := bindData.Bindings[0]
c.Assert(bind.Source, Equals, bindinfo.Manual)
// Test the Source field for an evolved SQL.
tk.MustExec("set @@tidb_evolve_plan_baselines=1")
tk.MustQuery("select * from t where a > 10")
bindHandle.SaveEvolveTasksToStore()
sql, hash = parser.NormalizeDigest("select * from t where a > ?")
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 2)
bind = bindData.Bindings[1]
c.Assert(bind.Source, Equals, bindinfo.Evolve)
tk.MustExec("set @@tidb_evolve_plan_baselines=0")
// Test the Source field for captured SQLs.
stmtsummary.StmtSummaryByDigestMap.Clear()
tk.MustExec("set @@tidb_capture_plan_baselines = on")
defer func() {
tk.MustExec("set @@tidb_capture_plan_baselines = off")
}()
tk.MustExec("use test")
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("select * from t ignore index(idx_a) where a < 10")
tk.MustExec("select * from t ignore index(idx_a) where a < 10")
tk.MustExec("admin capture bindings")
bindHandle.CaptureBaselines()
sql, hash = parser.NormalizeDigest("select * from t where a < ?")
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a < ?")
c.Assert(len(bindData.Bindings), Equals, 1)
bind = bindData.Bindings[0]
c.Assert(bind.Source, Equals, bindinfo.Capture)
}
func (s *testSuite) TestSPMHitInfo(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t1(id int)")
tk.MustExec("create table t2(id int)")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "HashJoin"), IsTrue)
c.Assert(tk.HasPlan("SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id", "MergeJoin"), IsTrue)
tk.MustExec("SELECT * from t1,t2 where t1.id = t2.id")
tk.MustQuery(`select @@last_plan_from_binding;`).Check(testkit.Rows("0"))
tk.MustExec("create global binding for SELECT * from t1,t2 where t1.id = t2.id using SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id")
c.Assert(tk.HasPlan("SELECT * from t1,t2 where t1.id = t2.id", "MergeJoin"), IsTrue)
tk.MustExec("SELECT * from t1,t2 where t1.id = t2.id")
tk.MustQuery(`select @@last_plan_from_binding;`).Check(testkit.Rows("1"))
tk.MustExec("drop global binding for SELECT * from t1,t2 where t1.id = t2.id")
}
func (s *testSuite) TestIssue19836(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, key (a));")
tk.MustExec("CREATE SESSION BINDING FOR select * from t where a = 1 limit 5, 5 USING select * from t ignore index (a) where a = 1 limit 5, 5;")
tk.MustExec("PREPARE stmt FROM 'select * from t where a = 40 limit ?, ?';")
tk.MustExec("set @a=1;")
tk.MustExec("set @b=2;")
tk.MustExec("EXECUTE stmt USING @a, @b;")
tk.Se.SetSessionManager(&mockSessionManager{
PS: []*util.ProcessInfo{tk.Se.ShowProcess()},
})
explainResult := testkit.Rows(
"Limit_8 2.00 0 root time:0s, loops:0 offset:1, count:2 N/A N/A",
"โโTableReader_14 3.00 0 root time:0s, loops:0 data:Limit_13 N/A N/A",
" โโLimit_13 3.00 0 cop[tikv] offset:0, count:3 N/A N/A",
" โโSelection_12 3.00 0 cop[tikv] eq(test.t.a, 40) N/A N/A",
" โโTableFullScan_11 3000.00 0 cop[tikv] table:t keep order:false, stats:pseudo N/A N/A",
)
tk.MustQuery("explain for connection " + strconv.FormatUint(tk.Se.ShowProcess().ID, 10)).Check(explainResult)
}
func (s *testSuite) TestReCreateBind(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, b int, index idx(a))")
tk.MustQuery("select * from mysql.bind_info").Check(testkit.Rows())
tk.MustQuery("show global bindings").Check(testkit.Rows())
tk.MustExec("create global binding for select * from t using select * from t")
tk.MustQuery("select original_sql, status from mysql.bind_info").Check(testkit.Rows(
"select * from t using",
))
rows := tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][0], Equals, "select * from t")
c.Assert(rows[0][3], Equals, "using")
tk.MustExec("create global binding for select * from t using select * from t")
tk.MustQuery("select original_sql, status from mysql.bind_info").Check(testkit.Rows(
"select * from t using",
))
rows = tk.MustQuery("show global bindings").Rows()
c.Assert(len(rows), Equals, 1)
c.Assert(rows[0][0], Equals, "select * from t")
c.Assert(rows[0][3], Equals, "using")
}
func (s *testSuite) TestDMLIndexHintBind(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("create table t(a int, b int, c int, key idx_b(b), key idx_c(c))")
tk.MustExec("delete from t where b = 1 and c > 1")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_b")
c.Assert(tk.MustUseIndex("delete from t where b = 1 and c > 1", "idx_b(b)"), IsTrue)
tk.MustExec("create global binding for delete from t where b = 1 and c > 1 using delete from t use index(idx_c) where b = 1 and c > 1")
tk.MustExec("delete from t where b = 1 and c > 1")
c.Assert(tk.Se.GetSessionVars().StmtCtx.IndexNames[0], Equals, "t:idx_c")
c.Assert(tk.MustUseIndex("delete from t where b = 1 and c > 1", "idx_c(c)"), IsTrue)
}
| [
"\"log_level\""
]
| []
| [
"log_level"
]
| [] | ["log_level"] | go | 1 | 0 | |
btcdet/datasets/finddiff.py | import copy
import pickle
import sys
from pathlib import Path
from sklearn.cluster import KMeans, DBSCAN
import numpy as np
from skimage import io
import mayavi.mlab as mlab
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
from ..ops.roiaware_pool3d import roiaware_pool3d_utils
from ..utils import box_utils, calibration_kitti, common_utils, object3d_kitti, point_box_utils
from .dataset import DatasetTemplate
import torch
from sklearn.manifold import TSNE
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
sys.path.append('/home/xharlie/dev/match2det/tools/visual_utils')
import visualize_utils as vu
from PIL import ImageColor
from ..ops.chamfer_distance import ChamferDistance
from ..ops.iou3d_nms import iou3d_nms_utils
chamfer_dist = ChamferDistance()
NUM_POINT_FEATURES = 4
def extract_allpnts(root_path=None, splits=['train','val']):
all_db_infos_lst = []
box_dims_lst = []
pnts_lst = []
mirrored_pnts_lst = []
for split in splits:
db_info_save_path = Path(root_path) / ('kitti_dbinfos_%s.pkl' % split)
with open(db_info_save_path, 'rb') as f:
all_db_infos = pickle.load(f)['Car']
for k in range(len(all_db_infos)):
info = all_db_infos[k]
obj_type = info['name']
if obj_type != "Car":
continue
gt_box = info['box3d_lidar']
all_db_infos_lst.append(info)
return all_db_infos_lst
if __name__ == '__main__':
PNT_THRESH = 400
ROOT_DIR = (Path(__file__).resolve().parent / '../../').resolve()
print("ROOT_DIR", ROOT_DIR)
path = ROOT_DIR / 'data' / 'kitti' / 'detection3d'
match_info_save_path = path / "match_maxdist_10extcrdsnum_info_car.pkl"
cluster_num = 20
voxel_size = [0.08, 0.08, 0.08]
all_db_infos_lst = extract_allpnts(
root_path=path, splits=['train','val']
)
range_all = np.zeros([18, 3])
x_all = np.zeros([18, 3])
y_all = np.zeros([18, 3])
diff_count = np.array([0,0,0])
diff_dist = np.array([0,0,0])
# db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
# 'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
# 'difficulty': difficulty[i], 'bbox': bbox[i], 'score': annos['score'][i]}
for info in all_db_infos_lst:
diff = info['difficulty']
if diff > -1:
box = info['box3d_lidar']
dist = np.linalg.norm(box[:3])
ind = int(dist/5)
xind = int(box[0]/5)
yind = int((box[1] + 40) / 5)
# print("diff", diff, "dist", dist, "ind", ind)
range_all[ind, diff] = range_all[ind, diff] + 1
x_all[xind, diff] = x_all[xind, diff] + 1
y_all[yind, diff] = y_all[yind, diff] + 1
diff_count[diff] += 1
diff_dist[diff] += dist
print("avg: ", diff_dist/diff_count)
print("breakdown: ", range_all)
print("x breakdown: ", x_all)
print("y breakdown: ", y_all)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
cmd/manager/main.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os"
"github.com/richardcase/itsrandomoperator/pkg/apis"
"github.com/richardcase/itsrandomoperator/pkg/controller"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
)
func main() {
logf.SetLogger(logf.ZapLogger(false))
log := logf.Log.WithName("entrypoint")
namespace := os.Getenv("POD_NAMESPACE")
if len(namespace) == 0 {
log.Info("must set env POD_NAMESPACE")
return
}
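	// POD_NAMESPACE is used below as the leader-election namespace. It is not set
	// by this program itself; a typical (assumed, not verified against this repo's
	// manifests) way to provide it is the Kubernetes downward API, e.g.:
	//
	//   env:
	//   - name: POD_NAMESPACE
	//     valueFrom:
	//       fieldRef:
	//         fieldPath: metadata.namespace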
// Get a config to talk to the apiserver
log.Info("setting up client for manager")
cfg, err := config.GetConfig()
if err != nil {
log.Error(err, "unable to set up client config")
os.Exit(1)
}
// Create a new Cmd to provide shared dependencies and start components
log.Info("setting up manager")
mgr, err := manager.New(cfg, manager.Options{
LeaderElection: true,
LeaderElectionID: "rpa-operator",
LeaderElectionNamespace: namespace,
})
if err != nil {
log.Error(err, "unable to set up overall controller manager")
os.Exit(1)
}
log.Info("Registering Components.")
// Setup Scheme for all resources
log.Info("setting up scheme")
if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
log.Error(err, "unable to add APIs to scheme")
os.Exit(1)
}
// Setup all Controllers
log.Info("Setting up controller")
if err := controller.AddToManager(mgr); err != nil {
log.Error(err, "unable to register controllers to the manager")
os.Exit(1)
}
// Start the Cmd
log.Info("Starting the Cmd.")
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
log.Error(err, "unable to run the manager")
os.Exit(1)
}
}
| [
"\"POD_NAMESPACE\""
]
| []
| [
"POD_NAMESPACE"
]
| [] | ["POD_NAMESPACE"] | go | 1 | 0 | |
data/import.py | import django
import json
import os
import sys
# Make sure we can see the parent directory to import
sys.path.append('../')
os.environ['DJANGO_SETTINGS_MODULE'] = 'shipwrecks.settings'
# Make sure Django is set up
django.setup()
# Now we can import our Django model(s)
from wrecks.models import Wreck
from wrecks.models import WreckType
from wrecks.models import SOURCE_CHOICES
# Import the GEOS library needed to create points
from django.contrib.gis.geos import Point
from django.contrib.gis.geos import GEOSGeometry
if __name__ == '__main__':
# Make sure we have specified a file to import
if len(sys.argv) < 2:
print 'You must specify a geojson file to import.'
print 'Usage: $ python import.py <geojson file>'
sys.exit()
# Open the GeoJSON file
json_filepath = sys.argv[-1]
try:
with open(json_filepath, 'r') as f:
data = json.loads(f.read())
except IOError:
sys.exit("Error opening GeoJSON file")
except ValueError:
sys.exit('Error: the file does not appear to be valid JSON.')
# Turn each feature into a Wreck model instance
for feature_dict in data['features']:
wreck = Wreck()
properties = feature_dict['properties']
# Figure out the source type
source_name = properties['source']
if source_name == 'enc_wrecks':
source = SOURCE_CHOICES[1][0]
else:
source = SOURCE_CHOICES[0][0]
# Figure out if the wreck type exists (and normalize the values)
wreck_type_value = properties['feature_type']
if not wreck_type_value:
wreck_type_value = 'Unknown'
else:
if wreck_type_value.startswith('Wrecks -'):
wreck_type_value = wreck_type_value.replace('Wrecks -', 'Wreck -')
wreck_type, created = WreckType.objects.get_or_create(name=wreck_type_value)
# Figure out the depth
if source_name == 'enc_wrecks':
# ENC Wrecks are always in meters
try:
depth_meters = float(properties['depth'])
except ValueError:
depth_meters = None
else:
if not properties['depth']:
depth_meters = None
else:
depth_value = properties['depth']
sounding = properties['sounding']
if 'meters' in sounding:
depth_meters = depth_value
elif 'feet' in sounding:
# Convert feet and tenths to meters
depth_meters = depth_value * 0.3048
elif 'fathoms' in sounding:
# Convert fathoms to meters
depth_meters = depth_value * 1.8288
else:
depth_meters = None
# Create the Point object from the lat and long
lat = feature_dict['geometry']['coordinates'][1]
lng = feature_dict['geometry']['coordinates'][0]
        # GEOSGeometry expects a numeric SRID; 4269 is the EPSG code for NAD83
        location_point = GEOSGeometry('POINT(%f %f)' % (lng, lat), srid=4269)
# Get the name or assign 'unknown'
vessel_name = properties['vessel_name']
if not vessel_name:
vessel_name = 'Unknown'
# Cast the year sunk into an integer
try:
year_sunk = int(properties['yearsunk'])
except ValueError:
year_sunk = None
wreck.name = vessel_name
wreck.history = properties['history']
wreck.wreck_type = wreck_type
wreck.year_sunk = year_sunk
wreck.source = source
wreck.source_identifier = feature_dict['id']
wreck.depth_meters = depth_meters
wreck.location = location_point
# Save the new wreck
wreck.save()
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
vendor/github.com/hashicorp/terraform/backend/remote-state/pg/backend_test.go | package pg
// Create the test database: createdb terraform_backend_pg_test
// TF_ACC=1 GO111MODULE=on go test -v -mod=vendor -timeout=2m -parallel=4 github.com/hashicorp/terraform/backend/remote-state/pg
import (
"database/sql"
"fmt"
"os"
"testing"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/states/remote"
"github.com/lib/pq"
_ "github.com/lib/pq"
)
// Function to skip a test unless in ACCeptance test mode.
//
// A running Postgres server identified by env variable
// DATABASE_URL is required for acceptance tests.
func testACC(t *testing.T) {
skip := os.Getenv("TF_ACC") == ""
if skip {
t.Log("pg backend tests require setting TF_ACC")
t.Skip()
}
if os.Getenv("DATABASE_URL") == "" {
os.Setenv("DATABASE_URL", "postgres://localhost/terraform_backend_pg_test?sslmode=disable")
}
}
func TestBackend_impl(t *testing.T) {
var _ backend.Backend = new(Backend)
}
func TestBackendConfig(t *testing.T) {
testACC(t)
connStr := getDatabaseUrl()
schemaName := pq.QuoteIdentifier(fmt.Sprintf("terraform_%s", t.Name()))
config := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
"schema_name": schemaName,
})
schemaName = pq.QuoteIdentifier(schemaName)
dbCleaner, err := sql.Open("postgres", connStr)
if err != nil {
t.Fatal(err)
}
defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))
b := backend.TestBackendConfig(t, New(), config).(*Backend)
if b == nil {
t.Fatal("Backend could not be configured")
}
_, err = b.db.Query(fmt.Sprintf("SELECT name, data FROM %s.%s LIMIT 1", schemaName, statesTableName))
if err != nil {
t.Fatal(err)
}
_, err = b.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
s, err := b.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
c := s.(*remote.State).Client.(*RemoteClient)
if c.Name != backend.DefaultStateName {
t.Fatal("RemoteClient name is not configured")
}
backend.TestBackendStates(t, b)
}
func TestBackendConfigSkipOptions(t *testing.T) {
testACC(t)
connStr := getDatabaseUrl()
testCases := []struct {
Name string
SkipSchemaCreation bool
SkipTableCreation bool
SkipIndexCreation bool
Setup func(t *testing.T, db *sql.DB, schemaName string)
}{
{
Name: "skip_schema_creation",
SkipSchemaCreation: true,
Setup: func(t *testing.T, db *sql.DB, schemaName string) {
// create the schema as a prerequisites
_, err := db.Query(fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s`, schemaName))
if err != nil {
t.Fatal(err)
}
},
},
{
Name: "skip_table_creation",
SkipTableCreation: true,
Setup: func(t *testing.T, db *sql.DB, schemaName string) {
// since the table needs to be already created the schema must be too
_, err := db.Query(fmt.Sprintf(`CREATE SCHEMA %s`, schemaName))
if err != nil {
t.Fatal(err)
}
_, err = db.Query(fmt.Sprintf(`CREATE TABLE %s.%s (
id SERIAL PRIMARY KEY,
name TEXT,
data TEXT
)`, schemaName, statesTableName))
if err != nil {
t.Fatal(err)
}
},
},
{
Name: "skip_index_creation",
SkipIndexCreation: true,
Setup: func(t *testing.T, db *sql.DB, schemaName string) {
// Everything need to exists for the index to be created
_, err := db.Query(fmt.Sprintf(`CREATE SCHEMA %s`, schemaName))
if err != nil {
t.Fatal(err)
}
_, err = db.Query(fmt.Sprintf(`CREATE TABLE %s.%s (
id SERIAL PRIMARY KEY,
name TEXT,
data TEXT
)`, schemaName, statesTableName))
if err != nil {
t.Fatal(err)
}
_, err = db.Exec(fmt.Sprintf(`CREATE UNIQUE INDEX IF NOT EXISTS %s ON %s.%s (name)`, statesIndexName, schemaName, statesTableName))
if err != nil {
t.Fatal(err)
}
},
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
schemaName := tc.Name
config := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
"schema_name": schemaName,
"skip_schema_creation": tc.SkipSchemaCreation,
"skip_table_creation": tc.SkipTableCreation,
"skip_index_creation": tc.SkipIndexCreation,
})
schemaName = pq.QuoteIdentifier(schemaName)
db, err := sql.Open("postgres", connStr)
if err != nil {
t.Fatal(err)
}
tc.Setup(t, db, schemaName)
defer db.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))
b := backend.TestBackendConfig(t, New(), config).(*Backend)
if b == nil {
t.Fatal("Backend could not be configured")
}
// Make sure everything has been created
// This tests that both the schema and the table have been created
_, err = b.db.Query(fmt.Sprintf("SELECT name, data FROM %s.%s LIMIT 1", schemaName, statesTableName))
if err != nil {
t.Fatal(err)
}
// Make sure that the index exists
query := `select count(*) from pg_indexes where schemaname=$1 and tablename=$2 and indexname=$3;`
var count int
if err := b.db.QueryRow(query, tc.Name, statesTableName, statesIndexName).Scan(&count); err != nil {
t.Fatal(err)
}
if count != 1 {
t.Fatalf("The index has not been created (%d)", count)
}
_, err = b.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
s, err := b.StateMgr(backend.DefaultStateName)
if err != nil {
t.Fatal(err)
}
c := s.(*remote.State).Client.(*RemoteClient)
if c.Name != backend.DefaultStateName {
t.Fatal("RemoteClient name is not configured")
}
})
}
}
func TestBackendStates(t *testing.T) {
testACC(t)
connStr := getDatabaseUrl()
testCases := []string{
fmt.Sprintf("terraform_%s", t.Name()),
fmt.Sprintf("test with spaces: %s", t.Name()),
}
for _, schemaName := range testCases {
t.Run(schemaName, func(t *testing.T) {
dbCleaner, err := sql.Open("postgres", connStr)
if err != nil {
t.Fatal(err)
}
defer dbCleaner.Query("DROP SCHEMA IF EXISTS %s CASCADE", pq.QuoteIdentifier(schemaName))
config := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
"schema_name": schemaName,
})
b := backend.TestBackendConfig(t, New(), config).(*Backend)
if b == nil {
t.Fatal("Backend could not be configured")
}
backend.TestBackendStates(t, b)
})
}
}
func TestBackendStateLocks(t *testing.T) {
testACC(t)
connStr := getDatabaseUrl()
schemaName := fmt.Sprintf("terraform_%s", t.Name())
dbCleaner, err := sql.Open("postgres", connStr)
if err != nil {
t.Fatal(err)
}
defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))
config := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
"schema_name": schemaName,
})
b := backend.TestBackendConfig(t, New(), config).(*Backend)
if b == nil {
t.Fatal("Backend could not be configured")
}
bb := backend.TestBackendConfig(t, New(), config).(*Backend)
if bb == nil {
t.Fatal("Backend could not be configured")
}
backend.TestBackendStateLocks(t, b, bb)
}
func getDatabaseUrl() string {
return os.Getenv("DATABASE_URL")
}
| [
"\"TF_ACC\"",
"\"DATABASE_URL\"",
"\"DATABASE_URL\""
]
| []
| [
"DATABASE_URL",
"TF_ACC"
]
| [] | ["DATABASE_URL", "TF_ACC"] | go | 2 | 0 | |
poller/lambda/cdk/src/main/java/com/robwettach/webdiplomacy/poller/lambda/cdk/Main.java | package com.robwettach.webdiplomacy.poller.lambda.cdk;
import software.amazon.awscdk.core.App;
import software.amazon.awscdk.services.lambda.Code;
/**
* CDK entrypoint.
*/
public class Main {
/**
* CDK entrypoint.
*
* @param args Command-line arguments
*/
public static void main(final String[] args) {
App app = new App();
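        // POLLER_LAMBDA_ZIP is expected to point at the packaged Lambda code on
        // disk; Code.fromAsset turns it into a CDK asset used by the stack below.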
Code lambdaPollerCode = Code.fromAsset(System.getenv("POLLER_LAMBDA_ZIP"));
new WebDiplomacyPollerLambdaStack(
app,
"WebDiplomacyPoller",
new WebDiplomacyPollerLambdaStack.Props(lambdaPollerCode));
app.synth();
}
}
| [
"\"POLLER_LAMBDA_ZIP\""
]
| []
| [
"POLLER_LAMBDA_ZIP"
]
| [] | ["POLLER_LAMBDA_ZIP"] | java | 1 | 0 | |
src/nr/powerline/__init__.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020, Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from . import chars, server, static
from .utils import register_signal_handler, try_remove
from nr import ansiterm as ansi
from nr.interface import Interface
from nr.sumtype import Constructor, Sumtype
from nr.utils.process import process_exists, process_terminate, replace_stdio, spawn_daemon
from typing import Iterable, Optional, Sequence, TextIO, Union
import argparse
import io
import json
import logging
import os
import nr.databind.core, nr.databind.json
import signal
import sys
__author__ = 'Niklas Rosenstein <[email protected]>'
__version__ = '0.1.2'
logger = logging.getLogger(__name__)
class Pen(Sumtype):
Text = Constructor('text,style')
Flipchar = Constructor('char')
def render(
pen_sequence: Sequence[Pen],
fp: TextIO = None,
escape_unprintable: bool = False
) -> Optional[str]:
r"""
Render a sequence of #Pen instructions to *fp*, or returns it as a string.
If *escape_unprintable* is enabled, unprintable characters will be wrapped
in `\[` and `\]` to allow the shell to properly count the width of the
resulting string.
"""
if fp is None:
fp = io.StringIO()
return_result = True
else:
return_result = False
def _find_next_bg(offset: int) -> Optional[ansi.Color]:
for pen in pen_sequence[offset:]: # TODO (@NiklasRosenstein): islice()?
if isinstance(pen, Pen.Text):
return pen.style.bg
return None
style = ansi.Style()
for index, pen in enumerate(pen_sequence):
if isinstance(pen, Pen.Flipchar):
new_bg = _find_next_bg(index+1) or ansi.SgrColor('DEFAULT')
if new_bg == style.bg:
# Note: This is more of a hack in cases where two plugins
# have the same background color, rendering the common
# RIGHT_TRIANGLE flipchar invisible.
text = chars.RIGHT_TRIANGLE_THIN
style = ansi.Style(None, new_bg)
else:
style = ansi.Style(style.bg, new_bg)
text = pen.char
elif isinstance(pen, Pen.Text):
style = pen.style or style
text = pen.text
else:
raise TypeError('expected Pen object, got {!r}'.format(
type(pen).__name__))
if escape_unprintable:
fp.write('\\[')
fp.write(str(style))
if escape_unprintable:
fp.write('\\]')
fp.write(text)
if escape_unprintable:
fp.write('\\[')
fp.write(str(ansi.Attribute.RESET))
if escape_unprintable:
fp.write('\\]')
if return_result:
return fp.getvalue()
return None
class PowerlineContext:
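  """Runtime information handed to each plugin while rendering the powerline."""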
def __init__(self,
path: str,
exit_code: int = 0,
default_style: ansi.Style = None,
env: dict = None,
is_server: bool = False):
self.path = path
self.exit_code = exit_code
self.default_style = default_style or ansi.parse_style('white blue')
self.env = os.environ if env is None else env
self.is_server = is_server
def getenv(self, name: str, default: str = None) -> Optional[str]:
return self.env.get(name, default)
class AnsiModule(nr.databind.core.Module):
def __init__(self):
super().__init__()
self.register(ansi.Color, nr.databind.core.IDeserializer(
deserialize=lambda m, n: ansi.parse_color(n.value)))
self.register(ansi.Style, nr.databind.core.IDeserializer(
deserialize=lambda m, n: ansi.parse_style(n.value)))
@nr.databind.core.SerializeAs(nr.databind.core.UnionType
.with_entrypoint_resolver('nr.powerline.plugins'))
class PowerlinePlugin(Interface):
def render(self, context: PowerlineContext) -> Iterable[Pen]:
...
class Powerline(nr.databind.core.Struct):
plugins = nr.databind.core.Field([PowerlinePlugin])
default_style = nr.databind.core.Field(ansi.Style,
nr.databind.core.FieldName('default-style'), default=None)
def render(self,
context: PowerlineContext,
fp: TextIO = None,
escape_unprintable: bool = False
) -> Optional[str]:
pens = []
for plugin in self.plugins:
pens += plugin.render(context)
return render(pens, fp, escape_unprintable)
def load_powerline(*try_files: str, default: Union[dict, Powerline] = None) -> Optional[Powerline]:
mapper = nr.databind.core.ObjectMapper(
AnsiModule(),
nr.databind.json.JsonModule(),
)
for filename in try_files:
if os.path.isfile(filename):
with open(filename) as fp:
data = json.load(fp)
return mapper.deserialize(data, Powerline, filename=filename)
if isinstance(default, dict):
default = mapper.deserialize(default, Powerline, filename='<default>')
return default
def main(argv=None):
"""
Entrypoint for nr-powerline.
"""
parser = argparse.ArgumentParser()
parser.add_argument('exit_code', type=int, nargs='?')
parser.add_argument('-f', '--file')
parser.add_argument('-e', '--escape', action='store_true')
parser.add_argument('--run-dir', default=None)
parser.add_argument('--start', action='store_true')
parser.add_argument('--stop', action='store_true')
parser.add_argument('--status', action='store_true')
parser.add_argument('--fake-server', action='store_true')
parser.add_argument('--exit-code', action='store_true')
parser.add_argument('--src', choices=('bash',))
args = parser.parse_args(argv)
logging.basicConfig(format='[%(asctime)s - %(levelname)s]: %(message)s', level=logging.INFO)
powerline = load_powerline(
args.file or os.path.expanduser('~/.local/powerline/config.json'),
default=static.default_powerline)
context = PowerlineContext(
os.getcwd(),
args.exit_code or 0,
default_style=powerline.default_style,
is_server=args.fake_server)
if args.src == 'bash':
print(static.bash_src)
sys.exit(0)
elif args.src:
parser.error('unexpected argument for --src: {!r}'.format(args.src))
if not args.start and not args.stop and not args.status:
print(powerline.render(context, escape_unprintable=args.escape), end='')
return
run_dir = args.run_dir or os.path.expanduser('~/.local/powerline')
log_file = os.path.join(run_dir, 'daemon.log')
pid_file = os.path.join(run_dir, 'daemon.pid')
socket_file = os.path.join(run_dir, 'daemon.sock')
if os.path.isfile(pid_file):
with open(pid_file) as fp:
daemon_pid = int(fp.read().strip())
else:
daemon_pid = None
if args.stop and daemon_pid:
logger.info('Stopping %d', daemon_pid)
process_terminate(daemon_pid)
if args.start:
if os.path.exists(socket_file):
os.remove(socket_file)
def run(powerline, stdout):
with open(pid_file, 'w') as fp:
fp.write(str(os.getpid()))
logger.info('Started %d', os.getpid())
register_signal_handler('SIGINT', lambda *a: try_remove(pid_file))
replace_stdio(None, stdout, stdout)
conf = server.Address.UnixFile(socket_file)
server.PowerlineServer(conf, powerline).run_forever()
logger.info('Bye bye')
os.makedirs(run_dir, exist_ok=True)
stdout = open(log_file, 'a')
spawn_daemon(lambda: run(powerline, stdout))
if args.status:
if not daemon_pid or not process_exists(daemon_pid):
if args.exit_code:
sys.exit(7)
print('stopped')
else:
if args.exit_code:
sys.exit(0)
print('running')
| []
| []
| []
| [] | [] | python | 0 | 0 | |
fstest/fstest.go | // Package fstest provides utilities for testing the Fs
package fstest
// FIXME put name of test FS in Fs structure
import (
"bytes"
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/pingme998/rclone/fs"
"github.com/pingme998/rclone/fs/accounting"
"github.com/pingme998/rclone/fs/config"
"github.com/pingme998/rclone/fs/config/configfile"
"github.com/pingme998/rclone/fs/hash"
"github.com/pingme998/rclone/fs/walk"
"github.com/pingme998/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm"
)
// Globals
var (
RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
Verbose = flag.Bool("verbose", false, "Set to enable logging")
DumpHeaders = flag.Bool("dump-headers", false, "Set to dump headers (needs -verbose)")
DumpBodies = flag.Bool("dump-bodies", false, "Set to dump bodies (needs -verbose)")
Individual = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower")
LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
UseListR = flag.Bool("fast-list", false, "Use recursive list if available. Uses more memory but fewer transactions.")
// SizeLimit signals tests to skip maximum test file size and skip inappropriate runs
SizeLimit = flag.Int64("size-limit", 0, "Limit maximum test file size")
// ListRetries is the number of times to retry a listing to overcome eventual consistency
ListRetries = flag.Int("list-retries", 3, "Number or times to retry listing")
// MatchTestRemote matches the remote names used for testing
MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{24}$`)
)
// Seed the random number generator
func init() {
rand.Seed(time.Now().UnixNano())
}
// Initialise rclone for testing
func Initialise() {
ctx := context.Background()
ci := fs.GetConfig(ctx)
// Never ask for passwords, fail instead.
// If your local config is encrypted set environment variable
// "RCLONE_CONFIG_PASS=hunter2" (or your password)
ci.AskPassword = false
// Override the config file from the environment - we don't
// parse the flags any more so this doesn't happen
// automatically
if envConfig := os.Getenv("RCLONE_CONFIG"); envConfig != "" {
_ = config.SetConfigPath(envConfig)
}
configfile.Install()
accounting.Start(ctx)
if *Verbose {
ci.LogLevel = fs.LogLevelDebug
}
if *DumpHeaders {
ci.Dump |= fs.DumpHeaders
}
if *DumpBodies {
ci.Dump |= fs.DumpBodies
}
ci.LowLevelRetries = *LowLevelRetries
ci.UseListR = *UseListR
}
// Item represents an item for checking
type Item struct {
Path string
Hashes map[hash.Type]string
ModTime time.Time
Size int64
}
// NewItem creates an item from a string content
func NewItem(Path, Content string, modTime time.Time) Item {
i := Item{
Path: Path,
ModTime: modTime,
Size: int64(len(Content)),
}
hash := hash.NewMultiHasher()
buf := bytes.NewBufferString(Content)
_, err := io.Copy(hash, buf)
if err != nil {
log.Fatalf("Failed to create item: %v", err)
}
i.Hashes = hash.Sums()
return i
}
// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
dt := t0.Sub(t1)
if dt >= precision || dt <= -precision {
return dt, false
}
return dt, true
}
// AssertTimeEqualWithPrecision checks that want is within precision
// of got, asserting that with t and logging remote
func AssertTimeEqualWithPrecision(t *testing.T, remote string, want, got time.Time, precision time.Duration) {
dt, ok := CheckTimeEqualWithPrecision(want, got, precision)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (want %s vs got %s) (precision %s)", remote, dt, precision, want, got, precision))
}
// CheckModTime checks the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
AssertTimeEqualWithPrecision(t, obj.Remote(), i.ModTime, modTime, precision)
}
// CheckHashes checks all the hashes the object supports are correct
func (i *Item) CheckHashes(t *testing.T, obj fs.Object) {
require.NotNil(t, obj)
types := obj.Fs().Hashes().Array()
for _, Hash := range types {
// Check attributes
sum, err := obj.Hash(context.Background(), Hash)
require.NoError(t, err)
assert.True(t, hash.Equals(i.Hashes[Hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), Hash, i.Hashes[Hash], sum))
}
}
// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
i.CheckHashes(t, obj)
assert.Equal(t, i.Size, obj.Size(), fmt.Sprintf("%s: size incorrect file=%d vs obj=%d", i.Path, i.Size, obj.Size()))
i.CheckModTime(t, obj, obj.ModTime(context.Background()), precision)
}
// Normalize runs a utf8 normalization on the string if running on OS
// X. This is because OS X denormalizes file names it writes to the
// local file system.
func Normalize(name string) string {
if runtime.GOOS == "darwin" {
name = norm.NFC.String(name)
}
return name
}
// Items represents all items for checking
type Items struct {
byName map[string]*Item
byNameAlt map[string]*Item
items []Item
}
// NewItems makes an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
byNameAlt: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[Normalize(items[i].Path)] = &items[i]
}
return is
}
// Find checks off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
remote := Normalize(obj.Remote())
i, ok := is.byName[remote]
if !ok {
i, ok = is.byNameAlt[remote]
assert.True(t, ok, fmt.Sprintf("Unexpected file %q", remote))
}
if i != nil {
delete(is.byName, i.Path)
i.Check(t, obj, precision)
}
}
// Done checks all finished
func (is *Items) Done(t *testing.T) {
if len(is.byName) != 0 {
for name := range is.byName {
t.Logf("Not found %q", name)
}
}
assert.Equal(t, 0, len(is.byName), fmt.Sprintf("%d objects not found", len(is.byName)))
}
// makeListingFromItems returns a string representation of the items
//
// it returns two possible strings, one normal and one for windows
func makeListingFromItems(items []Item) string {
nameLengths := make([]string, len(items))
for i, item := range items {
remote := Normalize(item.Path)
nameLengths[i] = fmt.Sprintf("%s (%d)", remote, item.Size)
}
sort.Strings(nameLengths)
return strings.Join(nameLengths, ", ")
}
// makeListingFromObjects returns a string representation of the objects
func makeListingFromObjects(objs []fs.Object) string {
nameLengths := make([]string, len(objs))
for i, obj := range objs {
nameLengths[i] = fmt.Sprintf("%s (%d)", Normalize(obj.Remote()), obj.Size())
}
sort.Strings(nameLengths)
return strings.Join(nameLengths, ", ")
}
// filterEmptyDirs removes any empty (or containing only directories)
// directories from expectedDirs
func filterEmptyDirs(t *testing.T, items []Item, expectedDirs []string) (newExpectedDirs []string) {
dirs := map[string]struct{}{"": {}}
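	// Build the set of directories that (directly or indirectly) contain a file.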
for _, item := range items {
base := item.Path
for {
base = path.Dir(base)
if base == "." || base == "/" {
break
}
dirs[base] = struct{}{}
}
}
for _, expectedDir := range expectedDirs {
if _, found := dirs[expectedDir]; found {
newExpectedDirs = append(newExpectedDirs, expectedDir)
} else {
t.Logf("Filtering empty directory %q", expectedDir)
}
}
return newExpectedDirs
}
// CheckListingWithRoot checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non nil then we check those too. Note that no
// directories returned is also OK as some remotes don't return
// directories.
//
// dir is the directory used for the listing.
func CheckListingWithRoot(t *testing.T, f fs.Fs, dir string, items []Item, expectedDirs []string, precision time.Duration) {
if expectedDirs != nil && !f.Features().CanHaveEmptyDirectories {
expectedDirs = filterEmptyDirs(t, items, expectedDirs)
}
is := NewItems(items)
ctx := context.Background()
oldErrors := accounting.Stats(ctx).GetErrors()
var objs []fs.Object
var dirs []fs.Directory
var err error
var retries = *ListRetries
sleep := time.Second / 2
wantListing := makeListingFromItems(items)
gotListing := "<unset>"
listingOK := false
for i := 1; i <= retries; i++ {
objs, dirs, err = walk.GetAll(ctx, f, dir, true, -1)
if err != nil && err != fs.ErrorDirNotFound {
t.Fatalf("Error listing: %v", err)
}
gotListing = makeListingFromObjects(objs)
listingOK = wantListing == gotListing
if listingOK && (expectedDirs == nil || len(dirs) == len(expectedDirs)) {
// Put an extra sleep in if we did any retries just to make sure it really
// is consistent (here is looking at you Amazon Drive!)
if i != 1 {
extraSleep := 5*time.Second + sleep
t.Logf("Sleeping for %v just to make sure", extraSleep)
time.Sleep(extraSleep)
}
break
}
sleep *= 2
t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
time.Sleep(sleep)
if doDirCacheFlush := f.Features().DirCacheFlush; doDirCacheFlush != nil {
t.Logf("Flushing the directory cache")
doDirCacheFlush()
}
}
assert.True(t, listingOK, fmt.Sprintf("listing wrong, want\n %s got\n %s", wantListing, gotListing))
for _, obj := range objs {
require.NotNil(t, obj)
is.Find(t, obj, precision)
}
is.Done(t)
// Don't notice an error when listing an empty directory
if len(items) == 0 && oldErrors == 0 && accounting.Stats(ctx).GetErrors() == 1 {
accounting.Stats(ctx).ResetErrors()
}
// Check the directories
if expectedDirs != nil {
expectedDirsCopy := make([]string, len(expectedDirs))
for i, dir := range expectedDirs {
expectedDirsCopy[i] = Normalize(dir)
}
actualDirs := []string{}
for _, dir := range dirs {
actualDirs = append(actualDirs, Normalize(dir.Remote()))
}
sort.Strings(actualDirs)
sort.Strings(expectedDirsCopy)
assert.Equal(t, expectedDirsCopy, actualDirs, "directories")
}
}
// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non nil then we check those too. Note that no
// directories returned is also OK as some remotes don't return
// directories.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs []string, precision time.Duration) {
CheckListingWithRoot(t, f, "", items, expectedDirs, precision)
}
// CheckListing checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
precision := f.Precision()
CheckListingWithPrecision(t, f, items, nil, precision)
}
// CheckItems checks the fs to see if it has only the items passed in
// using a precision of fs.Config.ModifyWindow
func CheckItems(t *testing.T, f fs.Fs, items ...Item) {
CheckListingWithPrecision(t, f, items, nil, fs.GetModifyWindow(context.TODO(), f))
}
// CompareItems compares a set of DirEntries to a slice of items and a list of dirs
// The modtimes are compared with the precision supplied
func CompareItems(t *testing.T, entries fs.DirEntries, items []Item, expectedDirs []string, precision time.Duration, what string) {
is := NewItems(items)
var objs []fs.Object
var dirs []fs.Directory
wantListing := makeListingFromItems(items)
for _, entry := range entries {
switch x := entry.(type) {
case fs.Directory:
dirs = append(dirs, x)
case fs.Object:
objs = append(objs, x)
// do nothing
default:
t.Fatalf("unknown object type %T", entry)
}
}
gotListing := makeListingFromObjects(objs)
listingOK := wantListing == gotListing
assert.True(t, listingOK, fmt.Sprintf("%s not equal, want\n %s got\n %s", what, wantListing, gotListing))
for _, obj := range objs {
require.NotNil(t, obj)
is.Find(t, obj, precision)
}
is.Done(t)
// Check the directories
if expectedDirs != nil {
expectedDirsCopy := make([]string, len(expectedDirs))
for i, dir := range expectedDirs {
expectedDirsCopy[i] = Normalize(dir)
}
actualDirs := []string{}
for _, dir := range dirs {
actualDirs = append(actualDirs, Normalize(dir.Remote()))
}
sort.Strings(actualDirs)
sort.Strings(expectedDirsCopy)
assert.Equal(t, expectedDirsCopy, actualDirs, "directories not equal")
}
}
// Time parses a time string or logs a fatal error
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// LocalRemote creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
path, err = ioutil.TempDir("", "rclone")
if err == nil {
// Now remove the directory
err = os.Remove(path)
}
path = filepath.ToSlash(path)
return
}
// RandomRemoteName makes a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
var err error
var leafName string
// Make a directory if remote name is null
if remoteName == "" {
remoteName, err = LocalRemote()
if err != nil {
return "", "", err
}
} else {
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
leafName = "rclone-test-" + random.String(24)
if !MatchTestRemote.MatchString(leafName) {
log.Fatalf("%q didn't match the test remote name regexp", leafName)
}
remoteName += leafName
}
return remoteName, leafName, nil
}
// RandomRemote makes a random bucket or subdirectory on the remote
// from the -remote parameter
//
// Call the finalise function returned to Purge the fs at the end (and
// the parent if necessary)
//
// Returns the remote, its url, a finaliser and an error
func RandomRemote() (fs.Fs, string, func(), error) {
var err error
var parentRemote fs.Fs
remoteName := *RemoteName
remoteName, _, err = RandomRemoteName(remoteName)
if err != nil {
return nil, "", nil, err
}
remote, err := fs.NewFs(context.Background(), remoteName)
if err != nil {
return nil, "", nil, err
}
finalise := func() {
Purge(remote)
if parentRemote != nil {
Purge(parentRemote)
if err != nil {
log.Printf("Failed to purge %v: %v", parentRemote, err)
}
}
}
return remote, remoteName, finalise, nil
}
// Purge is a simplified re-implementation of operations.Purge for the
// test routine cleanup to avoid circular dependencies.
//
// It logs errors rather than returning them
func Purge(f fs.Fs) {
ctx := context.Background()
var err error
doFallbackPurge := true
if doPurge := f.Features().Purge; doPurge != nil {
doFallbackPurge = false
fs.Debugf(f, "Purge remote")
err = doPurge(ctx, "")
if err == fs.ErrorCantPurge {
doFallbackPurge = true
}
}
if doFallbackPurge {
dirs := []string{""}
err = walk.ListR(ctx, f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
var err error
entries.ForObject(func(obj fs.Object) {
fs.Debugf(f, "Purge object %q", obj.Remote())
err = obj.Remove(ctx)
if err != nil {
log.Printf("purge failed to remove %q: %v", obj.Remote(), err)
}
})
entries.ForDir(func(dir fs.Directory) {
dirs = append(dirs, dir.Remote())
})
return nil
})
sort.Strings(dirs)
for i := len(dirs) - 1; i >= 0; i-- {
dir := dirs[i]
fs.Debugf(f, "Purge dir %q", dir)
err := f.Rmdir(ctx, dir)
if err != nil {
log.Printf("purge failed to rmdir %q: %v", dir, err)
}
}
}
if err != nil {
log.Printf("purge failed: %v", err)
}
}
| [
"\"RCLONE_CONFIG\""
]
| []
| [
"RCLONE_CONFIG"
]
| [] | ["RCLONE_CONFIG"] | go | 1 | 0 | |
internal/envconfig/envconfig.go | package envconfig
import (
"os"
"github.com/vrischmann/envconfig"
)
// Init populates the given config struct from environment variables, using the
// prefix from ENVS_PREFIX when it is set.
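//
// A minimal usage sketch (illustrative only; the Config type, its field and the
// MYAPP prefix are hypothetical, not part of this package):
//
//	type Config struct {
//		Port int
//	}
//
//	var cfg Config
//	if err := envconfig.Init(&cfg); err != nil {
//		log.Fatal(err)
//	}
//
// With ENVS_PREFIX=MYAPP the Port field is typically resolved from MYAPP_PORT;
// without a prefix it is read from PORT.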
func Init(conf interface{}) error {
envPrefix := os.Getenv("ENVS_PREFIX")
return envconfig.InitWithPrefix(conf, envPrefix)
}
| [
"\"ENVS_PREFIX\""
]
| []
| [
"ENVS_PREFIX"
]
| [] | ["ENVS_PREFIX"] | go | 1 | 0 | |
src/main/java/com/sysdig/jenkins/plugins/sysdig/scanner/InlineScannerRemoteExecutor.java | /*
Copyright (C) 2016-2020 Sysdig
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.sysdig.jenkins.plugins.sysdig.scanner;
import com.google.common.base.Strings;
import com.sysdig.jenkins.plugins.sysdig.BuildConfig;
import com.sysdig.jenkins.plugins.sysdig.SysdigBuilder;
import com.sysdig.jenkins.plugins.sysdig.containerrunner.Container;
import com.sysdig.jenkins.plugins.sysdig.containerrunner.ContainerRunner;
import com.sysdig.jenkins.plugins.sysdig.containerrunner.ContainerRunnerFactory;
import com.sysdig.jenkins.plugins.sysdig.containerrunner.DockerClientContainerFactory;
import com.sysdig.jenkins.plugins.sysdig.log.SysdigLogger;
import hudson.EnvVars;
import hudson.remoting.Callable;
import org.jenkinsci.remoting.RoleChecker;
import java.io.*;
import java.util.*;
public class InlineScannerRemoteExecutor implements Callable<String, Exception>, Serializable {
private static final String DUMMY_ENTRYPOINT = "cat";
private static final String[] MKDIR_COMMAND = new String[]{"mkdir", "-p", "/tmp/sysdig-inline-scan/logs"};
private static final String[] TOUCH_COMMAND = new String[]{"touch", "/tmp/sysdig-inline-scan/logs/info.log"};
private static final String[] TAIL_COMMAND = new String[]{"tail", "-f", "/tmp/sysdig-inline-scan/logs/info.log"};
private static final String SCAN_COMMAND = "/sysdig-inline-scan.sh";
private static final String[] SCAN_ARGS = new String[] {
"--storage-type=docker-daemon",
"--format=JSON"};
private static final String VERBOSE_ARG = "--verbose";
private static final String SKIP_TLS_ARG = "--sysdig-skip-tls";
private static final String SYSDIG_URL_ARG = "--sysdig-url=%s";
private static final String ON_PREM_ARG = "--on-prem";
private static final String DOCKERFILE_ARG = "--dockerfile=/tmp/";
private static final String DOCKERFILE_MOUNTPOINT = "/tmp/";
private static final int STOP_SECONDS = 1;
// Use a default container runner factory, but allow overriding for mocks in tests
private static ContainerRunnerFactory containerRunnerFactory = new DockerClientContainerFactory();
public static void setContainerRunnerFactory(ContainerRunnerFactory containerRunnerFactory) {
InlineScannerRemoteExecutor.containerRunnerFactory = containerRunnerFactory;
}
private final String imageName;
private final String dockerFile;
private final BuildConfig config;
private final SysdigLogger logger;
private final EnvVars envVars;
public InlineScannerRemoteExecutor(String imageName, String dockerFile, BuildConfig config, SysdigLogger logger, EnvVars envVars) {
this.imageName = imageName;
this.dockerFile = dockerFile;
this.config = config;
this.logger = logger;
this.envVars = envVars;
}
@Override
public void checkRoles(RoleChecker checker) throws SecurityException { }
@Override
public String call() throws InterruptedException {
ContainerRunner containerRunner = containerRunnerFactory.getContainerRunner(logger, envVars);
List<String> args = new ArrayList<>();
args.add(SCAN_COMMAND);
args.addAll(Arrays.asList(SCAN_ARGS));
if (config.getDebug()) {
args.add(VERBOSE_ARG);
}
if (!config.getEngineverify()) {
args.add(SKIP_TLS_ARG);
}
args.add(imageName);
if (!config.getEngineurl().equals(SysdigBuilder.DescriptorImpl.DEFAULT_ENGINE_URL)) {
args.add(String.format(SYSDIG_URL_ARG, config.getEngineurl()));
args.add(ON_PREM_ARG);
}
List<String> containerEnvVars = new ArrayList<>();
containerEnvVars.add("SYSDIG_API_TOKEN=" + this.config.getSysdigToken());
containerEnvVars.add("SYSDIG_ADDED_BY=cicd-inline-scan");
addProxyVars(envVars, containerEnvVars, logger);
List<String> bindMounts = new ArrayList<>();
bindMounts.add("/var/run/docker.sock:/var/run/docker.sock");
logger.logDebug("System environment: " + System.getenv().toString());
logger.logDebug("Final environment: " + envVars);
logger.logDebug("Creating container with environment: " + containerEnvVars);
logger.logDebug("Bind mounts: " + bindMounts);
Container inlineScanContainer = containerRunner.createContainer(envVars.get("SYSDIG_OVERRIDE_INLINE_SCAN_IMAGE", config.getInlineScanImage()), Collections.singletonList(DUMMY_ENTRYPOINT), null, containerEnvVars, config.getRunAsUser(), bindMounts);
if (!Strings.isNullOrEmpty(dockerFile)) {
File f = new File(dockerFile);
logger.logDebug("Copying Dockerfile from " + f.getAbsolutePath() + " to " + DOCKERFILE_MOUNTPOINT + f.getName() + " inside container");
inlineScanContainer.copy(dockerFile, DOCKERFILE_MOUNTPOINT);
args.add(DOCKERFILE_ARG + f.getName());
}
if (!Strings.isNullOrEmpty(config.getInlineScanExtraParams())) {
args.addAll(Arrays.asList(config.getInlineScanExtraParams().split(" ")));
}
final StringBuilder builder = new StringBuilder();
try {
//TODO: Get exit code in run and exec?
inlineScanContainer.runAsync(frame -> this.sendToLog(logger, frame), frame -> this.sendToLog(logger, frame));
inlineScanContainer.exec(Arrays.asList(MKDIR_COMMAND), null, frame -> this.sendToLog(logger, frame), frame -> this.sendToLog(logger, frame));
inlineScanContainer.exec(Arrays.asList(TOUCH_COMMAND), null, frame -> this.sendToLog(logger, frame), frame -> this.sendToLog(logger, frame));
inlineScanContainer.execAsync(Arrays.asList(TAIL_COMMAND), null, frame -> this.sendToLog(logger, frame), frame -> this.sendToLog(logger, frame));
logger.logDebug("Executing command in container: " + args);
inlineScanContainer.exec(args, null, frame -> this.sendToBuilder(builder, frame), frame -> this.sendToDebugLog(logger, frame));
} finally {
inlineScanContainer.stop(STOP_SECONDS);
}
//TODO: For exit code 2 (wrong params), just show the output (should not happen, but just in case)
return builder.toString();
}
private void addProxyVars(EnvVars currentEnv, List<String> envVars, SysdigLogger logger) {
String http_proxy = currentEnv.get("http_proxy");
if (Strings.isNullOrEmpty(http_proxy)) {
http_proxy = currentEnv.get("HTTP_PROXY");
if (!Strings.isNullOrEmpty(http_proxy)) {
logger.logDebug("HTTP proxy setting from env var HTTP_PROXY (http_proxy empty): " + http_proxy);
}
} else {
logger.logDebug("HTTP proxy setting from env var http_proxy: " + http_proxy);
}
if (!Strings.isNullOrEmpty(http_proxy)) {
envVars.add("http_proxy=" + http_proxy);
}
String https_proxy = currentEnv.get("https_proxy");
if (Strings.isNullOrEmpty(https_proxy)) {
https_proxy = currentEnv.get("HTTPS_PROXY");
if (!Strings.isNullOrEmpty(https_proxy)) {
logger.logDebug("HTTPS proxy setting from env var HTTPS_PROXY (https_proxy empty): " + https_proxy);
}
} else {
logger.logDebug("HTTPS proxy setting from env var https_proxy: " + https_proxy);
}
if (Strings.isNullOrEmpty(https_proxy)) {
https_proxy = http_proxy;
if (!Strings.isNullOrEmpty(https_proxy)) {
logger.logDebug("HTTPS proxy setting from env var http_proxy (https_proxy and HTTPS_PROXY empty): " + https_proxy);
}
}
if (!Strings.isNullOrEmpty(https_proxy)) {
envVars.add("https_proxy=" + https_proxy);
}
String no_proxy = currentEnv.get("no_proxy");
if (Strings.isNullOrEmpty(no_proxy)) {
no_proxy = currentEnv.get("NO_PROXY");
if (!Strings.isNullOrEmpty(no_proxy)) {
logger.logDebug("NO proxy setting from env var NO_PROXY (no_proxy empty): " + no_proxy);
}
} else {
logger.logDebug("NO proxy setting from env var no_proxy: " + no_proxy);
}
if (!Strings.isNullOrEmpty(no_proxy)) {
envVars.add("no_proxy=" + no_proxy);
}
}
private void sendToBuilder(StringBuilder builder, String frame) {
for (String line: frame.split("[\n\r]")) {
// Workaround for older versions of inline-scan which can include some verbose output from "set -x", starting with "+ " in the stdout
if (!line.startsWith("+ ")) {
builder.append(line);
}
}
}
private void sendToLog(SysdigLogger logger, String frame) {
for (String line: frame.split("[\n\r]")) {
logger.logInfo(line);
}
}
private void sendToDebugLog(SysdigLogger logger, String frame) {
for (String line: frame.split("[\n\r]")) {
logger.logDebug(line);
}
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
virtualenv.go | package main
import (
"os"
"path"
. "github.com/reujab/bronze/types"
)
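// virtualenvSegment sets the segment value to the base name of the path in the
// VIRTUAL_ENV environment variable, i.e. the directory of the active virtualenv.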
func virtualenvSegment(segment *Segment) {
segment.Value = path.Base(os.Getenv("VIRTUAL_ENV"))
}
| [
"\"VIRTUAL_ENV\""
]
| []
| [
"VIRTUAL_ENV"
]
| [] | ["VIRTUAL_ENV"] | go | 1 | 0 | |
internal/auth/jwt/auth.go | package auth
import (
"fmt"
"net/http"
"os"
"time"
"github.com/dgrijalva/jwt-go"
)
var jwtSigningKey = []byte(os.Getenv("SECRET_KEY"))
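// GenerateJWT issues an HS256-signed token for the given email with audience
// "todo.io", issuer "jwtgo.io" and an expiry ten minutes from now.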
func GenerateJWT(email string) (string, error) {
token := jwt.New(jwt.SigningMethodHS256)
claims := token.Claims.(jwt.MapClaims)
claims["authorized"] = true
claims["email"] = email
claims["aud"] = "todo.io"
claims["iss"] = "jwtgo.io"
claims["exp"] = time.Now().Add(time.Minute * 10).Unix()
tokenString, err := token.SignedString(jwtSigningKey)
if err != nil {
_ = fmt.Errorf("something Went Wrong: %s", err.Error())
return "", err
}
return tokenString, nil
}
// IsAuthorized Middleware for verifying the JWT token
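//
// A typical wiring might look like the sketch below (illustrative only;
// todoHandler is a hypothetical http.HandlerFunc):
//
//	protected, err := IsAuthorized(todoHandler)
//	if err == nil {
//		http.Handle("/api/todo", protected)
//	}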
func IsAuthorized(endpoint http.HandlerFunc) (http.Handler, error) {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Header["Token"] != nil {
token, err := jwt.Parse(r.Header["Token"][0], func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("invalid Signing Method. error in parsing token")
}
// verify audience claim
aud := "todo.io"
checkAudience := token.Claims.(jwt.MapClaims).VerifyAudience(aud, false)
if !checkAudience {
return nil, fmt.Errorf("invalid audience")
}
// verify issuer claim
iss := "jwtgo.io"
checkIss := token.Claims.(jwt.MapClaims).VerifyIssuer(iss, false)
if !checkIss {
return nil, fmt.Errorf("invalid issuer")
}
return jwtSigningKey, nil
})
if err != nil {
				_, err := fmt.Fprint(w, err.Error())
if err != nil {
return
}
}
if token.Valid {
endpoint(w, r)
}
} else {
_, err := fmt.Fprintf(w, "No Authorization Token provided")
if err != nil {
return
}
}
}), nil
}
| [
"\"SECRET_KEY\""
]
| []
| [
"SECRET_KEY"
]
| [] | ["SECRET_KEY"] | go | 1 | 0 | |
lib/ninja.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import dataclasses
import datetime
import io
import math
import os
import pathlib
import re
import subprocess
import threading
import lib.litani
@dataclasses.dataclass
class _StatusParser:
# Format strings documented here:
# https://ninja-build.org/manual.html#_environment_variables
status_format: str = "<ninja>:%r/%f/%t "
status_re: re.Pattern = re.compile(
r"<ninja>:(?P<running>\d+)/(?P<finished>\d+)/(?P<total>\d+) "
r"(?P<message>.+)")
def parse_status(self, status_str):
m = self.status_re.match(status_str)
if not m:
return None
ret = {k: int(m[k]) for k in ["running", "finished", "total"]}
return {**ret, **{"message": m["message"]}}
@dataclasses.dataclass
class _OutputAccumulator:
out_stream: io.RawIOBase
status_parser: _StatusParser
concurrency_graph: list = dataclasses.field(default_factory=list)
finished: int = None
total: int = None
thread: threading.Thread = None
@staticmethod
def get_tty_width():
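        # Best-effort terminal width: 80 when TERM is unset, the `tput cols`
        # value when available, otherwise None.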
if os.getenv("TERM") is None:
return 80
try:
proc = subprocess.run(
["tput", "cols"], text=True, stdout=subprocess.PIPE, check=True)
except (FileNotFoundError, subprocess.CalledProcessError):
return None
if proc.returncode:
return None
try:
return int(proc.stdout.strip())
except TypeError:
return None
def print_progress(self, message):
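        # Render a one-line "[finished/total] message" progress indicator,
        # padding or truncating the message to the terminal width when known.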
tty_width = self.get_tty_width()
if not tty_width:
message_fmt = message
else:
f_width = int(math.log10(self.finished)) + 1 if self.finished else 1
t_width = int(math.log10(self.total)) + 1 if self.total else 1
progress_width = f_width + t_width + len("[/] ")
if len(message) + progress_width <= tty_width:
message_fmt = "[%d/%d] %s%s" % (
self.finished, self.total, message,
" " * (tty_width - len(message) - progress_width))
else:
message_width = tty_width - progress_width - len("...")
message_fmt = "[%d/%d] %s..." % (
self.finished, self.total, message[:message_width])
print("\r%s" % message_fmt, end="")
def process_output(self):
while True:
try:
line = self.out_stream.readline()
if not line:
return
except ValueError:
return
status = self.status_parser.parse_status(line)
if not status:
print(line)
continue
now = datetime.datetime.now(datetime.timezone.utc)
now_str = now.strftime(lib.litani.TIME_FORMAT_MS)
status["time"] = now_str
self.concurrency_graph.append(status)
if any((
status["finished"] != self.finished,
status["total"] != self.total)):
self.finished = status["finished"]
self.total = status["total"]
self.print_progress(status["message"])
def join(self):
self.thread.join()
print()
def start(self):
self.thread = threading.Thread(target=self.process_output)
self.thread.start()
@dataclasses.dataclass
class Runner:
ninja_file: pathlib.Path
dry_run: bool
parallelism: int
pipelines: list
ci_stage: str
proc: subprocess.CompletedProcess = None
status_parser: _StatusParser = _StatusParser()
out_acc: _OutputAccumulator = None
def _get_cmd(self):
cmd = [
"ninja",
"-k", "0",
"-f", self.ninja_file,
]
if self.parallelism:
cmd.extend(["-j", self.parallelism])
if self.dry_run:
cmd.append("-n")
if self.pipelines:
targets = ["__litani_pipeline_name_%s" % p for p in self.pipelines]
cmd.extend(targets)
elif self.ci_stage:
targets = ["__litani_ci_stage_%s" % p for p in self.ci_stage]
cmd.extend(targets)
return [str(c) for c in cmd]
def run(self):
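        # Override NINJA_STATUS with a parseable prefix so _StatusParser can
        # decode progress lines while ninja's output is streamed.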
env = {
**os.environ,
**{"NINJA_STATUS": self.status_parser.status_format},
}
with subprocess.Popen(
self._get_cmd(), env=env, stdout=subprocess.PIPE, text=True,
) as proc:
self.proc = proc
self.out_acc = _OutputAccumulator(proc.stdout, self.status_parser)
self.out_acc.start()
self.out_acc.join()
def was_successful(self):
return not self.proc.returncode
def get_parallelism_graph(self):
trace = []
for item in self.out_acc.concurrency_graph:
tmp = dict(item)
tmp.pop("message")
trace.append(tmp)
return {
"trace": trace,
"max_parallelism": max(
[i["running"] for i in self.out_acc.concurrency_graph]),
"n_proc": os.cpu_count(),
}
| []
| []
| [
"TERM"
]
| [] | ["TERM"] | python | 1 | 0 | |
adapter/mysql/mysql_test.go | // +build all mysql
package mysql
import (
"context"
"errors"
"os"
"testing"
"github.com/go-rel/rel"
"github.com/go-rel/rel/adapter/specs"
_ "github.com/go-sql-driver/mysql"
"github.com/stretchr/testify/assert"
)
var ctx = context.TODO()
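// dsn returns the MySQL data source name, honoring MYSQL_DATABASE when set and
// otherwise falling back to a local root connection.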
func dsn() string {
if os.Getenv("MYSQL_DATABASE") != "" {
return os.Getenv("MYSQL_DATABASE") + "?charset=utf8&parseTime=True&loc=Local"
}
return "root@tcp(localhost:3306)/rel_test?charset=utf8&parseTime=True&loc=Local"
}
func TestAdapter_specs(t *testing.T) {
adapter, err := Open(dsn())
assert.Nil(t, err)
defer adapter.Close()
repo := rel.New(adapter)
// Prepare tables
teardown := specs.Setup(t, repo)
defer teardown()
// Migration Specs
// - Rename column is only supported by MySQL 8.0
specs.Migrate(t, repo, specs.SkipRenameColumn)
// Query Specs
specs.Query(t, repo)
specs.QueryJoin(t, repo)
specs.QueryNotFound(t, repo)
specs.QueryWhereSubQuery(t, repo)
// Preload specs
specs.PreloadHasMany(t, repo)
specs.PreloadHasManyWithQuery(t, repo)
specs.PreloadHasManySlice(t, repo)
specs.PreloadHasOne(t, repo)
specs.PreloadHasOneWithQuery(t, repo)
specs.PreloadHasOneSlice(t, repo)
specs.PreloadBelongsTo(t, repo)
specs.PreloadBelongsToWithQuery(t, repo)
specs.PreloadBelongsToSlice(t, repo)
// Aggregate Specs
specs.Aggregate(t, repo)
// Insert Specs
specs.Insert(t, repo)
specs.InsertHasMany(t, repo)
specs.InsertHasOne(t, repo)
specs.InsertBelongsTo(t, repo)
specs.Inserts(t, repo)
specs.InsertAll(t, repo)
specs.InsertAllPartialCustomPrimary(t, repo)
// Update Specs
specs.Update(t, repo)
specs.UpdateNotFound(t, repo)
specs.UpdateHasManyInsert(t, repo)
specs.UpdateHasManyUpdate(t, repo)
specs.UpdateHasManyReplace(t, repo)
specs.UpdateHasOneInsert(t, repo)
specs.UpdateHasOneUpdate(t, repo)
specs.UpdateBelongsToInsert(t, repo)
specs.UpdateBelongsToUpdate(t, repo)
specs.UpdateAtomic(t, repo)
specs.Updates(t, repo)
specs.UpdateAll(t, repo)
// Delete specs
specs.Delete(t, repo)
specs.DeleteBelongsTo(t, repo)
specs.DeleteHasOne(t, repo)
specs.DeleteHasMany(t, repo)
specs.DeleteAll(t, repo)
// Constraint specs
// - Check constraint is not supported by mysql
specs.UniqueConstraintOnInsert(t, repo)
specs.UniqueConstraintOnUpdate(t, repo)
specs.ForeignKeyConstraintOnInsert(t, repo)
specs.ForeignKeyConstraintOnUpdate(t, repo)
}
func TestAdapter_Open(t *testing.T) {
// with parameter
assert.NotPanics(t, func() {
adapter, _ := Open("root@tcp(localhost:3306)/rel_test?charset=utf8")
defer adapter.Close()
})
	// without parameter
assert.NotPanics(t, func() {
adapter, _ := Open("root@tcp(localhost:3306)/rel_test")
defer adapter.Close()
})
}
func TestAdapter_Transaction_commitError(t *testing.T) {
adapter, err := Open(dsn())
assert.Nil(t, err)
defer adapter.Close()
assert.NotNil(t, adapter.Commit(ctx))
}
func TestAdapter_Transaction_rollbackError(t *testing.T) {
adapter, err := Open(dsn())
assert.Nil(t, err)
defer adapter.Close()
assert.NotNil(t, adapter.Rollback(ctx))
}
func TestAdapter_Exec_error(t *testing.T) {
adapter, err := Open(dsn())
assert.Nil(t, err)
defer adapter.Close()
_, _, err = adapter.Exec(ctx, "error", nil)
assert.NotNil(t, err)
}
func TestCheck(t *testing.T) {
assert.Panics(t, func() {
check(errors.New("error"))
})
}
| [
"\"MYSQL_DATABASE\"",
"\"MYSQL_DATABASE\""
]
| []
| [
"MYSQL_DATABASE"
]
| [] | ["MYSQL_DATABASE"] | go | 1 | 0 | |
django_news/wsgi.py | """
WSGI config for django_news project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_news.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/config.go | package pkg
import (
"os"
"github.com/spf13/viper"
)
type ServerConfig struct {
Address string
HTTPS bool
}
type DatabaseConfig struct {
Driver string
Address string
}
type AppConfig struct {
Server ServerConfig
Database DatabaseConfig
Debug bool
Testing bool
}
func isValidEnviroment(env string) bool {
switch env {
case
"test",
"local",
"dev":
return true
}
return false
}
func defaultConfig() {
srcName := "config"
if isValidEnviroment(os.Getenv("APP_ENV")) {
srcName = srcName + "_" + os.Getenv("APP_ENV")
}
viper.SetConfigName(srcName)
viper.AddConfigPath(".")
viper.SetConfigType("yaml")
viper.SetDefault("server.address", "0.0.0.0:8080")
viper.SetDefault("server.https", false)
viper.SetDefault("database.driver", "sqlite3")
viper.SetDefault("database.address", "database.db")
viper.SetDefault("debug", false)
viper.SetDefault("testing", false)
}
func configFactory() AppConfig {
return AppConfig{
Server: ServerConfig{
Address: viper.GetString("server.address"),
HTTPS: viper.GetBool("server.https"),
},
Database: DatabaseConfig{
Driver: viper.GetString("database.driver"),
Address: viper.GetString("database.address"),
},
Testing: viper.GetBool("testing"),
Debug: viper.GetBool("debug"),
}
}
// ReadConfig from ./config.yaml or ./config_{{environment}}.yaml
func ReadConfig() (AppConfig, error) {
defaultConfig()
err := viper.ReadInConfig()
if err != nil {
return configFactory(), err
}
return configFactory(), nil
}
// WriteConfig to ./config.yaml or ./config_{{environment}}.yaml
func WriteConfig() error {
defaultConfig()
return viper.SafeWriteConfig()
}
| [
"\"APP_ENV\"",
"\"APP_ENV\""
]
| []
| [
"APP_ENV"
]
| [] | ["APP_ENV"] | go | 1 | 0 | |
spring-core/src/test/java/org/springframework/util/SystemPropertyUtilsTests.java | /*
* Copyright 2002-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.util;
import java.util.Map;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException;
/**
* @author Rob Harrop
* @author Juergen Hoeller
*/
public class SystemPropertyUtilsTests {
@Test
public void testReplaceFromSystemProperty() {
System.setProperty("test.prop", "bar");
try {
String resolved = SystemPropertyUtils.resolvePlaceholders("${test.prop}");
assertThat(resolved).isEqualTo("bar");
}
finally {
System.getProperties().remove("test.prop");
}
}
@Test
public void testReplaceFromSystemPropertyWithDefault() {
System.setProperty("test.prop", "bar");
try {
String resolved = SystemPropertyUtils.resolvePlaceholders("${test.prop:foo}");
assertThat(resolved).isEqualTo("bar");
}
finally {
System.getProperties().remove("test.prop");
}
}
@Test
public void testReplaceFromSystemPropertyWithExpressionDefault() {
System.setProperty("test.prop", "bar");
try {
String resolved = SystemPropertyUtils.resolvePlaceholders("${test.prop:#{foo.bar}}");
assertThat(resolved).isEqualTo("bar");
}
finally {
System.getProperties().remove("test.prop");
}
}
@Test
public void testReplaceFromSystemPropertyWithExpressionContainingDefault() {
System.setProperty("test.prop", "bar");
try {
String resolved = SystemPropertyUtils.resolvePlaceholders("${test.prop:Y#{foo.bar}X}");
assertThat(resolved).isEqualTo("bar");
}
finally {
System.getProperties().remove("test.prop");
}
}
@Test
public void testReplaceWithDefault() {
String resolved = SystemPropertyUtils.resolvePlaceholders("${test.prop:foo}");
assertThat(resolved).isEqualTo("foo");
}
@Test
public void testReplaceWithExpressionDefault() {
String resolved = SystemPropertyUtils.resolvePlaceholders("${test.prop:#{foo.bar}}");
assertThat(resolved).isEqualTo("#{foo.bar}");
}
@Test
public void testReplaceWithExpressionContainingDefault() {
String resolved = SystemPropertyUtils.resolvePlaceholders("${test.prop:Y#{foo.bar}X}");
assertThat(resolved).isEqualTo("Y#{foo.bar}X");
}
@Test
public void testReplaceWithNoDefault() {
assertThatIllegalArgumentException().isThrownBy(() ->
SystemPropertyUtils.resolvePlaceholders("${test.prop}"));
}
@Test
public void testReplaceWithNoDefaultIgnored() {
String resolved = SystemPropertyUtils.resolvePlaceholders("${test.prop}", true);
assertThat(resolved).isEqualTo("${test.prop}");
}
@Test
public void testReplaceWithEmptyDefault() {
String resolved = SystemPropertyUtils.resolvePlaceholders("${test.prop:}");
assertThat(resolved).isEqualTo("");
}
@Test
public void testRecursiveFromSystemProperty() {
System.setProperty("test.prop", "foo=${bar}");
System.setProperty("bar", "baz");
try {
String resolved = SystemPropertyUtils.resolvePlaceholders("${test.prop}");
assertThat(resolved).isEqualTo("foo=baz");
}
finally {
System.getProperties().remove("test.prop");
System.getProperties().remove("bar");
}
}
@Test
public void testReplaceFromEnv() {
Map<String,String> env = System.getenv();
if (env.containsKey("PATH")) {
String text = "${PATH}";
assertThat(SystemPropertyUtils.resolvePlaceholders(text)).isEqualTo(env.get("PATH"));
}
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
cmd/init_integration_test.go | // +build integration
// +build !shcd
/*
Copyright 2021 Hewlett Packard Enterprise Development LP
*/
package cmd
import (
"log"
"os"
"path/filepath"
"strings"
"testing"
"github.com/Cray-HPE/cray-site-init/pkg/csi"
)
// ConfigInitTest runs 'csi config init' on a system passed to it using the cobra command object
func ConfigInitTest(system string) {
var cwd string
// pseudo-pushd: Move into the directory (this should have been created in get_test.go)
confdir, _ := filepath.Abs(system)
os.Chdir(filepath.Join(confdir))
cwd, _ = os.Getwd()
log.Printf("pushd ===> %s", cwd)
// Runs 'config init' without any arguments (this requires system_config.yaml to be present in the dir)
// csi.ExecuteCommandC(rootCmd, []string{"config", "init"})
conf := confdir + "/system_config.yaml"
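	// Equivalent CLI invocation (a sketch, with <confdir> standing in for the real path):
	//   csi --config <confdir>/system_config.yaml config init --cmn-gateway 10.99.0.1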
csi.ExecuteCommandC(rootCmd, []string{"--config", conf, "config", "init", "--cmn-gateway", "10.99.0.1"})
// pseudo-popd
os.Chdir(filepath.Join(".."))
cwd, _ = os.Getwd()
log.Printf("popd ===> %s", cwd)
}
func TestConfigInit_GeneratePayload(t *testing.T) {
var systems []string
// The sample data set of systems names to use
// For now, these should align with the system names in get_test.go
if os.Getenv("CSI_SHASTA_SYSTEMS") != "" {
systems = strings.Split(os.Getenv("CSI_SHASTA_SYSTEMS"), " ")
} else {
t.Errorf("CSI_SHASTA_SYSTEMS needs to be set")
}
for _, s := range systems {
// Run 'config init' on each system's set of seed files
// These files should have been previously gathered from get_test.go
ConfigInitTest(s)
}
}
// TODO: test 'config init' using command line flags instead of system_config.yaml
// TODO: test--with some tolerance--that generated output is similar to what we know is a good config
| [
"\"CSI_SHASTA_SYSTEMS\"",
"\"CSI_SHASTA_SYSTEMS\""
]
| []
| [
"CSI_SHASTA_SYSTEMS"
]
| [] | ["CSI_SHASTA_SYSTEMS"] | go | 1 | 0 | |
server/server.go | // Copyright 2019 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package server implements an environment for running LUCI servers.
//
// It interprets command line flags and initializes the serving environment with
// following services available via context.Context:
// * go.chromium.org/luci/common/logging: Logging.
// * go.chromium.org/luci/common/trace: Tracing.
// * go.chromium.org/luci/server/caching: Process cache.
// * go.chromium.org/luci/server/secrets: Secrets (optional).
// * go.chromium.org/luci/server/settings: Access to app settings (optional).
// * go.chromium.org/luci/server/auth: Making authenticated calls.
// * go.chromium.org/luci/server/redisconn: Redis connection pool (optional).
// * go.chromium.org/gae: Datastore (optional).
//
// Usage example:
//
// func main() {
// server.Main(nil, func(srv *server.Server) error {
// // Initialize global state, change root context.
// if err := initializeGlobalStuff(srv.Context); err != nil {
// return err
// }
// srv.Context = injectGlobalStuff(srv.Context)
//
// // Install regular HTTP routes.
// srv.Routes.GET("/", router.MiddlewareChain{}, func(c *router.Context) {
// // ...
// })
//
// // Install pRPC services.
// servicepb.RegisterSomeServer(srv.PRPC, &SomeServer{})
// return nil
// })
// }
package server
import (
"context"
cryptorand "crypto/rand"
"encoding/binary"
"encoding/hex"
"encoding/json"
"flag"
"fmt"
"math/rand"
"net"
"net/http"
"net/http/pprof"
"os"
"strings"
"sync"
"sync/atomic"
"time"
"cloud.google.com/go/datastore"
"github.com/gomodule/redigo/redis"
"golang.org/x/oauth2"
"google.golang.org/api/option"
"contrib.go.opencensus.io/exporter/stackdriver"
"go.opencensus.io/exporter/stackdriver/propagation"
octrace "go.opencensus.io/trace"
"go.chromium.org/gae/impl/cloud"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/data/caching/cacheContext"
"go.chromium.org/luci/common/data/rand/mathrand"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/iotools"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/common/logging/gkelogger"
"go.chromium.org/luci/common/logging/gologger"
"go.chromium.org/luci/common/system/signals"
"go.chromium.org/luci/common/trace"
tsmoncommon "go.chromium.org/luci/common/tsmon"
"go.chromium.org/luci/common/tsmon/metric"
"go.chromium.org/luci/common/tsmon/target"
"go.chromium.org/luci/hardcoded/chromeinfra" // should be used ONLY in Main()
"go.chromium.org/luci/grpc/discovery"
"go.chromium.org/luci/grpc/grpcmon"
"go.chromium.org/luci/grpc/grpcutil"
"go.chromium.org/luci/grpc/prpc"
"go.chromium.org/luci/web/gowrappers/rpcexplorer"
clientauth "go.chromium.org/luci/auth"
"go.chromium.org/luci/server/auth"
"go.chromium.org/luci/server/auth/authdb"
"go.chromium.org/luci/server/auth/authdb/dump"
"go.chromium.org/luci/server/caching"
"go.chromium.org/luci/server/internal"
"go.chromium.org/luci/server/middleware"
"go.chromium.org/luci/server/portal"
"go.chromium.org/luci/server/redisconn"
"go.chromium.org/luci/server/router"
"go.chromium.org/luci/server/secrets"
"go.chromium.org/luci/server/settings"
"go.chromium.org/luci/server/tsmon"
)
// DefaultOAuthScopes is a list of OAuth scopes we want a local user to grant us
// when running the server locally.
var DefaultOAuthScopes = []string{
"https://www.googleapis.com/auth/cloud-platform", // for accessing GCP services
"https://www.googleapis.com/auth/userinfo.email", // for accessing LUCI services
}
const (
// Path of the health check endpoint.
healthEndpoint = "/healthz"
// Log a warning if health check is slower than this.
healthTimeLogThreshold = 50 * time.Millisecond
)
var (
versionMetric = metric.NewString(
"server/version",
"Version of the running container image (taken from -container-image-id).",
nil)
)
// Main initializes the server and runs its serving loop until SIGTERM.
//
// Registers all options in the default flag set and uses `flag.Parse` to parse
// them. If 'opts' is nil, the default options will be used.
//
// On errors, logs them and aborts the process with non-zero exit code.
func Main(opts *Options, init func(srv *Server) error) {
mathrand.SeedRandomly()
if opts == nil {
opts = &Options{
ClientAuth: chromeinfra.DefaultAuthOptions(),
}
}
opts.Register(flag.CommandLine)
flag.Parse()
srv, err := New(*opts)
if err != nil {
srv.Fatal(err)
}
if err = init(srv); err != nil {
srv.Fatal(err)
}
if err = srv.ListenAndServe(); err != nil {
srv.Fatal(err)
}
}
// Options are exposed as command line flags.
type Options struct {
Prod bool // must be set when running in production
HTTPAddr string // address to bind the main listening socket to
AdminAddr string // address to bind the admin socket to
RootSecretPath string // path to a JSON file with the root secret key
SettingsPath string // path to a JSON file with app settings
ClientAuth clientauth.Options // base settings for client auth options
TokenCacheDir string // where to cache auth tokens (optional)
AuthDBPath string // if set, load AuthDB from a file
AuthServiceHost string // hostname of an Auth Service to use
AuthDBDump string // Google Storage path to fetch AuthDB dumps from
AuthDBSigner string // service account that signs AuthDB dumps
RedisAddr string // Redis server to connect to as "host:port" (optional)
RedisDB int // index of a logical Redis DB to use by default (optional)
CloudProject string // name of hosting Google Cloud Project
TraceSampling string // what portion of traces to upload to StackDriver
TsMonAccount string // service account to flush metrics as
TsMonServiceName string // service name of tsmon target
TsMonJobName string // job name of tsmon target
ContainerImageID string // ID of the container image with this binary, for logs (optional)
testCtx context.Context // base context for tests
testSeed int64 // used to seed rng in tests
testStdout gkelogger.LogEntryWriter // mocks stdout in tests
testStderr gkelogger.LogEntryWriter // mocks stderr in tests
testListeners map[string]net.Listener // addr => net.Listener, for tests
testAuthDB authdb.DB // AuthDB to use in tests
}
// Register registers the command line flags.
func (o *Options) Register(f *flag.FlagSet) {
if o.HTTPAddr == "" {
o.HTTPAddr = "127.0.0.1:8800"
}
if o.AdminAddr == "" {
o.AdminAddr = "127.0.0.1:8900"
}
f.BoolVar(&o.Prod, "prod", o.Prod, "Switch the server into production mode")
f.StringVar(&o.HTTPAddr, "http-addr", o.HTTPAddr, "Address to bind the main listening socket to")
f.StringVar(&o.AdminAddr, "admin-addr", o.AdminAddr, "Address to bind the admin socket to")
f.StringVar(&o.RootSecretPath, "root-secret-path", o.RootSecretPath, "Path to a JSON file with the root secret key, or literal \":dev\" for development not-really-a-secret")
f.StringVar(&o.SettingsPath, "settings-path", o.SettingsPath, "Path to a JSON file with app settings")
f.StringVar(
&o.ClientAuth.ServiceAccountJSONPath,
"service-account-json",
o.ClientAuth.ServiceAccountJSONPath,
"Path to a JSON file with service account private key",
)
f.StringVar(
&o.ClientAuth.ActAsServiceAccount,
"act-as",
o.ClientAuth.ActAsServiceAccount,
"Act as this service account",
)
f.StringVar(
&o.TokenCacheDir,
"token-cache-dir",
o.TokenCacheDir,
"Where to cache auth tokens (optional)",
)
f.StringVar(
&o.AuthDBPath,
"auth-db-path",
o.AuthDBPath,
"If set, load AuthDB text proto from this file (incompatible with -auth-service-host)",
)
f.StringVar(
&o.AuthServiceHost,
"auth-service-host",
o.AuthServiceHost,
"Hostname of an Auth Service to use (incompatible with -auth-db-path)",
)
f.StringVar(
&o.AuthDBDump,
"auth-db-dump",
o.AuthDBDump,
"Google Storage path to fetch AuthDB dumps from. Default is gs://<auth-service-host>/auth-db",
)
f.StringVar(
&o.AuthDBSigner,
"auth-db-signer",
o.AuthDBSigner,
"Service account that signs AuthDB dumps. Default is derived from -auth-service-host if it is *.appspot.com",
)
f.StringVar(
&o.RedisAddr,
"redis-addr",
o.RedisAddr,
"Redis server to connect to as \"host:port\" (optional)",
)
f.IntVar(
&o.RedisDB,
"redis-db",
o.RedisDB,
"Index of a logical Redis DB to use by default (optional)",
)
f.StringVar(
&o.CloudProject,
"cloud-project",
o.CloudProject,
"Name of hosting Google Cloud Project (optional)",
)
f.StringVar(
&o.TraceSampling,
"trace-sampling",
o.TraceSampling,
"What portion of traces to upload to StackDriver. Either a percent (i.e. '0.1%') or a QPS (i.e. '1qps'). Default is 0.1qps.",
)
f.StringVar(
&o.TsMonAccount,
"ts-mon-account",
o.TsMonAccount,
"Collect and flush tsmon metrics using this account for auth (disables tsmon if not set)",
)
f.StringVar(
&o.TsMonServiceName,
"ts-mon-service-name",
o.TsMonServiceName,
"Service name of tsmon target (disables tsmon if not set)",
)
f.StringVar(
&o.TsMonJobName,
"ts-mon-job-name",
o.TsMonJobName,
"Job name of tsmon target (disables tsmon if not set)",
)
f.StringVar(
&o.ContainerImageID,
"container-image-id",
o.ContainerImageID,
"ID of the container image with this binary, for logs (optional)",
)
}
// imageVersion extracts image tag or digest from ContainerImageID.
//
// This is eventually reported as a value of 'server/version' metric.
//
// Returns "unknown" if ContainerImageID is empty or malformed.
func (o *Options) imageVersion() string {
// Recognize "<path>@sha256:<digest>" and "<path>:<tag>".
idx := strings.LastIndex(o.ContainerImageID, "@")
if idx == -1 {
idx = strings.LastIndex(o.ContainerImageID, ":")
}
if idx == -1 {
return "unknown"
}
return o.ContainerImageID[idx+1:]
}
// shouldEnableTracing is true if options indicate we should enable tracing.
func (o *Options) shouldEnableTracing() bool {
switch {
case o.CloudProject == "":
return false // nowhere to upload traces to
case !o.Prod && o.TraceSampling == "":
return false // in dev mode don't upload samples by default
default:
return true
}
}
// Server is responsible for initializing and launching the serving environment.
//
// Generally assumed to be a singleton: do not launch multiple Server instances
// within the same process, use RegisterHTTP instead if you want to expose
// multiple ports.
//
// Doesn't do TLS. Should be sitting behind a load balancer that terminates
// TLS.
type Server struct {
// Context is the root context used by all requests and background activities.
//
// Can be replaced (by a derived context) before ListenAndServe call, for
// example to inject values accessible to all request handlers.
Context context.Context
// Routes is HTTP routes exposed via HTTPAddr port.
//
// Should be populated before ListenAndServe call.
Routes *router.Router
// PRPC is pRPC service with APIs exposed via HTTPAddr port.
//
// Should be populated before ListenAndServe call.
PRPC *prpc.Server
// Options is a copy of options passed to New.
Options Options
startTime time.Time // for calculating uptime for /healthz
hostname string // obtained from os.Hostname in New
lastReqTime atomic.Value // time.Time when the last request finished
stdout gkelogger.LogEntryWriter // for logging to stdout, nil in dev mode
stderr gkelogger.LogEntryWriter // for logging to stderr, nil in dev mode
m sync.Mutex // protects fields below
httpSrv []*http.Server // all registered HTTP servers
started bool // true inside and after ListenAndServe
stopped bool // true inside and after Shutdown
ready chan struct{} // closed right before starting the serving loop
done chan struct{} // closed after Shutdown returns
rndM sync.Mutex // protects rnd
rnd *rand.Rand // used to generate trace and operation IDs
bgrDone chan struct{} // closed to stop background activities
bgrWg sync.WaitGroup // waits for RunInBackground goroutines to stop
cleanupM sync.Mutex // protects 'cleanup' and actual cleanup critical section
cleanup []func()
secrets *secrets.DerivedStore // indirectly used to derive XSRF tokens and such, may be nil
settings *settings.ExternalStorage // backing store for settings.Get(...) API
tsmon *tsmon.State // manages flushing of tsmon metrics
sampler octrace.Sampler // trace sampler to use for top level spans
authM sync.RWMutex
authPerScope map[string]scopedAuth // " ".join(scopes) => ...
authDB atomic.Value // last known good authdb.DB instance
redisPool *redis.Pool // nil if redis is not used
dsClient *datastore.Client // nil if datastore is not used
}
// scopedAuth holds TokenSource and Authenticator that produced it.
type scopedAuth struct {
source oauth2.TokenSource
authen *clientauth.Authenticator
}
// New constructs a new server instance.
//
// It hosts one or more HTTP servers and starts and stops them in unison. It is
// also responsible for preparing contexts for incoming requests.
//
// On errors returns partially initialized server (always non-nil). At least
// its logging will be configured and can be used to report the error. Trying
// to use such partially initialized server for anything else is undefined
// behavior.
func New(opts Options) (srv *Server, err error) {
seed := opts.testSeed
if seed == 0 {
if err := binary.Read(cryptorand.Reader, binary.BigEndian, &seed); err != nil {
panic(err)
}
}
ctx := opts.testCtx
if ctx == nil {
ctx = context.Background()
}
// Do this very early, so that various transports created during the
// initialization are already wrapped with tracing. The rest of the tracing
// infra (e.g. actual uploads) is initialized later in initTracing.
if opts.shouldEnableTracing() {
internal.EnableOpenCensusTracing()
}
srv = &Server{
Context: ctx,
Options: opts,
startTime: clock.Now(ctx).UTC(),
ready: make(chan struct{}),
done: make(chan struct{}),
rnd: rand.New(rand.NewSource(seed)),
bgrDone: make(chan struct{}),
authPerScope: map[string]scopedAuth{},
sampler: octrace.NeverSample(),
}
// Cleanup what we can on failures.
defer func() {
if err != nil {
srv.runCleanup()
}
}()
// Logging is needed to report any errors during the early initialization.
srv.initLogging()
// Need the hostname (e.g. pod name on k8s) for logs and metrics.
srv.hostname, err = os.Hostname()
if err != nil {
return srv, errors.Annotate(err, "failed to get own hostname").Err()
}
logging.Infof(srv.Context, "Running on %s", srv.hostname)
if opts.ContainerImageID != "" {
logging.Infof(srv.Context, "Container image is %s", opts.ContainerImageID)
}
// Configure base server subsystems by injecting them into the root context
// inherited later by all requests.
srv.Context = caching.WithProcessCacheData(srv.Context, caching.NewProcessCacheData())
if err := srv.initSecrets(); err != nil {
return srv, errors.Annotate(err, "failed to initialize secrets store").Err()
}
if err := srv.initSettings(); err != nil {
return srv, errors.Annotate(err, "failed to initialize settings").Err()
}
if err := srv.initAuth(); err != nil {
return srv, errors.Annotate(err, "failed to initialize auth").Err()
}
if err := srv.initTSMon(); err != nil {
return srv, errors.Annotate(err, "failed to initialize tsmon").Err()
}
if err := srv.initTracing(); err != nil {
return srv, errors.Annotate(err, "failed to initialize tracing").Err()
}
if err := srv.initRedis(); err != nil {
return srv, errors.Annotate(err, "failed to initialize Redis pool").Err()
}
if err := srv.initDatastoreClient(); err != nil {
return srv, errors.Annotate(err, "failed to initialize Datastore client").Err()
}
if err := srv.initCloudContext(); err != nil {
return srv, errors.Annotate(err, "failed to initialize cloud context").Err()
}
if err := srv.initMainPort(); err != nil {
return srv, errors.Annotate(err, "failed to initialize the main port").Err()
}
if err := srv.initAdminPort(); err != nil {
return srv, errors.Annotate(err, "failed to initialize the admin port").Err()
}
return srv, nil
}
// RegisterHTTP prepares an additional HTTP server.
//
// Can be used to open more listening HTTP ports (in addition to opts.HTTPAddr
// and opts.AdminAddr). Returns a router that should be populated with routes
// exposed through the added server.
//
// Should be called before ListenAndServe (panics otherwise).
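//
// A minimal usage sketch (the extra port and the handler are hypothetical):
//
//   internal := srv.RegisterHTTP("127.0.0.1:8801")
//   internal.GET("/status", router.MiddlewareChain{}, func(c *router.Context) {
//       c.Writer.Write([]byte("ok"))
//   })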
func (s *Server) RegisterHTTP(addr string) *router.Router {
s.m.Lock()
defer s.m.Unlock()
if s.started {
s.Fatal(errors.Reason("the server has already been started").Err())
}
mw := router.NewMiddlewareChain(
s.rootMiddleware, // prepares the per-request context
middleware.WithPanicCatcher, // transforms panics into HTTP 500
)
if s.tsmon != nil {
mw = mw.Extend(s.tsmon.Middleware) // collect HTTP requests metrics
}
// Setup middleware chain used by ALL requests.
r := router.New()
r.Use(mw)
// Mandatory health check/readiness probe endpoint.
r.GET(healthEndpoint, router.MiddlewareChain{}, func(c *router.Context) {
c.Writer.Write([]byte(s.healthResponse(c.Context)))
})
// Add NotFound handler wrapped in our middlewares so that unrecognized
// requests are at least logged. If we don't do that they'll be handled
// completely silently and this is very confusing when debugging 404s.
r.NotFound(router.MiddlewareChain{}, func(c *router.Context) {
http.NotFound(c.Writer, c.Request)
})
s.httpSrv = append(s.httpSrv, &http.Server{
Addr: addr,
Handler: r,
ErrorLog: nil, // TODO(vadimsh): Log via 'logging' package.
})
return r
}
// RunInBackground launches the given callback in a separate goroutine right
// before starting the serving loop.
//
// If the server is already running, launches it right away. If the server
// fails to start, the goroutines will never be launched.
//
// Should be used for background asynchronous activities like reloading configs.
//
// All log lines emitted by the callback are annotated with an "activity" field
// which can be arbitrary, but by convention has format "<namespace>.<name>",
// where "luci" namespace is reserved for internal activities.
//
// The context passed to the callback is canceled when the server is shutting
// down. It is expected the goroutine will exit soon after the context is
// canceled.
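//
// A minimal usage sketch (the activity name and the refresh helper are
// hypothetical):
//
//   srv.RunInBackground("myapp.refresher", func(ctx context.Context) {
//       for {
//           if r := <-clock.After(ctx, time.Minute); r.Err != nil {
//               return // the context is canceled, the server is shutting down
//           }
//           refreshState(ctx) // hypothetical periodic work
//       }
//   })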
func (s *Server) RunInBackground(activity string, f func(context.Context)) {
s.bgrWg.Add(1)
go func() {
defer s.bgrWg.Done()
select {
case <-s.ready:
// Construct the context after the server is fully initialized. Cancel it
// as soon as bgrDone is signaled.
ctx, cancel := context.WithCancel(s.Context)
ctx = logging.SetField(ctx, "activity", activity)
ctx = cacheContext.Wrap(ctx)
defer cancel()
go func() {
select {
case <-s.bgrDone:
cancel()
case <-ctx.Done():
}
}()
f(ctx)
case <-s.bgrDone:
// the server is closed, no need to run f() anymore
}
}()
}
// ListenAndServe launches the serving loop.
//
// Blocks forever or until the server is stopped via Shutdown (from another
// goroutine or from a SIGTERM handler). Returns nil if the server was shutdown
// correctly or an error if it failed to start or unexpectedly died. The error
// is logged inside.
//
// Should be called only once. Panics otherwise.
func (s *Server) ListenAndServe() error {
s.m.Lock()
wasRunning := s.started
httpSrv := append(make([]*http.Server, 0, len(s.httpSrv)), s.httpSrv...)
s.started = true
s.m.Unlock()
if wasRunning {
s.Fatal(errors.Reason("the server has already been started").Err())
}
// Put monitoring interceptor on top of whatever interceptors were installed
// by the user of Server via public s.PRPC.UnaryServerInterceptor.
s.PRPC.UnaryServerInterceptor = grpcmon.NewUnaryServerInterceptor(
grpcutil.NewUnaryServerPanicCatcher(
s.PRPC.UnaryServerInterceptor,
),
)
// Catch SIGTERM while inside this function. Upon receiving SIGTERM, wait
// until the pod is removed from the load balancer before actually shutting
// down and refusing new connections. If we shutdown immediately, some clients
// may see connection errors, because they are not aware yet the server is
// closing: Pod shutdown sequence and Endpoints list updates are racing with
// each other, we want Endpoints list updates to win, i.e. we want the pod to
// actually be fully alive as long as it is still referenced in Endpoints
// list. We can't guarantee this, but we can improve chances.
stop := signals.HandleInterrupt(func() {
if s.Options.Prod {
s.waitUntilNotServing()
}
s.Shutdown()
})
defer stop()
// Log how long it took from 'New' to the serving loop.
logging.Infof(s.Context, "Startup done in %s", clock.Now(s.Context).Sub(s.startTime))
// Unblock all pending RunInBackground goroutines, so they can start.
close(s.ready)
// Run serving loops in parallel.
errs := make(errors.MultiError, len(httpSrv))
wg := sync.WaitGroup{}
wg.Add(len(httpSrv))
for i, srv := range httpSrv {
logging.Infof(s.Context, "Serving http://%s", srv.Addr)
i := i
srv := srv
go func() {
defer wg.Done()
if err := s.serveLoop(srv); err != http.ErrServerClosed {
logging.WithError(err).Errorf(s.Context, "Server at %s failed", srv.Addr)
errs[i] = err
s.Shutdown() // close all other servers
}
}()
}
wg.Wait()
	// Per http.Server docs, we end up here *immediately* after the Shutdown call
	// was initiated. Some requests can still be in-flight. We block until they
	// are done (as indicated by the Shutdown call itself exiting).
logging.Infof(s.Context, "Waiting for the server to stop...")
<-s.done
logging.Infof(s.Context, "The serving loop stopped, running the final cleanup...")
s.runCleanup()
logging.Infof(s.Context, "The server has stopped")
if errs.First() != nil {
return errs
}
return nil
}
// Shutdown gracefully stops the server if it was running.
//
// Blocks until the server is stopped. Can be called multiple times.
func (s *Server) Shutdown() {
s.m.Lock()
defer s.m.Unlock()
if s.stopped {
return
}
logging.Infof(s.Context, "Shutting down the server...")
// Tell all RunInBackground goroutines to stop.
close(s.bgrDone)
// Stop all http.Servers in parallel. Each Shutdown call blocks until the
// corresponding server is stopped.
wg := sync.WaitGroup{}
wg.Add(len(s.httpSrv))
for _, srv := range s.httpSrv {
srv := srv
go func() {
defer wg.Done()
srv.Shutdown(s.Context)
}()
}
wg.Wait()
// Wait for all background goroutines to stop.
s.bgrWg.Wait()
// Notify ListenAndServe that it can exit now.
s.stopped = true
close(s.done)
}
// Fatal logs the error and immediately shuts down the process with exit code 3.
//
// No cleanup is performed. Deferred statements are not run. Not recoverable.
func (s *Server) Fatal(err error) {
errors.Log(s.Context, err)
os.Exit(3)
}
// healthResponse prepares a text/plain response for the health check endpoints.
//
// It additionally contains some easy to obtain information that may help in
// debugging deployments.
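//
// A typical response looks like (all values are illustrative):
//
//   OK
//
//   uptime: 26h3m20s
//   image: <tag-or-digest>
//
//   service: my-service
//   job: my-job
//   host: my-pod-0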
func (s *Server) healthResponse(c context.Context) string {
maybeEmpty := func(s string) string {
if s == "" {
return "<unknown>"
}
return s
}
return strings.Join([]string{
"OK",
"",
"uptime: " + clock.Now(c).Sub(s.startTime).String(),
"image: " + maybeEmpty(s.Options.ContainerImageID),
"",
"service: " + maybeEmpty(s.Options.TsMonServiceName),
"job: " + maybeEmpty(s.Options.TsMonJobName),
"host: " + s.hostname,
"",
}, "\n")
}
// serveLoop binds the socket and launches the serving loop.
//
// Basically srv.ListenAndServe with some testing helpers.
func (s *Server) serveLoop(srv *http.Server) error {
// If not running tests, let http.Server bind the socket as usual.
if s.Options.testListeners == nil {
return srv.ListenAndServe()
}
// In test mode the listener MUST be prepared already.
	if l := s.Options.testListeners[srv.Addr]; l != nil {
return srv.Serve(l)
}
return errors.Reason("test listener is not set").Err()
}
// waitUntilNotServing is called during the graceful shutdown and it tries to
// figure out when the traffic stops flowing to the server (i.e. when it is
// removed from the load balancer).
//
// It's a heuristic optimization for the case when the load balancer keeps
// sending traffic to a terminating Pod for some time after the Pod entered
// "Terminating" state. It can happen due to latencies in Endpoints list
// updates. We want to keep the listening socket open as long as there are
// incoming requests (but no longer than 1 min).
//
// Effective only for servers that serve >0.2 QPS in a steady state.
func (s *Server) waitUntilNotServing() {
logging.Infof(s.Context, "Received SIGTERM, waiting for the traffic to stop...")
deadline := clock.Now(s.Context).Add(time.Minute)
for {
now := clock.Now(s.Context)
lastReq, ok := s.lastReqTime.Load().(time.Time)
if !ok || now.Sub(lastReq) > 5*time.Second {
logging.Infof(s.Context, "No requests in last 5 sec, proceeding with the shutdown...")
break
}
if now.After(deadline) {
logging.Warningf(s.Context, "Gave up waiting for the traffic to stop, proceeding with the shutdown...")
break
}
time.Sleep(100 * time.Millisecond)
}
}
// RegisterCleanup registers a callback that is run in ListenAndServe after the
// server has exited the serving loop.
//
// Registering a new cleanup callback from within a cleanup causes a deadlock,
// don't do that.
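//
// A minimal usage sketch (the closed client is hypothetical):
//
//   srv.RegisterCleanup(func() {
//       if err := client.Close(); err != nil {
//           logging.Warningf(srv.Context, "Failed to close the client - %s", err)
//       }
//   })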
func (s *Server) RegisterCleanup(cb func()) {
s.cleanupM.Lock()
defer s.cleanupM.Unlock()
s.cleanup = append(s.cleanup, cb)
}
// runCleanup runs all registered cleanup functions (sequentially in reverse
// order).
func (s *Server) runCleanup() {
s.cleanupM.Lock()
defer s.cleanupM.Unlock()
for i := len(s.cleanup) - 1; i >= 0; i-- {
s.cleanup[i]()
}
}
// genUniqueID returns pseudo-random hex string of given even length.
func (s *Server) genUniqueID(l int) string {
b := make([]byte, l/2)
s.rndM.Lock()
s.rnd.Read(b)
s.rndM.Unlock()
return hex.EncodeToString(b)
}
var cloudTraceFormat = propagation.HTTPFormat{}
// rootMiddleware prepares the per-request context.
func (s *Server) rootMiddleware(c *router.Context, next router.Handler) {
// Wrap the request in a tracing span. The span is closed in the defer below
// (where we know the response status code). If this is a health check, open
// the span nonetheless, but do not record it (health checks are spammy and
// not interesting). This way the code is simpler ('span' is always non-nil
// and has TraceID). Additionally if some of health check code opens a span
// of its own, it will be ignored (as a child of not-recorded span).
healthCheck := isHealthCheckRequest(c.Request)
ctx, span := s.startRequestSpan(s.Context, c.Request, healthCheck)
// Associate all logs with the span via its Trace ID.
spanCtx := span.SpanContext()
traceID := hex.EncodeToString(spanCtx.TraceID[:])
// Track how many response bytes are sent and what status is set.
rw := iotools.NewResponseWriter(c.Writer)
c.Writer = rw
// Observe maximum emitted severity to use it as an overall severity for the
// request log entry.
severityTracker := gkelogger.SeverityTracker{Out: s.stdout}
// Log the overall request information when the request finishes. Use TraceID
// to correlate this log entry with entries emitted by the request handler
// below.
started := clock.Now(s.Context)
defer func() {
now := clock.Now(s.Context)
latency := now.Sub(started)
statusCode := rw.Status()
if healthCheck {
// Do not log fast health check calls AT ALL, they just spam logs.
if latency < healthTimeLogThreshold {
return
}
// Emit a warning if the health check is slow, this likely indicates
// high CPU load.
logging.Warningf(c.Context, "Health check is slow: %s > %s", latency, healthTimeLogThreshold)
} else {
s.lastReqTime.Store(now)
}
entry := gkelogger.LogEntry{
Severity: severityTracker.MaxSeverity(),
Time: gkelogger.FormatTime(now),
TraceID: traceID,
TraceSampled: span.IsRecordingEvents(),
SpanID: spanCtx.SpanID.String(), // the top-level span ID
RequestInfo: &gkelogger.RequestInfo{
Method: c.Request.Method,
URL: getRequestURL(c.Request),
Status: statusCode,
RequestSize: fmt.Sprintf("%d", c.Request.ContentLength),
ResponseSize: fmt.Sprintf("%d", rw.ResponseSize()),
UserAgent: c.Request.UserAgent(),
RemoteIP: getRemoteIP(c.Request),
Latency: fmt.Sprintf("%fs", latency.Seconds()),
},
}
if s.Options.Prod {
s.stderr.Write(&entry)
} else {
logging.Infof(s.Context, "%d %s %q (%s)",
entry.RequestInfo.Status,
entry.RequestInfo.Method,
entry.RequestInfo.URL,
entry.RequestInfo.Latency,
)
}
span.AddAttributes(
octrace.Int64Attribute("/http/status_code", int64(statusCode)),
octrace.Int64Attribute("/http/request/size", c.Request.ContentLength),
octrace.Int64Attribute("/http/response/size", rw.ResponseSize()),
)
span.End()
}()
ctx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
// Make the request logger emit log entries associated with the tracing span.
if s.Options.Prod {
annotateWithSpan := func(ctx context.Context, e *gkelogger.LogEntry) {
// Note: here 'span' is some inner span from where logging.Log(...) was
// called. We annotate log lines with spans that emitted them.
if span := octrace.FromContext(ctx); span != nil {
e.SpanID = span.SpanContext().SpanID.String()
}
}
ctx = logging.SetFactory(ctx, gkelogger.Factory(&severityTracker, gkelogger.LogEntry{
TraceID: traceID,
Operation: &gkelogger.Operation{ID: s.genUniqueID(32)},
}, annotateWithSpan))
}
ctx = caching.WithRequestCache(ctx)
c.Context = cacheContext.Wrap(ctx)
next(c)
}
// initLogging initializes the server logging.
//
// Called very early during server startup process. Many server fields may not
// be initialized yet, be careful.
//
// When running in production uses the ugly looking JSON format that is hard to
// read by humans but which is parsed by google-fluentd.
//
// To support per-request log grouping in Stackdriver Logs UI emit two log
// streams:
// * stderr: top-level HTTP requests (conceptually "200 GET /path").
// * stdout: all application logs, correlated with HTTP logs in a particular
// way (see the link below).
//
// This technique is primarily intended for GAE Flex, but it works anywhere:
// https://cloud.google.com/appengine/articles/logging#linking_app_logs_and_requests
//
// Stderr stream is also used to log all global activities that happens
// outside of any request handler (stuff like initialization, shutdown,
// background goroutines, etc).
//
// In non-production mode use the human-friendly format and a single log stream.
func (s *Server) initLogging() {
if !s.Options.Prod {
s.Context = gologger.StdConfig.Use(s.Context)
s.Context = logging.SetLevel(s.Context, logging.Debug)
return
}
if s.Options.testStdout != nil {
s.stdout = s.Options.testStdout
} else {
s.stdout = &gkelogger.Sink{Out: os.Stdout}
}
if s.Options.testStderr != nil {
s.stderr = s.Options.testStderr
} else {
s.stderr = &gkelogger.Sink{Out: os.Stderr}
}
s.Context = logging.SetFactory(s.Context,
gkelogger.Factory(s.stderr, gkelogger.LogEntry{
Operation: &gkelogger.Operation{
ID: s.genUniqueID(32), // correlate all global server logs together
},
}, nil),
)
s.Context = logging.SetLevel(s.Context, logging.Debug)
}
// initSecrets reads the initial root secret (if provided) and launches a job to
// periodically reread it.
//
// Failing to read the secret when the server starts is fatal. But if the
// server managed to start successfully but can't re-read the secret later
// (e.g. the file disappeared), it logs the error and keeps using the cached
// secret.
func (s *Server) initSecrets() error {
secret, err := s.readRootSecret()
switch {
case err != nil:
return err
case secret == nil:
return nil
}
s.secrets = secrets.NewDerivedStore(*secret)
s.Context = secrets.Set(s.Context, s.secrets)
s.RunInBackground("luci.secrets", func(c context.Context) {
for {
if r := <-clock.After(c, time.Minute); r.Err != nil {
return // the context is canceled
}
secret, err := s.readRootSecret()
if secret == nil {
logging.WithError(err).Errorf(c, "Failed to re-read the root secret, using the cached one")
} else {
s.secrets.SetRoot(*secret)
}
}
})
return nil
}
// readRootSecret reads the secret from a path specified via -root-secret-path.
//
// Returns nil if the secret is not configured. Returns an error if the secret
// is configured, but could not be loaded.
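//
// The file is expected to be a JSON object with at least a non-empty `current`
// field, e.g. (a sketch; byte fields are base64-encoded per encoding/json):
//
//   {"current": "aGVsbG8tcm9vdC1zZWNyZXQ="}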
func (s *Server) readRootSecret() (*secrets.Secret, error) {
switch {
case s.Options.RootSecretPath == "":
return nil, nil // not configured
case s.Options.RootSecretPath == ":dev" && !s.Options.Prod:
return &secrets.Secret{Current: []byte("dev-non-secret")}, nil
case s.Options.RootSecretPath == ":dev" && s.Options.Prod:
return nil, errors.Reason("-root-secret-path \":dev\" is not allowed in production mode").Err()
}
f, err := os.Open(s.Options.RootSecretPath)
if err != nil {
return nil, err
}
defer f.Close()
secret := &secrets.Secret{}
if err = json.NewDecoder(f).Decode(secret); err != nil {
return nil, errors.Annotate(err, "not a valid JSON").Err()
}
if len(secret.Current) == 0 {
return nil, errors.Reason("`current` field in the root secret is empty, this is not allowed").Err()
}
return secret, nil
}
// initSettings reads the initial settings and launches a job to periodically
// reread them.
//
// Does nothing if -settings-path is not set: settings are optional. If
// -settings-path is set, it must point to a structurally valid JSON file or
// the server will fail to start.
func (s *Server) initSettings() error {
if !s.Options.Prod && s.Options.SettingsPath == "" {
// In dev mode use settings backed by memory.
s.Context = settings.Use(s.Context, settings.New(&settings.MemoryStorage{}))
} else {
		// In prod mode use settings backed by a file (if any).
s.settings = &settings.ExternalStorage{}
s.Context = settings.Use(s.Context, settings.New(s.settings))
}
if s.Options.SettingsPath == "" {
return nil
}
if err := s.loadSettings(s.Context); err != nil {
return err
}
s.RunInBackground("luci.settings", func(c context.Context) {
for {
if r := <-clock.After(c, 30*time.Second); r.Err != nil {
return // the context is canceled
}
if err := s.loadSettings(c); err != nil {
logging.WithError(err).Errorf(c, "Failed to reload settings, using the cached ones")
}
}
})
return nil
}
// loadSettings loads settings from a path specified via -settings-path.
func (s *Server) loadSettings(c context.Context) error {
f, err := os.Open(s.Options.SettingsPath)
if err != nil {
return errors.Annotate(err, "failed to open settings file").Err()
}
defer f.Close()
return s.settings.Load(c, f)
}
// initAuth initializes auth system by preparing the context, pre-warming caches
// and verifying auth tokens can actually be minted (i.e. supplied credentials
// are valid).
func (s *Server) initAuth() error {
// Initialize the state in the context.
s.Context = auth.Initialize(s.Context, &auth.Config{
DBProvider: func(context.Context) (authdb.DB, error) {
db, _ := s.authDB.Load().(authdb.DB) // refreshed asynchronously in refreshAuthDB
return db, nil
},
Signer: nil, // TODO(vadimsh): Implement.
AccessTokenProvider: s.getAccessToken,
AnonymousTransport: func(context.Context) http.RoundTripper { return http.DefaultTransport },
EndUserIP: getRemoteIP,
IsDevMode: !s.Options.Prod,
})
// The default value for ClientAuth.SecretsDir is usually hardcoded to point
// to where the token cache is located on developer machines (~/.config/...).
// This location often doesn't exist when running from inside a container.
// The token cache is also not really needed for production services that use
// service accounts (they don't need cached refresh tokens). So in production
// mode totally ignore default ClientAuth.SecretsDir and use whatever was
// passed as -token-cache-dir. If it is empty (default), then no on-disk token
// cache is used at all.
//
// If -token-cache-dir was explicitly set, always use it (even in dev mode).
// This is useful when running containers locally: developer's credentials
// on the host machine can be mounted inside the container.
if s.Options.Prod || s.Options.TokenCacheDir != "" {
s.Options.ClientAuth.SecretsDir = s.Options.TokenCacheDir
}
// Note: we initialize a token source for one arbitrary set of scopes here. In
// many practical cases this is sufficient to verify that credentials are
// valid. For example, when we use service account JSON key, if we can
// generate a token with *some* scope (meaning Cloud accepted our signature),
// we can generate tokens with *any* scope, since there's no restrictions on
// what scopes are accessible to a service account, as long as the private key
// is valid (which we just verified by generating some token).
au, err := s.initTokenSource(DefaultOAuthScopes)
if err != nil {
return errors.Annotate(err, "failed to initialize the token source").Err()
}
if _, err := au.source.Token(); err != nil {
return errors.Annotate(err, "failed to mint an initial token").Err()
}
// Report who we are running as. Useful when debugging access issues.
switch email, err := au.authen.GetEmail(); {
case err == nil:
logging.Infof(s.Context, "Running as %s", email)
case err == clientauth.ErrNoEmail:
logging.Warningf(s.Context, "Running as <unknown>, cautiously proceeding...")
case err != nil:
return errors.Annotate(err, "failed to check the service account email").Err()
}
// Now initialize the AuthDB (a database with groups and auth config) and
// start a goroutine to periodically refresh it.
if err := s.initAuthDB(); err != nil {
return errors.Annotate(err, "failed to initialize AuthDB").Err()
}
return nil
}
// getAccessToken generates OAuth access tokens to use for requests made by
// the server itself.
//
// It should implement caching internally. This function may be called very
// often, concurrently, from multiple goroutines.
func (s *Server) getAccessToken(c context.Context, scopes []string) (_ *oauth2.Token, err error) {
key := strings.Join(scopes, " ")
_, span := trace.StartSpan(c, "go.chromium.org/luci/server.GetAccessToken")
span.Attribute("cr.dev/scopes", key)
defer func() { span.End(err) }()
s.authM.RLock()
au, ok := s.authPerScope[key]
s.authM.RUnlock()
if !ok {
if au, err = s.initTokenSource(scopes); err != nil {
return nil, err
}
}
return au.source.Token()
}
// initTokenSource initializes a token source for a given list of scopes.
//
// If such token source was already initialized, just returns it and its
// parent authenticator.
func (s *Server) initTokenSource(scopes []string) (scopedAuth, error) {
key := strings.Join(scopes, " ")
s.authM.Lock()
defer s.authM.Unlock()
au, ok := s.authPerScope[key]
if ok {
return au, nil
}
// Use ClientAuth as a base template for options, so users of Server{...} have
// ability to customize various aspects of token generation.
opts := s.Options.ClientAuth
opts.Scopes = scopes
// Note: we are using the root context here (not request-scoped context passed
// to getAccessToken) because the authenticator *outlives* the request (by
// being cached), thus it needs a long-living context.
ctx := logging.SetField(s.Context, "activity", "luci.auth")
au.authen = clientauth.NewAuthenticator(ctx, clientauth.SilentLogin, opts)
var err error
au.source, err = au.authen.TokenSource()
if err != nil {
// ErrLoginRequired may happen only when running the server locally using
// developer's credentials. Let them know how the problem can be fixed.
if !s.Options.Prod && err == clientauth.ErrLoginRequired {
if opts.ActAsServiceAccount != "" {
// Per clientauth.Options doc, IAM is the scope required to use
// ActAsServiceAccount feature.
scopes = []string{clientauth.OAuthScopeIAM}
}
logging.Errorf(s.Context, "Looks like you run the server locally and it doesn't have credentials for some OAuth scopes")
logging.Errorf(s.Context, "Run the following command to set them up: ")
logging.Errorf(s.Context, " $ luci-auth login -scopes %q", strings.Join(scopes, " "))
}
return scopedAuth{}, err
}
s.authPerScope[key] = au
return au, nil
}
// initAuthDB interprets -auth-db-* flags and sets up fetching of AuthDB.
func (s *Server) initAuthDB() error {
// Check flags are compatible.
switch {
case s.Options.AuthDBPath != "" && s.Options.AuthServiceHost != "":
return errors.Reason("-auth-db-path and -auth-service-host can't be used together").Err()
case s.Options.Prod && s.Options.testAuthDB == nil && s.Options.AuthDBPath == "" && s.Options.AuthServiceHost == "":
return errors.Reason("a source of AuthDB is not configured: pass either -auth-db-path or -auth-service-host flag").Err()
case s.Options.AuthServiceHost == "" && (s.Options.AuthDBDump != "" || s.Options.AuthDBSigner != ""):
return errors.Reason("-auth-db-dump and -auth-db-signer can be used only with -auth-service-host").Err()
case s.Options.AuthDBDump != "" && !strings.HasPrefix(s.Options.AuthDBDump, "gs://"):
return errors.Reason("-auth-db-dump value should start with gs://, got %q", s.Options.AuthDBDump).Err()
case strings.Contains(s.Options.AuthServiceHost, "/"):
return errors.Reason("-auth-service-host should be a plain hostname, got %q", s.Options.AuthServiceHost).Err()
}
// Fill in defaults.
if s.Options.AuthServiceHost != "" {
if s.Options.AuthDBDump == "" {
s.Options.AuthDBDump = fmt.Sprintf("gs://%s/auth-db", s.Options.AuthServiceHost)
}
if s.Options.AuthDBSigner == "" {
if !strings.HasSuffix(s.Options.AuthServiceHost, ".appspot.com") {
return errors.Reason("-auth-db-signer is required if -auth-service-host is not *.appspot.com").Err()
}
s.Options.AuthDBSigner = fmt.Sprintf("%[email protected]",
strings.TrimSuffix(s.Options.AuthServiceHost, ".appspot.com"))
}
}
// Fetch the initial copy of AuthDB. Note that this happens before we start
// the serving loop, to make sure incoming requests have some AuthDB to use.
if err := s.refreshAuthDB(s.Context); err != nil {
return errors.Annotate(err, "failed to load the initial AuthDB version").Err()
}
// Periodically refresh it in the background.
s.RunInBackground("luci.authdb", func(c context.Context) {
for {
if r := <-clock.After(c, 30*time.Second); r.Err != nil {
return // the context is canceled
}
if err := s.refreshAuthDB(c); err != nil {
logging.WithError(err).Errorf(c, "Failed to reload AuthDB, using the cached one")
}
}
})
return nil
}
// refreshAuthDB reloads AuthDB from the source and stores it in memory.
func (s *Server) refreshAuthDB(c context.Context) error {
cur, _ := s.authDB.Load().(authdb.DB)
db, err := s.fetchAuthDB(c, cur)
if err != nil {
return err
}
s.authDB.Store(db)
return nil
}
// fetchAuthDB fetches the most recent copy of AuthDB from the external source.
//
// 'cur' is the currently used AuthDB or nil if fetching it for the first time.
// Returns 'cur' as is if it's already fresh.
func (s *Server) fetchAuthDB(c context.Context, cur authdb.DB) (authdb.DB, error) {
if s.Options.testAuthDB != nil {
return s.Options.testAuthDB, nil
}
// Loading from a local file.
//
// TODO(vadimsh): Get rid of this once -auth-service-host is deployed.
if s.Options.AuthDBPath != "" {
r, err := os.Open(s.Options.AuthDBPath)
if err != nil {
return nil, errors.Annotate(err, "failed to open AuthDB file").Err()
}
defer r.Close()
db, err := authdb.SnapshotDBFromTextProto(r)
if err != nil {
return nil, errors.Annotate(err, "failed to load AuthDB file").Err()
}
return db, nil
}
// Loading from a GCS dump (s.Options.AuthDB* are validated here already).
if s.Options.AuthDBDump != "" {
c, cancel := clock.WithTimeout(c, 5*time.Minute)
defer cancel()
fetcher := dump.Fetcher{
StorageDumpPath: s.Options.AuthDBDump[len("gs://"):],
AuthServiceURL: "https://" + s.Options.AuthServiceHost,
AuthServiceAccount: s.Options.AuthDBSigner,
OAuthScopes: DefaultOAuthScopes,
}
curSnap, _ := cur.(*authdb.SnapshotDB)
snap, err := fetcher.FetchAuthDB(c, curSnap)
if err != nil {
return nil, errors.Annotate(err, "fetching from GCS dump failed").Err()
}
return snap, nil
}
// In dev mode default to "allow everything".
if !s.Options.Prod {
return authdb.DevServerDB{}, nil
}
return nil, errors.Reason("a source of AuthDB is not configured").Err()
}
// initTSMon initializes time series monitoring state if tsmon is enabled.
func (s *Server) initTSMon() error {
switch {
case s.Options.TsMonAccount == "":
logging.Infof(s.Context, "Disabling tsmon, -ts-mon-account is not set")
return nil
case s.Options.TsMonServiceName == "":
logging.Infof(s.Context, "Disabling tsmon, -ts-mon-service-name is not set")
return nil
case s.Options.TsMonJobName == "":
logging.Infof(s.Context, "Disabling tsmon, -ts-mon-job-name is not set")
return nil
}
s.tsmon = &tsmon.State{
IsDevMode: !s.Options.Prod,
Settings: &tsmon.Settings{
Enabled: true,
ProdXAccount: s.Options.TsMonAccount,
FlushIntervalSec: 60,
ReportRuntimeStats: true,
},
Target: func(c context.Context) target.Task {
// TODO(vadimsh): We pretend to be a GAE app for now to be able to
// reuse existing dashboards. Each pod pretends to be a separate GAE
// version. That way we can stop worrying about TaskNumAllocator and just
// use 0 (since there'll be only one task per "version"). This looks
// chaotic for deployments with large number of pods.
return target.Task{
DataCenter: "appengine",
ServiceName: s.Options.TsMonServiceName,
JobName: s.Options.TsMonJobName,
HostName: s.hostname,
}
},
}
// Report our image version as a metric, useful to monitor rollouts.
tsmoncommon.RegisterCallbackIn(s.Context, func(ctx context.Context) {
versionMetric.Set(ctx, s.Options.imageVersion())
})
// Periodically flush metrics.
s.RunInBackground("luci.tsmon", s.tsmon.FlushPeriodically)
return nil
}
// initTracing initializes the StackDriver opencensus.io trace exporter.
func (s *Server) initTracing() error {
if !s.Options.shouldEnableTracing() {
return nil
}
// Parse -trace-sampling spec to get a sampler.
sampling := s.Options.TraceSampling
if sampling == "" {
sampling = "0.1qps"
}
logging.Infof(s.Context, "Setting up StackDriver trace exports to %q (%s)", s.Options.CloudProject, sampling)
var err error
if s.sampler, err = internal.Sampler(sampling); err != nil {
return errors.Annotate(err, "bad -trace-sampling").Err()
}
// Grab the token source to call StackDriver API.
auth, err := s.initTokenSource(DefaultOAuthScopes)
if err != nil {
return errors.Annotate(err, "failed to initialize token source").Err()
}
opts := []option.ClientOption{option.WithTokenSource(auth.source)}
	// Register the trace uploader. It is accidentally also a metrics uploader,
	// but we shouldn't be using it for metrics (we have tsmon instead).
exporter, err := stackdriver.NewExporter(stackdriver.Options{
ProjectID: s.Options.CloudProject,
MonitoringClientOptions: opts, // note: this should be effectively unused
TraceClientOptions: opts,
BundleDelayThreshold: 10 * time.Second,
BundleCountThreshold: 512,
DefaultTraceAttributes: map[string]interface{}{
"cr.dev/image": s.Options.ContainerImageID,
"cr.dev/service": s.Options.TsMonServiceName,
"cr.dev/job": s.Options.TsMonJobName,
"cr.dev/host": s.hostname,
},
OnError: func(err error) {
logging.Errorf(s.Context, "StackDriver error: %s", err)
},
})
if err != nil {
return err
}
octrace.RegisterExporter(exporter)
// No matter what, do not sample "random" top-level spans from background
// goroutines we don't control. We'll start top spans ourselves in
// startRequestSpan.
octrace.ApplyConfig(octrace.Config{DefaultSampler: octrace.NeverSample()})
// Do the final flush before exiting.
s.RegisterCleanup(exporter.Flush)
return nil
}
// initRedis sets up Redis connection pool, if enabled.
//
// Does nothing if the RedisAddr option is unset. In this case redisconn.Get will
// return ErrNotConfigured.
func (s *Server) initRedis() error {
if s.Options.RedisAddr == "" {
return nil
}
s.redisPool = redisconn.NewPool(s.Options.RedisAddr, s.Options.RedisDB)
s.Context = redisconn.UsePool(s.Context, s.redisPool)
// Use Redis as caching.BlobCache provider.
s.Context = caching.WithGlobalCache(s.Context, func(namespace string) caching.BlobCache {
return &internal.RedisBlobCache{Prefix: fmt.Sprintf("luci.blobcache.%s:", namespace)}
})
// Close all connections when exiting gracefully.
s.RegisterCleanup(func() {
if err := s.redisPool.Close(); err != nil {
logging.Warningf(s.Context, "Failed to close Redis pool - %s", err)
}
})
// Populate pool metrics on tsmon flush.
tsmoncommon.RegisterCallbackIn(s.Context, func(ctx context.Context) {
redisconn.ReportStats(ctx, s.redisPool, "default")
})
return nil
}
// initDatastoreClient initializes Cloud Datastore client, if enabled.
func (s *Server) initDatastoreClient() error {
if s.Options.CloudProject == "" {
return nil
}
logging.Infof(s.Context, "Setting up datastore client for project %q", s.Options.CloudProject)
// Enable auth only when using the real datastore.
var opts []option.ClientOption
if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr == "" {
auth, err := s.initTokenSource(DefaultOAuthScopes)
if err != nil {
return errors.Annotate(err, "failed to initialize token source").Err()
}
opts = []option.ClientOption{option.WithTokenSource(auth.source)}
}
client, err := datastore.NewClient(s.Context, s.Options.CloudProject, opts...)
if err != nil {
return errors.Annotate(err, "failed to instantiate the client").Err()
}
s.RegisterCleanup(func() {
if err := client.Close(); err != nil {
logging.Warningf(s.Context, "Failed to close datastore client - %s", err)
}
})
s.dsClient = client
return nil
}
// initCloudContext makes the context compatible with the supported portion of
// 'go.chromium.org/gae' library.
func (s *Server) initCloudContext() error {
s.Context = (&cloud.ConfigLite{
IsDev: !s.Options.Prod,
ProjectID: s.Options.CloudProject,
DS: s.dsClient,
}).Use(s.Context)
return nil
}
// initMainPort initializes the server on options.HTTPAddr port.
func (s *Server) initMainPort() error {
s.Routes = s.RegisterHTTP(s.Options.HTTPAddr)
// Expose public pRPC endpoints (see also ListenAndServe where we put the
// final interceptors).
s.PRPC = &prpc.Server{
Authenticator: &auth.Authenticator{
Methods: []auth.Method{
&auth.GoogleOAuth2Method{
Scopes: []string{clientauth.OAuthScopeEmail},
},
},
},
}
discovery.Enable(s.PRPC)
s.PRPC.InstallHandlers(s.Routes, router.MiddlewareChain{})
// Install RPCExplorer web app at "/rpcexplorer/".
rpcexplorer.Install(s.Routes)
return nil
}
// initAdminPort initializes the server on options.AdminAddr port.
func (s *Server) initAdminPort() error {
// Install endpoints accessible through the admin port only.
admin := s.RegisterHTTP(s.Options.AdminAddr)
admin.GET("/", router.MiddlewareChain{}, func(c *router.Context) {
http.Redirect(c.Writer, c.Request, "/admin/portal", http.StatusFound)
})
portal.InstallHandlers(admin, router.MiddlewareChain{}, portal.AssumeTrustedPort)
// Install pprof endpoints on the admin port. Note that they must not be
// exposed via the main serving port, since they do no authentication and
// may leak internal information. Also note that pprof handlers rely on
// routing structure not supported by our router, so we do a bit of manual
// routing.
admin.GET("/debug/pprof/*path", router.MiddlewareChain{}, func(c *router.Context) {
switch c.Params.ByName("path") {
case "cmdline":
pprof.Cmdline(c.Writer, c.Request)
case "profile":
pprof.Profile(c.Writer, c.Request)
case "symbol":
pprof.Symbol(c.Writer, c.Request)
case "trace":
pprof.Trace(c.Writer, c.Request)
default:
pprof.Index(c.Writer, c.Request)
}
})
return nil
}
// startRequestSpan opens a new per-request trace span.
func (s *Server) startRequestSpan(ctx context.Context, r *http.Request, skipSampling bool) (context.Context, *octrace.Span) {
var sampler octrace.Sampler
if skipSampling {
sampler = octrace.NeverSample()
} else {
sampler = s.sampler
}
ctx, span := octrace.StartSpan(ctx, "HTTP:"+r.URL.Path,
octrace.WithSpanKind(octrace.SpanKindServer),
octrace.WithSampler(sampler),
)
// Link this span to a parent span propagated through X-Cloud-Trace-Context
// header (if any).
if parent, ok := cloudTraceFormat.SpanContextFromRequest(r); ok {
span.AddLink(octrace.Link{
TraceID: parent.TraceID,
SpanID: parent.SpanID,
Type: octrace.LinkTypeParent,
})
}
// Request info (these are recognized by StackDriver natively).
span.AddAttributes(
octrace.StringAttribute("/http/host", r.Host),
octrace.StringAttribute("/http/method", r.Method),
octrace.StringAttribute("/http/path", r.URL.Path),
)
return ctx, span
}
// getRemoteIP extracts end-user IP address from X-Forwarded-For header.
func getRemoteIP(r *http.Request) string {
// X-Forwarded-For header is set by Cloud Load Balancer and has format:
// [<untrusted part>,]<IP that connected to LB>,<unimportant>[,<more>].
//
// <untrusted part> is present if the original request from the Internet comes
// with X-Forwarded-For header. We can't trust IPs specified there. We assume
// Cloud Load Balancer sanitizes the format of this field though.
//
// <IP that connected to LB> is what we are after.
//
// <unimportant> is "global forwarding rule external IP" which we don't care
// about.
//
// <more> is present only if we proxy the request through more layers of
// load balancers *while it is already inside GKE cluster*. We assume we don't
// do that (if we ever do, Options{...} should be extended with a setting that
// specifies how many layers of load balancers to skip to get to the original
// IP).
//
// See https://cloud.google.com/load-balancing/docs/https for more info.
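	//
	// For example, with a (hypothetical) header value
	// "203.0.113.7,198.51.100.10,35.190.0.1" the second-to-last entry,
	// "198.51.100.10", is returned.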
forwardedFor := strings.Split(r.Header.Get("X-Forwarded-For"), ",")
if len(forwardedFor) >= 2 {
return forwardedFor[len(forwardedFor)-2]
}
// Fallback to the peer IP if X-Forwarded-For is not set. Happens when
// connecting to the server's port directly from within the cluster.
ip, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return "0.0.0.0"
}
return ip
}
// getRequestURL reconstructs original request URL to log it (best effort).
func getRequestURL(r *http.Request) string {
proto := r.Header.Get("X-Forwarded-Proto")
if proto != "https" {
proto = "http"
}
host := r.Host
if r.Host == "" {
host = "127.0.0.1"
}
return fmt.Sprintf("%s://%s%s", proto, host, r.RequestURI)
}
// isHealthCheckRequest is true if the request appears to be coming from
// a known health check probe.
func isHealthCheckRequest(r *http.Request) bool {
if r.URL.Path == healthEndpoint {
switch ua := r.UserAgent(); {
case strings.HasPrefix(ua, "kube-probe/"): // Kubernetes
return true
case strings.HasPrefix(ua, "GoogleHC/"): // Cloud Load Balancer
return true
}
}
return false
}
| [
"\"DATASTORE_EMULATOR_HOST\""
]
| []
| [
"DATASTORE_EMULATOR_HOST"
]
| [] | ["DATASTORE_EMULATOR_HOST"] | go | 1 | 0 | |
vendor/github.com/docker/docker/integration-cli/check_test.go | package main
import (
"context"
"fmt"
"io/ioutil"
"net/http/httptest"
"os"
"path"
"path/filepath"
"strconv"
"sync"
"syscall"
"testing"
"time"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/integration-cli/daemon"
"github.com/docker/docker/integration-cli/environment"
testdaemon "github.com/docker/docker/internal/test/daemon"
ienv "github.com/docker/docker/internal/test/environment"
"github.com/docker/docker/internal/test/fakestorage"
"github.com/docker/docker/internal/test/fixtures/plugin"
"github.com/docker/docker/internal/test/registry"
"github.com/docker/docker/pkg/reexec"
"github.com/go-check/check"
)
const (
// the private registry to use for tests
privateRegistryURL = registry.DefaultURL
// path to containerd's ctr binary
ctrBinary = "ctr"
// the docker daemon binary to use
dockerdBinary = "dockerd"
)
var (
testEnv *environment.Execution
// the docker client binary to use
dockerBinary = ""
)
func init() {
var err error
reexec.Init() // This is required for external graphdriver tests
testEnv, err = environment.New()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func TestMain(m *testing.M) {
dockerBinary = testEnv.DockerBinary()
err := ienv.EnsureFrozenImagesLinux(&testEnv.Execution)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
testEnv.Print()
os.Exit(m.Run())
}
func Test(t *testing.T) {
cli.SetTestEnvironment(testEnv)
fakestorage.SetTestEnvironment(&testEnv.Execution)
ienv.ProtectAll(t, &testEnv.Execution)
check.TestingT(t)
}
func init() {
check.Suite(&DockerSuite{})
}
type DockerSuite struct {
}
func (s *DockerSuite) OnTimeout(c *check.C) {
if testEnv.IsRemoteDaemon() {
return
}
path := filepath.Join(os.Getenv("DEST"), "docker.pid")
b, err := ioutil.ReadFile(path)
if err != nil {
c.Fatalf("Failed to get daemon PID from %s\n", path)
}
rawPid, err := strconv.ParseInt(string(b), 10, 32)
if err != nil {
c.Fatalf("Failed to parse pid from %s: %s\n", path, err)
}
daemonPid := int(rawPid)
if daemonPid > 0 {
testdaemon.SignalDaemonDump(daemonPid)
}
}
func (s *DockerSuite) TearDownTest(c *check.C) {
testEnv.Clean(c)
}
func init() {
check.Suite(&DockerRegistrySuite{
ds: &DockerSuite{},
})
}
type DockerRegistrySuite struct {
ds *DockerSuite
reg *registry.V2
d *daemon.Daemon
}
func (s *DockerRegistrySuite) OnTimeout(c *check.C) {
s.d.DumpStackAndQuit()
}
func (s *DockerRegistrySuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
s.reg = registry.NewV2(c)
s.reg.WaitReady(c)
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
}
func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
if s.reg != nil {
s.reg.Close()
}
if s.d != nil {
s.d.Stop(c)
}
s.ds.TearDownTest(c)
}
func init() {
check.Suite(&DockerRegistryAuthHtpasswdSuite{
ds: &DockerSuite{},
})
}
type DockerRegistryAuthHtpasswdSuite struct {
ds *DockerSuite
reg *registry.V2
d *daemon.Daemon
}
func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) {
s.d.DumpStackAndQuit()
}
func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
s.reg = registry.NewV2(c, registry.Htpasswd)
s.reg.WaitReady(c)
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
}
func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) {
if s.reg != nil {
out, err := s.d.Cmd("logout", privateRegistryURL)
c.Assert(err, check.IsNil, check.Commentf("%s", out))
s.reg.Close()
}
if s.d != nil {
s.d.Stop(c)
}
s.ds.TearDownTest(c)
}
func init() {
check.Suite(&DockerRegistryAuthTokenSuite{
ds: &DockerSuite{},
})
}
type DockerRegistryAuthTokenSuite struct {
ds *DockerSuite
reg *registry.V2
d *daemon.Daemon
}
func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) {
s.d.DumpStackAndQuit()
}
func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, RegistryHosting, testEnv.IsLocalDaemon)
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
}
func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) {
if s.reg != nil {
out, err := s.d.Cmd("logout", privateRegistryURL)
c.Assert(err, check.IsNil, check.Commentf("%s", out))
s.reg.Close()
}
if s.d != nil {
s.d.Stop(c)
}
s.ds.TearDownTest(c)
}
func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C, tokenURL string) {
if s == nil {
c.Fatal("registry suite isn't initialized")
}
s.reg = registry.NewV2(c, registry.Token(tokenURL))
s.reg.WaitReady(c)
}
func init() {
check.Suite(&DockerDaemonSuite{
ds: &DockerSuite{},
})
}
type DockerDaemonSuite struct {
ds *DockerSuite
d *daemon.Daemon
}
func (s *DockerDaemonSuite) OnTimeout(c *check.C) {
s.d.DumpStackAndQuit()
}
func (s *DockerDaemonSuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
}
func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
if s.d != nil {
s.d.Stop(c)
}
s.ds.TearDownTest(c)
}
func (s *DockerDaemonSuite) TearDownSuite(c *check.C) {
filepath.Walk(testdaemon.SockRoot, func(path string, fi os.FileInfo, err error) error {
if err != nil {
// ignore errors here
// not cleaning up sockets is not really an error
return nil
}
if fi.Mode() == os.ModeSocket {
syscall.Unlink(path)
}
return nil
})
os.RemoveAll(testdaemon.SockRoot)
}
const defaultSwarmPort = 2477
func init() {
check.Suite(&DockerSwarmSuite{
ds: &DockerSuite{},
})
}
type DockerSwarmSuite struct {
server *httptest.Server
ds *DockerSuite
daemons []*daemon.Daemon
daemonsLock sync.Mutex // protect access to daemons
portIndex int
}
func (s *DockerSwarmSuite) OnTimeout(c *check.C) {
s.daemonsLock.Lock()
defer s.daemonsLock.Unlock()
for _, d := range s.daemons {
d.DumpStackAndQuit()
}
}
func (s *DockerSwarmSuite) SetUpTest(c *check.C) {
testRequires(c, DaemonIsLinux, testEnv.IsLocalDaemon)
}
func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Daemon {
d := daemon.New(c, dockerBinary, dockerdBinary,
testdaemon.WithEnvironment(testEnv.Execution),
testdaemon.WithSwarmPort(defaultSwarmPort+s.portIndex),
)
if joinSwarm {
if len(s.daemons) > 0 {
d.StartAndSwarmJoin(c, s.daemons[0].Daemon, manager)
} else {
d.StartAndSwarmInit(c)
}
} else {
d.StartNode(c)
}
s.portIndex++
s.daemonsLock.Lock()
s.daemons = append(s.daemons, d)
s.daemonsLock.Unlock()
return d
}
func (s *DockerSwarmSuite) TearDownTest(c *check.C) {
testRequires(c, DaemonIsLinux)
s.daemonsLock.Lock()
for _, d := range s.daemons {
if d != nil {
d.Stop(c)
d.Cleanup(c)
}
}
s.daemons = nil
s.daemonsLock.Unlock()
s.portIndex = 0
s.ds.TearDownTest(c)
}
func init() {
check.Suite(&DockerPluginSuite{
ds: &DockerSuite{},
})
}
type DockerPluginSuite struct {
ds *DockerSuite
registry *registry.V2
}
func (ps *DockerPluginSuite) registryHost() string {
return privateRegistryURL
}
func (ps *DockerPluginSuite) getPluginRepo() string {
return path.Join(ps.registryHost(), "plugin", "basic")
}
func (ps *DockerPluginSuite) getPluginRepoWithTag() string {
return ps.getPluginRepo() + ":" + "latest"
}
func (ps *DockerPluginSuite) SetUpSuite(c *check.C) {
testRequires(c, DaemonIsLinux, RegistryHosting)
ps.registry = registry.NewV2(c)
ps.registry.WaitReady(c)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
err := plugin.CreateInRegistry(ctx, ps.getPluginRepo(), nil)
c.Assert(err, checker.IsNil, check.Commentf("failed to create plugin"))
}
func (ps *DockerPluginSuite) TearDownSuite(c *check.C) {
if ps.registry != nil {
ps.registry.Close()
}
}
func (ps *DockerPluginSuite) TearDownTest(c *check.C) {
ps.ds.TearDownTest(c)
}
func (ps *DockerPluginSuite) OnTimeout(c *check.C) {
ps.ds.OnTimeout(c)
}
| [
"\"DEST\""
]
| []
| [
"DEST"
]
| [] | ["DEST"] | go | 1 | 0 | |
internal/redis/redis.go | package redis
import (
"os"
"github.com/go-redis/redis"
)
// New creates a Redis client configured from the REDIS_URI environment variable.
func New() *redis.Client {
redisClient := redis.NewClient(&redis.Options{
Addr: os.Getenv("REDIS_URI"),
})
return redisClient
}
| [
"\"REDIS_URI\""
]
| []
| [
"REDIS_URI"
]
| [] | ["REDIS_URI"] | go | 1 | 0 | |
gradle-server/src/main/java/com/github/badsyntax/gradle/GradleBuildRunner.java | package com.github.badsyntax.gradle;
import com.github.badsyntax.gradle.exceptions.GradleBuildRunnerException;
import com.github.badsyntax.gradle.exceptions.GradleConnectionException;
import com.google.common.base.Strings;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.gradle.tooling.BuildLauncher;
import org.gradle.tooling.CancellationToken;
import org.gradle.tooling.GradleConnector;
import org.gradle.tooling.ProjectConnection;
import org.gradle.tooling.events.OperationType;
import org.gradle.tooling.events.ProgressListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GradleBuildRunner {
private static final String JAVA_TOOL_OPTIONS_ENV = "JAVA_TOOL_OPTIONS";
private static final Logger logger = LoggerFactory.getLogger(GradleBuildRunner.class.getName());
private String projectDir;
private List<String> args;
private GradleConfig gradleConfig;
private String cancellationKey;
private Boolean colorOutput;
private int javaDebugPort;
private OutputStream standardOutputStream;
private OutputStream standardErrorStream;
private InputStream standardInputStream;
private ProgressListener progressListener;
private Boolean javaDebugCleanOutputCache;
public GradleBuildRunner(
String projectDir,
List<String> args,
GradleConfig gradleConfig,
String cancellationKey,
Boolean colorOutput,
int javaDebugPort,
Boolean javaDebugCleanOutputCache) {
this.projectDir = projectDir;
this.args = args;
this.gradleConfig = gradleConfig;
this.cancellationKey = cancellationKey;
this.colorOutput = colorOutput;
this.javaDebugPort = javaDebugPort;
this.javaDebugCleanOutputCache = javaDebugCleanOutputCache;
}
public GradleBuildRunner(
String projectDir, List<String> args, GradleConfig gradleConfig, String cancellationKey) {
this(projectDir, args, gradleConfig, cancellationKey, true, 0, false);
}
public GradleBuildRunner setStandardOutputStream(OutputStream standardOutputStream) {
this.standardOutputStream = standardOutputStream;
return this;
}
public GradleBuildRunner setStandardInputStream(InputStream standardInputStream) {
this.standardInputStream = standardInputStream;
return this;
}
public GradleBuildRunner setStandardErrorStream(OutputStream standardErrorStream) {
this.standardErrorStream = standardErrorStream;
return this;
}
public GradleBuildRunner setProgressListener(ProgressListener progressListener) {
this.progressListener = progressListener;
return this;
}
public void run() throws GradleConnectionException, IOException, GradleBuildRunnerException {
if (Boolean.TRUE.equals(args.isEmpty())) {
throw new GradleBuildRunnerException("No args supplied");
}
GradleConnector gradleConnector = GradleProjectConnector.build(projectDir, gradleConfig);
try (ProjectConnection connection = gradleConnector.connect()) {
runBuild(connection);
} finally {
GradleBuildCancellation.clearToken(cancellationKey);
}
}
private void runBuild(ProjectConnection connection)
throws GradleBuildRunnerException, IOException {
Set<OperationType> progressEvents = new HashSet<>();
progressEvents.add(OperationType.PROJECT_CONFIGURATION);
progressEvents.add(OperationType.TASK);
progressEvents.add(OperationType.TRANSFORM);
CancellationToken cancellationToken = GradleBuildCancellation.buildToken(cancellationKey);
Boolean isDebugging = javaDebugPort != 0;
BuildLauncher build =
connection
.newBuild()
.withCancellationToken(cancellationToken)
.addProgressListener(progressListener, progressEvents)
.setStandardOutput(standardOutputStream)
.setStandardError(standardErrorStream)
.setColorOutput(colorOutput)
.withArguments(buildArguments(isDebugging));
if (this.standardInputStream != null) {
build.setStandardInput(standardInputStream);
}
if (Boolean.TRUE.equals(isDebugging)) {
build.setEnvironmentVariables(buildJavaEnvVarsWithJwdp(javaDebugPort));
}
if (!Strings.isNullOrEmpty(gradleConfig.getJvmArguments())) {
build.setJvmArguments(gradleConfig.getJvmArguments());
}
build.run();
}
private List<String> buildArguments(Boolean isDebugging) throws GradleBuildRunnerException {
if (Boolean.FALSE.equals(isDebugging) || Boolean.FALSE.equals(javaDebugCleanOutputCache)) {
return args;
}
if (args.size() > 1) {
throw new GradleBuildRunnerException("Unexpected multiple tasks when debugging");
}
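        // Illustrative note (task path assumed): for args ["app:run"], the code below
        // derives the clean task "app:cleanRun" and prepends it, so stale task output
        // is removed before the debug run starts.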
List<String> parts = new LinkedList<>(Arrays.asList(args.get(0).split(":")));
String taskName = parts.get(parts.size() - 1);
parts.remove(parts.size() - 1);
String capitalizedTaskName = taskName.substring(0, 1).toUpperCase() + taskName.substring(1);
parts.add("clean" + capitalizedTaskName);
String cleanTaskName = String.join(":", parts);
List<String> newArgs = new ArrayList<>(args);
newArgs.add(0, cleanTaskName);
logger.warn("Adding {} to ensure task output is cleared before debugging", cleanTaskName);
return newArgs;
}
private static Map<String, String> buildJavaEnvVarsWithJwdp(int javaDebugPort) {
HashMap<String, String> envVars = new HashMap<>(System.getenv());
envVars.put(
JAVA_TOOL_OPTIONS_ENV,
String.format(
"-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=localhost:%d",
javaDebugPort));
return envVars;
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
internal/env/env.go | package env
import (
"os"
"strconv"
"time"
"github.com/joho/godotenv"
"github.com/lus/pasty/internal/static"
)
// Load loads an optional .env file
func Load() {
godotenv.Load()
}
// MustString returns the content of the environment variable with the given key or the given fallback
func MustString(key, fallback string) string {
value, found := os.LookupEnv(static.EnvironmentVariablePrefix + key)
if !found {
return fallback
}
return value
}
// MustBool uses MustString and parses it into a boolean
func MustBool(key string, fallback bool) bool {
parsed, _ := strconv.ParseBool(MustString(key, strconv.FormatBool(fallback)))
return parsed
}
// MustInt uses MustString and parses it into an integer
func MustInt(key string, fallback int) int {
parsed, _ := strconv.Atoi(MustString(key, strconv.Itoa(fallback)))
return parsed
}
// MustDuration uses MustString and parses it into a duration
func MustDuration(key string, fallback time.Duration) time.Duration {
parsed, _ := time.ParseDuration(MustString(key, fallback.String()))
return parsed
}
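// Example usage (keys and fallbacks are assumptions for illustration; the
// configured static.EnvironmentVariablePrefix is prepended to each key):
//
//	address := MustString("WEB_ADDRESS", ":8080")
//	useTLS := MustBool("WEB_TLS", false)
//	lifetime := MustDuration("AUTODELETE_LIFETIME", 720*time.Hour)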
| []
| []
| []
| [] | [] | go | null | null | null |
pkg/kubectl/cmd/diff.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/kubectl/apply/parse"
"k8s.io/kubernetes/pkg/kubectl/apply/strategy"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
"k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
"k8s.io/utils/exec"
)
var (
diffLong = templates.LongDesc(i18n.T(`
Diff configurations specified by filename or stdin between their local,
last-applied, live and/or "merged" versions.
LOCAL and LIVE versions are diffed by default. Other available keywords
are MERGED and LAST.
Output is always YAML.
KUBERNETES_EXTERNAL_DIFF environment variable can be used to select your own
diff command. By default, the "diff" command available in your path will be
run with "-u" (unicode) and "-N" (treat new files as empty) options.`))
diffExample = templates.Examples(i18n.T(`
# Diff resources included in pod.json. By default, it will diff LOCAL and LIVE versions
kubectl alpha diff -f pod.json
# When one version is specified, diff that version against LIVE
cat service.yaml | kubectl alpha diff -f - MERGED
# Or specify both versions
kubectl alpha diff -f pod.json -f service.yaml LAST LOCAL`))
)
type DiffOptions struct {
FilenameOptions resource.FilenameOptions
}
func isValidArgument(arg string) error {
switch arg {
case "LOCAL", "LIVE", "LAST", "MERGED":
return nil
default:
return fmt.Errorf(`Invalid parameter %q, must be either "LOCAL", "LIVE", "LAST" or "MERGED"`, arg)
}
}
func parseDiffArguments(args []string) (string, string, error) {
if len(args) > 2 {
return "", "", fmt.Errorf("Invalid number of arguments: expected at most 2.")
}
// Default values
from := "LOCAL"
to := "LIVE"
if len(args) > 0 {
from = args[0]
}
if len(args) > 1 {
to = args[1]
}
if err := isValidArgument(to); err != nil {
return "", "", err
}
if err := isValidArgument(from); err != nil {
return "", "", err
}
return from, to, nil
}
func NewCmdDiff(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {
var options DiffOptions
diff := DiffProgram{
Exec: exec.New(),
IOStreams: streams,
}
cmd := &cobra.Command{
Use: "diff -f FILENAME",
DisableFlagsInUseLine: true,
Short: i18n.T("Diff different versions of configurations"),
Long: diffLong,
Example: diffExample,
Run: func(cmd *cobra.Command, args []string) {
from, to, err := parseDiffArguments(args)
cmdutil.CheckErr(err)
cmdutil.CheckErr(RunDiff(f, &diff, &options, from, to))
},
}
usage := "contains the configuration to diff"
cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
cmd.MarkFlagRequired("filename")
return cmd
}
// DiffProgram finds and runs the diff program. The value of the
// KUBERNETES_EXTERNAL_DIFF environment variable will be used as the diff
// program. By default, `diff(1)` will be used.
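//
// For example (hypothetical invocation, not taken from this file):
//
//	KUBERNETES_EXTERNAL_DIFF=meld kubectl alpha diff -f pod.json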
type DiffProgram struct {
Exec exec.Interface
genericclioptions.IOStreams
}
func (d *DiffProgram) getCommand(args ...string) exec.Cmd {
diff := ""
if envDiff := os.Getenv("KUBERNETES_EXTERNAL_DIFF"); envDiff != "" {
diff = envDiff
} else {
diff = "diff"
args = append([]string{"-u", "-N"}, args...)
}
cmd := d.Exec.Command(diff, args...)
cmd.SetStdout(d.Out)
cmd.SetStderr(d.ErrOut)
return cmd
}
// Run runs the detected diff program. `from` and `to` are the directory to diff.
func (d *DiffProgram) Run(from, to string) error {
d.getCommand(from, to).Run() // Ignore diff return code
return nil
}
// Printer is used to print an object.
type Printer struct{}
// Print the object inside the writer w.
func (p *Printer) Print(obj map[string]interface{}, w io.Writer) error {
if obj == nil {
return nil
}
data, err := yaml.Marshal(obj)
if err != nil {
return err
}
_, err = w.Write(data)
return err
}
// DiffVersion gets the proper version of objects, and aggregates them into a directory.
type DiffVersion struct {
Dir *Directory
Name string
}
// NewDiffVersion creates a new DiffVersion with the named version.
func NewDiffVersion(name string) (*DiffVersion, error) {
dir, err := CreateDirectory(name)
if err != nil {
return nil, err
}
return &DiffVersion{
Dir: dir,
Name: name,
}, nil
}
func (v *DiffVersion) getObject(obj Object) (map[string]interface{}, error) {
switch v.Name {
case "LIVE":
return obj.Live()
case "MERGED":
return obj.Merged()
case "LOCAL":
return obj.Local()
case "LAST":
return obj.Last()
}
return nil, fmt.Errorf("Unknown version: %v", v.Name)
}
// Print prints the object using the printer into a new file in the directory.
func (v *DiffVersion) Print(obj Object, printer Printer) error {
vobj, err := v.getObject(obj)
if err != nil {
return err
}
f, err := v.Dir.NewFile(obj.Name())
if err != nil {
return err
}
defer f.Close()
return printer.Print(vobj, f)
}
// Directory creates a new temp directory, and makes it easy to create new files in it.
type Directory struct {
Name string
}
// CreateDirectory creates the actual directory on disk, and returns a
// new representation of it.
func CreateDirectory(prefix string) (*Directory, error) {
name, err := ioutil.TempDir("", prefix+"-")
if err != nil {
return nil, err
}
return &Directory{
Name: name,
}, nil
}
// NewFile creates a new file in the directory.
func (d *Directory) NewFile(name string) (*os.File, error) {
return os.OpenFile(filepath.Join(d.Name, name), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700)
}
// Delete removes the directory recursively.
func (d *Directory) Delete() error {
return os.RemoveAll(d.Name)
}
// Object is an interface that lets you retrieve multiple versions of
// it.
type Object interface {
Local() (map[string]interface{}, error)
Live() (map[string]interface{}, error)
Last() (map[string]interface{}, error)
Merged() (map[string]interface{}, error)
Name() string
}
// InfoObject is an implementation of the Object interface. It gets all
// the information from the Info object.
type InfoObject struct {
Remote runtime.Unstructured
Info *resource.Info
Encoder runtime.Encoder
Parser *parse.Factory
}
var _ Object = &InfoObject{}
func (obj InfoObject) toMap(data []byte) (map[string]interface{}, error) {
m := map[string]interface{}{}
if len(data) == 0 {
return m, nil
}
err := json.Unmarshal(data, &m)
return m, err
}
func (obj InfoObject) Local() (map[string]interface{}, error) {
data, err := runtime.Encode(obj.Encoder, obj.Info.Object)
if err != nil {
return nil, err
}
return obj.toMap(data)
}
func (obj InfoObject) Live() (map[string]interface{}, error) {
if obj.Remote == nil {
return nil, nil // Object doesn't exist on cluster.
}
return obj.Remote.UnstructuredContent(), nil
}
func (obj InfoObject) Merged() (map[string]interface{}, error) {
local, err := obj.Local()
if err != nil {
return nil, err
}
live, err := obj.Live()
if err != nil {
return nil, err
}
last, err := obj.Last()
if err != nil {
return nil, err
}
if live == nil || last == nil {
return local, nil // We probably don't have a live version, merged is local.
}
elmt, err := obj.Parser.CreateElement(last, local, live)
if err != nil {
return nil, err
}
result, err := elmt.Merge(strategy.Create(strategy.Options{}))
return result.MergedResult.(map[string]interface{}), err
}
func (obj InfoObject) Last() (map[string]interface{}, error) {
if obj.Remote == nil {
return nil, nil // No object is live, return empty
}
accessor, err := meta.Accessor(obj.Remote)
if err != nil {
return nil, err
}
annots := accessor.GetAnnotations()
if annots == nil {
return nil, nil // Not an error, just empty.
}
return obj.toMap([]byte(annots[api.LastAppliedConfigAnnotation]))
}
func (obj InfoObject) Name() string {
return obj.Info.Name
}
// Differ creates two DiffVersions and diffs them.
type Differ struct {
From *DiffVersion
To *DiffVersion
}
func NewDiffer(from, to string) (*Differ, error) {
differ := Differ{}
var err error
differ.From, err = NewDiffVersion(from)
if err != nil {
return nil, err
}
differ.To, err = NewDiffVersion(to)
if err != nil {
differ.From.Dir.Delete()
return nil, err
}
return &differ, nil
}
// Diff diffs two versions of a specific object, and prints both versions to directories.
func (d *Differ) Diff(obj Object, printer Printer) error {
if err := d.From.Print(obj, printer); err != nil {
return err
}
if err := d.To.Print(obj, printer); err != nil {
return err
}
return nil
}
// Run runs the diff program against both directories.
func (d *Differ) Run(diff *DiffProgram) error {
return diff.Run(d.From.Dir.Name, d.To.Dir.Name)
}
// TearDown removes both temporary directories recursively.
func (d *Differ) TearDown() {
d.From.Dir.Delete() // Ignore error
d.To.Dir.Delete() // Ignore error
}
type Downloader struct {
mapper meta.RESTMapper
dclient dynamic.Interface
ns string
}
func NewDownloader(f cmdutil.Factory) (*Downloader, error) {
var err error
var d Downloader
d.mapper, err = f.RESTMapper()
if err != nil {
return nil, err
}
d.dclient, err = f.DynamicClient()
if err != nil {
return nil, err
}
d.ns, _, _ = f.DefaultNamespace()
return &d, nil
}
func (d *Downloader) Download(info *resource.Info) (*unstructured.Unstructured, error) {
gvk := info.Object.GetObjectKind().GroupVersionKind()
mapping, err := d.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
var resource dynamic.ResourceInterface
switch mapping.Scope.Name() {
case meta.RESTScopeNameNamespace:
if info.Namespace == "" {
info.Namespace = d.ns
}
resource = d.dclient.Resource(mapping.Resource).Namespace(info.Namespace)
case meta.RESTScopeNameRoot:
resource = d.dclient.Resource(mapping.Resource)
}
return resource.Get(info.Name, metav1.GetOptions{})
}
// RunDiff uses the factory to parse file arguments, find the versions to
// diff, find the Info object for each file, and run them against the
// differ.
func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions, from, to string) error {
openapi, err := f.OpenAPISchema()
if err != nil {
return err
}
parser := &parse.Factory{Resources: openapi}
differ, err := NewDiffer(from, to)
if err != nil {
return err
}
defer differ.TearDown()
printer := Printer{}
cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
if err != nil {
return err
}
r := f.NewBuilder().
Unstructured().
NamespaceParam(cmdNamespace).DefaultNamespace().
FilenameParam(enforceNamespace, &options.FilenameOptions).
Local().
Flatten().
Do()
if err := r.Err(); err != nil {
return err
}
dl, err := NewDownloader(f)
if err != nil {
return err
}
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
remote, _ := dl.Download(info)
obj := InfoObject{
Remote: remote,
Info: info,
Parser: parser,
Encoder: cmdutil.InternalVersionJSONEncoder(),
}
return differ.Diff(obj, printer)
})
if err != nil {
return err
}
differ.Run(diff)
return nil
}
| [
"\"KUBERNETES_EXTERNAL_DIFF\""
]
| []
| [
"KUBERNETES_EXTERNAL_DIFF"
]
| [] | ["KUBERNETES_EXTERNAL_DIFF"] | go | 1 | 0 | |
preprocess.py | import os
import glob
import argparse
from tqdm import tqdm
from pathlib import Path
import cv2
def parse_args():
parser = argparse.ArgumentParser(description='Prepare data for nsff training')
parser.add_argument('--root_dir', type=str, help='data root directory', required=True)
parser.add_argument('--cuda-device',type=str,default='0',help='cuda device to use')
parser.add_argument('--max-width', type=int, default=1280, help='max image width')
parser.add_argument('--max-height', type=int, default=720, help='max image height')
parser.add_argument(
'--images-resized', default='images_resized', help='location for resized/renamed images')
parser.add_argument('--image_input', default='frames', help='location for original images')
parser.add_argument(
'--undistorted-output', default='images', help='location of undistorted images')
parser.add_argument(
'--overwrite', default=False,action='store_true', help='overwrite cache')
args = parser.parse_args()
return args
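# Example invocation (paths and device id are illustrative assumptions):
#   python preprocess.py --root_dir /data/my_scene --cuda-device 0 --max-width 1280 --max-height 720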
def resize_frames(args):
vid_name = os.path.basename(args.root_dir)
frames_dir = os.path.join(args.root_dir, args.images_resized)
os.makedirs(frames_dir, exist_ok=True)
files = sorted(
glob.glob(os.path.join(args.root_dir, args.image_input, '*.jpg')) +
glob.glob(os.path.join(args.root_dir, args.image_input, '*.png')))
print('Resizing images ...')
for file_ind, file in enumerate(tqdm(files, desc=f'imresize: {vid_name}')):
out_frame_fn = f'{frames_dir}/{file_ind:05}.png'
# skip if both the output frame and the mask exist
if os.path.exists(out_frame_fn) and not args.overwrite:
continue
im = cv2.imread(file)
# resize if too big
if im.shape[1] > args.max_width or im.shape[0] > args.max_height:
factor = max(im.shape[1] / args.max_width, im.shape[0] / args.max_height)
dsize = (int(im.shape[1] / factor), int(im.shape[0] / factor))
im = cv2.resize(src=im, dsize=dsize, interpolation=cv2.INTER_AREA)
cv2.imwrite(out_frame_fn, im)
def generate_masks(args):
    # ugly hack: predict_mask.py expects the images in images/, but the undistorted ones will be written there later
undist_dir = os.path.join(args.root_dir, args.undistorted_output)
if not os.path.exists(undist_dir) or args.overwrite:
os.makedirs(undist_dir, exist_ok=True)
os.system(f'cp -r {args.root_dir}/{args.images_resized}/*.png {args.root_dir}/images')
os.system(f'CUDA_VISIBLE_DEVICES={args.cuda_device} python third_party/predict_mask.py --root_dir {args.root_dir}')
os.system(f'rm {args.root_dir}/images')
def run_colmap(args):
max_num_matches = 132768 # colmap setting
if not os.path.exists(f'{args.root_dir}/database.db') or args.overwrite:
os.system(f'''
CUDA_VISIBLE_DEVICES={args.cuda_device} colmap feature_extractor \
--database_path={args.root_dir}/database.db \
--image_path={args.root_dir}/{args.images_resized}\
--ImageReader.mask_path={args.root_dir}/masks \
--ImageReader.camera_model=SIMPLE_RADIAL \
--ImageReader.single_camera=1 \
--ImageReader.default_focal_length_factor=0.95 \
--SiftExtraction.peak_threshold=0.004 \
--SiftExtraction.max_num_features=8192 \
--SiftExtraction.edge_threshold=16''')
os.system(f'''
CUDA_VISIBLE_DEVICES={args.cuda_device} colmap exhaustive_matcher \
--database_path={args.root_dir}/database.db \
--SiftMatching.multiple_models=1 \
--SiftMatching.max_ratio=0.8 \
--SiftMatching.max_error=4.0 \
--SiftMatching.max_distance=0.7 \
--SiftMatching.max_num_matches={max_num_matches}''')
if not os.path.exists(f'{args.root_dir}/sparse') or args.overwrite:
os.makedirs(os.path.join(args.root_dir, 'sparse'), exist_ok=True)
os.system(f'''
CUDA_VISIBLE_DEVICES={args.cuda_device} colmap mapper \
--database_path={args.root_dir}/database.db \
--image_path={args.root_dir}/{args.images_resized} \
--output_path={args.root_dir}/sparse ''')
undist_dir = os.path.join(args.root_dir, args.undistorted_output)
if not os.path.exists(undist_dir) or args.overwrite:
os.makedirs(undist_dir, exist_ok=True)
os.system(f'''
CUDA_VISIBLE_DEVICES={args.cuda_device} colmap image_undistorter \
--input_path={args.root_dir}/sparse/0 \
--image_path={args.root_dir}/{args.images_resized} \
--output_path={args.root_dir} \
--output_type=COLMAP''')
def generate_depth(args):
disp_dir = os.path.join(args.root_dir, 'disps')
if not os.path.exists(disp_dir) or args.overwrite:
cur_dir = Path(__file__).absolute().parent
os.chdir(f'{str(cur_dir)}/third_party/depth')
os.environ['MKL_THREADING_LAYER'] = 'GNU'
#os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
# os.system(f'CUDA_VISIBLE_DEVICES={args.cuda_device} python run.py --Final --data_dir {args.root_dir}/images --output_dir {args.root_dir}/disps --depthNet 0')
os.system(f'CUDA_VISIBLE_DEVICES={args.cuda_device} python run_monodepth.py -i {args.root_dir}/images -o {args.root_dir}/disps -t dpt_large')
os.chdir(f'{str(cur_dir)}')
def generate_flow(args):
flow_fw_dir = os.path.join(args.root_dir, 'flow_fw')
flow_bw_dir = os.path.join(args.root_dir, 'flow_bw')
if not os.path.exists(flow_fw_dir) or not os.path.exists(flow_bw_dir) or args.overwrite:
cur_dir = Path(__file__).absolute().parent
os.chdir(f'{str(cur_dir)}/third_party/flow')
os.system(f'CUDA_VISIBLE_DEVICES={args.cuda_device} python demo.py --model models/raft-things.pth --path {args.root_dir}')
os.chdir(f'{str(cur_dir)}')
if __name__ == '__main__':
args = parse_args()
resize_frames(args)
generate_masks(args)
run_colmap(args)
generate_depth(args)
generate_flow(args)
print('finished!') | []
| []
| [
"MKL_SERVICE_FORCE_INTEL",
"MKL_THREADING_LAYER"
]
| [] | ["MKL_SERVICE_FORCE_INTEL", "MKL_THREADING_LAYER"] | python | 2 | 0 | |
artisan/doorman/core/db.go | /*
Onix Config Manager - Artisan's Doorman
Copyright (c) 2018-Present by www.gatblau.org
Licensed under the Apache License, Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0
Contributors to this project, hereby assign copyright in this code to the project,
to be licensed under the same terms as the rest of the code.
*/
package core
import (
"context"
"fmt"
"github.com/gatblau/onix/artisan/doorman/types"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"log"
"os"
"time"
)
const DbName = "doorman"
// Db manage MongoDb connections
type Db struct {
options *options.ClientOptions
}
func NewDb() *Db {
return &Db{
options: options.Client().ApplyURI(getDbConnString()),
}
}
// ctx create a context with timeout of 30 seconds
func ctx() context.Context {
context, _ := context.WithTimeout(context.Background(), 30*time.Second)
return context
}
// getDbConnString get the connection string to the MongoDb database
// e.g. mongodb://localhost:27017
// e.g. mongodb://user:[email protected]:27017/dbname?keepAlive=true&poolSize=30&autoReconnect=true&socketTimeoutMS=360000&connectTimeoutMS=360000
func getDbConnString() string {
value := os.Getenv("DOORMAN_DB_CONN")
if len(value) == 0 {
panic("DOORMAN_DB_CONN not defined")
}
return value
}
// InsertObject inserts a nameable object into the specified collection
func (db *Db) InsertObject(collection types.Collection, obj types.Nameable) (interface{}, error) {
item, err := db.FindByName(collection, obj.GetName())
if err != nil {
return nil, err
}
// if the key was found
if item.Err() != mongo.ErrNoDocuments {
return nil, fmt.Errorf("object in %s collection with name %s already exist", collection, obj.GetName())
}
c := ctx()
client, err := mongo.Connect(c, db.options)
if err != nil {
return nil, err
}
defer client.Disconnect(c)
coll := client.Database(DbName).Collection(string(collection))
// insert the key
result, insertErr := coll.InsertOne(c, obj)
if insertErr != nil {
return nil, fmt.Errorf("cannot insert object into %s collection: %s", collection, err)
}
return result.InsertedID, nil
}
// FindByName finds an object by name
func (db *Db) FindByName(collection types.Collection, name string) (*mongo.SingleResult, error) {
c := ctx()
client, err := mongo.Connect(c, db.options)
if err != nil {
return nil, err
}
defer client.Disconnect(c)
coll := client.Database(DbName).Collection(string(collection))
item := coll.FindOne(c, bson.M{"_id": name})
return item, nil
}
// FindMany finds the objects matching the specified filter; results must be a pointer to a slice
func (db *Db) FindMany(collection types.Collection, filter bson.M, results interface{}) error {
c := ctx()
client, err := mongo.Connect(c, db.options)
if err != nil {
return err
}
defer client.Disconnect(c)
coll := client.Database(DbName).Collection(string(collection))
cursor, findErr := coll.Find(ctx(), filter)
if findErr != nil {
return findErr
}
// return elements that match the criteria
if err = cursor.All(ctx(), &results); err != nil {
return err
}
return nil
}
// ObjectExists checks if an object exists in the specified collection
func (db *Db) ObjectExists(collection types.Collection, name string) bool {
item, err := db.FindByName(collection, name)
if err != nil {
log.Printf("cannot retrieve item %s in collection %s: %s\n", name, collection, err)
return false
}
return item.Err() != mongo.ErrNoDocuments
}
// FindKeys retrieves the keys matching the specified criteria, decrypting the value of any private key
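//
// For example (the field name is an assumption for illustration):
//
//	keys, err := db.FindKeys(bson.M{"owner": "doorman"})
//
// returns the matching keys with any private key values decrypted.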
func (db *Db) FindKeys(filter bson.M) ([]types.Key, error) {
var results []types.Key
	err := db.FindMany(types.KeysColl, filter, &results)
if err != nil {
return nil, err
}
for i, key := range results {
if key.IsPrivate {
dec, decErr := decrypt(key.Value)
if decErr != nil {
				return nil, decErr
}
results[i].Value = dec
}
}
return results, nil
}
| [
"\"DOORMAN_DB_CONN\""
]
| []
| [
"DOORMAN_DB_CONN"
]
| [] | ["DOORMAN_DB_CONN"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fileshare.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test/funksvd_recommender_test.py | import os
from builder.matrix_factorization_calculator import MatrixFactorization
from recs.funksvd_recommender import FunkSVDRecs
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "prs_project.settings")
import django
django.setup()
import unittest
import pandas as pd
STAR_WARS = 'star wars'
WONDER_WOMAN = 'wonder woman'
AVENGERS = 'avengers'
WOLVERINE = 'logan'
PIRATES_OF = 'pirates of the caribbien'
HARRY = 'harry potter I'
CAPTAIN_AMERICA = 'captain america'
ALIEN = 'alien'
DR_STRANGELOVE = 'doctor strangelove'
JACQUES = 'jacques'
class TestNeighborhoodBasedRecs(unittest.TestCase):
def setUp(self):
self.ratings = pd.DataFrame(
[['1', STAR_WARS, 9, '2013-10-12 23:21:27+00:00'],
['1', WONDER_WOMAN, 10, '2014-10-12 23:22:27+00:00'],
['1', AVENGERS, 10, '2015-11-12 23:20:27+00:00'],
['1', WOLVERINE, 8, '2015-08-12 23:20:27+00:00'],
['1', PIRATES_OF, 10, '2015-10-12 22:20:27+00:00'],
['1', HARRY, 10, '2015-10-12 23:21:27+00:00'],
['1', CAPTAIN_AMERICA, 10, '2014-10-12 23:20:27+00:00'],
['1', ALIEN, 6, '2015-10-12 23:22:27+00:00'],
['1', JACQUES, 6, '2015-10-12 11:20:27+00:00'],
['2', STAR_WARS, 10, '2013-10-12 23:20:27+00:00'],
['2', WONDER_WOMAN, 10, '2014-10-12 23:20:27+00:00'],
['2', AVENGERS, 9, '2016-10-12 23:20:27+00:00'],
['2', PIRATES_OF, 6, '2010-10-12 23:20:27+00:00'],
['2', CAPTAIN_AMERICA, 10, '2005-10-12 23:20:27+00:00'],
['2', DR_STRANGELOVE, 10, '2015-01-12 23:20:27+00:00'],
['3', STAR_WARS, 9, '2013-10-12 20:20:27+00:00'],
['3', AVENGERS, 10, '2015-10-12 10:20:27+00:00'],
['3', PIRATES_OF, 9, '2013-03-12 23:20:27+00:00'],
['3', HARRY, 8, '2016-10-13 23:20:27+00:00'],
['3', DR_STRANGELOVE, 10, '2016-09-12 23:20:27+00:00'],
['4', STAR_WARS, 8, '2013-10-12 23:20:27+00:00'],
['4', WONDER_WOMAN, 8, '2014-10-12 23:20:27+00:00'],
['4', AVENGERS, 9, '2015-10-12 23:20:27+00:00'],
['4', PIRATES_OF, 5, '2013-10-12 23:20:27+00:00'],
['4', HARRY, 6, '2014-10-12 23:20:27+00:00'],
['4', ALIEN, 8, '2015-10-12 23:20:27+00:00'],
['4', DR_STRANGELOVE, 9, '2015-10-12 23:20:27+00:00'],
['5', STAR_WARS, 6, '2013-10-12 23:20:27+00:00'],
['5', AVENGERS, 1, '2014-10-12 23:20:27+00:00'],
['5', WOLVERINE, 2, '2015-10-12 23:20:27+00:00'],
['5', PIRATES_OF, 2, '2016-10-12 23:20:27+00:00'],
['5', HARRY, 10, '2016-10-12 23:20:27+00:00'],
['5', CAPTAIN_AMERICA, 1, '2016-10-12 23:20:27+00:00'],
['5', ALIEN, 4, '2016-10-12 23:20:27+00:00'],
['5', DR_STRANGELOVE, 3, '2016-10-12 23:20:27+00:00'],
['5', JACQUES, 10, '2016-10-12 23:20:27+00:00'],
['6', STAR_WARS, 8, '2013-10-12 23:20:27+00:00'],
['6', WONDER_WOMAN, 8, '2014-10-12 23:20:27+00:00'],
['6', AVENGERS, 8, '2014-10-12 23:20:27+00:00'],
['6', WOLVERINE, 8, '2015-10-12 23:20:27+00:00'],
['6', PIRATES_OF, 6, '2016-10-12 23:20:27+00:00'],
['6', HARRY, 10, '2016-10-12 23:20:27+00:00'],
['6', JACQUES, 8, '2016-10-12 23:20:27+00:00'],
['7', AVENGERS, 10, '2014-10-12 23:20:27+00:00'],
['7', PIRATES_OF, 3, '2016-10-12 23:20:27+00:00'],
['7', HARRY, 1, '2016-10-12 23:20:27+00:00'],
['7', ALIEN, 8, '2016-10-12 23:20:27+00:00'],
['7', DR_STRANGELOVE, 10, '2016-10-12 23:20:27+00:00'],
['8', STAR_WARS, 9, '2013-10-12 23:20:27+00:00'],
['8', WONDER_WOMAN, 7, '2014-10-12 23:20:27+00:00'],
['8', AVENGERS, 7, '2014-10-12 23:20:27+00:00'],
['8', WOLVERINE, 7, '2015-10-12 23:20:27+00:00'],
['8', PIRATES_OF, 8, '2016-10-12 23:20:27+00:00'],
['8', HARRY, 8, '2016-10-12 23:20:27+00:00'],
['8', ALIEN, 8, '2016-10-12 23:20:27+00:00'],
['8', DR_STRANGELOVE, 8, '2016-10-12 23:20:27+00:00'],
['8', JACQUES, 10, '2016-10-12 23:20:27+00:00'],
['9', WONDER_WOMAN, 7, '2014-10-12 23:20:27+00:00'],
['9', AVENGERS, 8, '2014-10-12 23:20:27+00:00'],
['9', WOLVERINE, 8, '2015-10-12 23:20:27+00:00'],
['9', PIRATES_OF, 7, '2016-10-12 23:20:27+00:00'],
['9', HARRY, 8, '2016-10-12 23:20:27+00:00'],
['9', CAPTAIN_AMERICA, 10, '2016-10-12 23:20:27+00:00'],
['9', DR_STRANGELOVE, 10, '2016-10-12 23:20:27+00:00'],
['9', JACQUES, 7, '2016-10-12 23:20:27+00:00'],
['10', AVENGERS, 7, '2014-10-12 23:20:27+00:00'],
['10', ALIEN, 10, '2016-10-12 23:20:27+00:00'],
['10', CAPTAIN_AMERICA, 6, '2016-10-12 23:20:27+00:00'],
['10', DR_STRANGELOVE, 8, '2016-10-12 23:20:27+00:00'],
], columns=['user_id', 'movie_id', 'rating', 'rating_timestamp'])
self.save_path = './test/'
self.k=3
MF = MatrixFactorization(save_path=self.save_path)
MF.train(self.ratings, k=self.k)
def test_rec(self):
recommender = FunkSVDRecs(self.save_path)
recs = recommender.recommend_items_by_ratings('1',
[{'movie_id': AVENGERS, 'rating': 7},
{'movie_id': ALIEN, 'rating': 10},
{'movie_id': CAPTAIN_AMERICA, 'rating': 6}], num=2)
self.assertIsNotNone(recs)
self.assertEqual(len(recs), 2)
def test_rec2(self):
recommender = FunkSVDRecs(self.save_path)
recs = recommender.recommend_items_by_ratings('5',
[{'movie_id': AVENGERS, 'rating': 1}], num=5)
self.assertIsNotNone(recs)
self.assertEqual(len(recs), 5)
top = [r[0] for r in recs][:2]
self.assertIn(HARRY, top, '{} was missing from {}'.format(HARRY, top))
self.assertIn(JACQUES, top, '{} was missing from {}'.format(JACQUES, top))
def test_rec_increasing(self):
recommender = FunkSVDRecs(self.save_path)
recs1 = recommender.recommend_items_by_ratings('5',
[{'movie_id': AVENGERS, 'rating': 1}], num=2)
self.assertIsNotNone(recs1)
self.assertEqual(len(recs1), 2)
recs2 = recommender.recommend_items_by_ratings('5',
[{'movie_id': AVENGERS, 'rating': 1}], num=3)
self.assertIsNotNone(recs2)
self.assertEqual(len(recs2), 3)
self.assertEqual(recs1[0],recs2[0] )
self.assertEqual(recs1[1],recs2[1] )
def test_rec_increasing2(self):
recommender = FunkSVDRecs(self.save_path)
recs4 = recommender.recommend_items_by_ratings('5',
[{'movie_id': AVENGERS, 'rating': 1}], num=4)
self.assertIsNotNone(recs4)
self.assertEqual(len(recs4), 4)
self.assertAlmostEqual(recs4[1][1]['prediction'], 7.812836963)
recs6 = recommender.recommend_items_by_ratings('5',
[{'movie_id': AVENGERS, 'rating': 1}], num=6)
self.assertIsNotNone(recs6)
self.assertEqual(len(recs6), 6)
self.compare_recs(recs4, recs6)
recommender = FunkSVDRecs(self.save_path)
recs42 = recommender.recommend_items_by_ratings('5',
[{'movie_id': AVENGERS, 'rating': 1}], num=4)
self.compare_recs(recs4, recs42)
recs1 = recommender.recommend_items_by_ratings('5',
[{'movie_id': AVENGERS, 'rating': 1}], num=7)
recs2 = recommender.recommend_items_by_ratings('5',
[{'movie_id': AVENGERS, 'rating': 1}], num=9)
self.compare_recs(recs1, recs2)
def compare_recs(self, recs1, recs2):
for i in range(len(recs1)):
self.assertEqual(recs1[i][0], recs2[i][0])
if __name__ == '__main__':
unittest.main() | []
| []
| []
| [] | [] | python | 0 | 0 | |
qa/rpc-tests/p2p-acceptblock.py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
   getdata for it.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("ATMBCOIND", "dogecoind"),
help="dogecoind binary to test")
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print("First height 2 block accepted by both nodes")
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in range(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print("Second height 2 block accepted only from whitelisted peer")
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in range(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
self.nodes[0].getblock(blocks_h3[0].hash)
print("Unrequested more-work block accepted from non-whitelisted peer")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print("Successfully reorged to length 3 chain from whitelisted peer")
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in range(2):
for i in range(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print("Unrequested block far ahead of tip accepted from whitelisted peer")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print("Successfully reorged to longer chain from non-whitelisted peer")
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
| []
| []
| [
"ATMBCOIND"
]
| [] | ["ATMBCOIND"] | python | 1 | 0 | |
cluster-autoscaler/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"context"
"errors"
"fmt"
"os"
"reflect"
"strconv"
"strings"
"sync"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/value"
utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
"go.etcd.io/etcd/clientv3"
"k8s.io/klog/v2"
)
const (
	// We have set a buffer in order to reduce the number of context switches.
incomingBufSize = 100
outgoingBufSize = 100
)
// fatalOnDecodeError is used during testing to panic the server if watcher encounters a decoding error
var fatalOnDecodeError = false
// errTestingDecode is the only error that testingDeferOnDecodeError catches during a panic
var errTestingDecode = errors.New("sentinel error only used during testing to indicate watch decoding error")
// testingDeferOnDecodeError is used during testing to recover from a panic caused by errTestingDecode, all other values continue to panic
func testingDeferOnDecodeError() {
if r := recover(); r != nil && r != errTestingDecode {
panic(r)
}
}
func init() {
// check to see if we are running in a test environment
TestOnlySetFatalOnDecodeError(true)
fatalOnDecodeError, _ = strconv.ParseBool(os.Getenv("KUBE_PANIC_WATCH_DECODE_ERROR"))
}
// TestOnlySetFatalOnDecodeError should only be used for cases where decode errors are expected and need to be tested. e.g. conversion webhooks.
func TestOnlySetFatalOnDecodeError(b bool) {
fatalOnDecodeError = b
}
type watcher struct {
client *clientv3.Client
codec runtime.Codec
newFunc func() runtime.Object
objectType string
versioner storage.Versioner
transformer value.Transformer
}
// watchChan implements watch.Interface.
type watchChan struct {
watcher *watcher
key string
initialRev int64
recursive bool
progressNotify bool
internalPred storage.SelectionPredicate
ctx context.Context
cancel context.CancelFunc
incomingEventChan chan *event
resultChan chan watch.Event
errChan chan error
}
func newWatcher(client *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, versioner storage.Versioner, transformer value.Transformer) *watcher {
res := &watcher{
client: client,
codec: codec,
newFunc: newFunc,
versioner: versioner,
transformer: transformer,
}
if newFunc == nil {
res.objectType = "<unknown>"
} else {
res.objectType = reflect.TypeOf(newFunc()).String()
}
return res
}
// Watch watches on a key and returns a watch.Interface that transfers relevant notifications.
// If rev is zero, it will return the existing object(s) and then start watching from
// the maximum revision+1 from returned objects.
// If rev is non-zero, it will watch events happened after given revision.
// If recursive is false, it watches on given key.
// If recursive is true, it watches any children and directories under the key, excluding the root key itself.
// pred must be non-nil. Only if pred matches the change, it will be returned.
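//
// For example (key and predicate are illustrative only):
//
//	w.Watch(ctx, "/registry/pods/", 0, true, false, storage.Everything)
//
// lists the existing objects under the prefix first and then watches for
// changes starting from the returned revision+1.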
func (w *watcher) Watch(ctx context.Context, key string, rev int64, recursive, progressNotify bool, pred storage.SelectionPredicate) (watch.Interface, error) {
if recursive && !strings.HasSuffix(key, "/") {
key += "/"
}
wc := w.createWatchChan(ctx, key, rev, recursive, progressNotify, pred)
go wc.run()
// For etcd watch we don't have an easy way to answer whether the watch
// has already caught up. So in the initial version (given that watchcache
// is by default enabled for all resources but Events), we just deliver
// the initialization signal immediately. Improving this will be explored
// in the future.
utilflowcontrol.WatchInitialized(ctx)
return wc, nil
}
func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, recursive, progressNotify bool, pred storage.SelectionPredicate) *watchChan {
wc := &watchChan{
watcher: w,
key: key,
initialRev: rev,
recursive: recursive,
progressNotify: progressNotify,
internalPred: pred,
incomingEventChan: make(chan *event, incomingBufSize),
resultChan: make(chan watch.Event, outgoingBufSize),
errChan: make(chan error, 1),
}
if pred.Empty() {
// The filter doesn't filter out any object.
wc.internalPred = storage.Everything
}
// The etcd server waits until it cannot find a leader for 3 election
// timeouts to cancel existing streams. 3 is currently a hard coded
// constant. The election timeout defaults to 1000ms. If the cluster is
// healthy, when the leader is stopped, the leadership transfer should be
// smooth. (leader transfers its leadership before stopping). If leader is
// hard killed, other servers will take an election timeout to realize
// leader lost and start campaign.
wc.ctx, wc.cancel = context.WithCancel(clientv3.WithRequireLeader(ctx))
return wc
}
func (wc *watchChan) run() {
watchClosedCh := make(chan struct{})
go wc.startWatching(watchClosedCh)
var resultChanWG sync.WaitGroup
resultChanWG.Add(1)
go wc.processEvent(&resultChanWG)
select {
case err := <-wc.errChan:
if err == context.Canceled {
break
}
errResult := transformErrorToEvent(err)
if errResult != nil {
// error result is guaranteed to be received by user before closing ResultChan.
select {
case wc.resultChan <- *errResult:
case <-wc.ctx.Done(): // user has given up all results
}
}
case <-watchClosedCh:
case <-wc.ctx.Done(): // user cancel
}
// We use wc.ctx to reap all goroutines. Under whatever condition, we should stop them all.
// It's fine to double cancel.
wc.cancel()
// we need to wait until resultChan wouldn't be used anymore
resultChanWG.Wait()
close(wc.resultChan)
}
func (wc *watchChan) Stop() {
wc.cancel()
}
func (wc *watchChan) ResultChan() <-chan watch.Event {
return wc.resultChan
}
// sync tries to retrieve existing data and send them to process.
// The revision to watch will be set to the revision in response.
// All events sent will have isCreated=true
func (wc *watchChan) sync() error {
opts := []clientv3.OpOption{}
if wc.recursive {
opts = append(opts, clientv3.WithPrefix())
}
getResp, err := wc.watcher.client.Get(wc.ctx, wc.key, opts...)
if err != nil {
return err
}
wc.initialRev = getResp.Header.Revision
for _, kv := range getResp.Kvs {
wc.sendEvent(parseKV(kv))
}
return nil
}
// logWatchChannelErr checks whether the error is about mvcc revision compaction which is regarded as warning
func logWatchChannelErr(err error) {
if !strings.Contains(err.Error(), "mvcc: required revision has been compacted") {
klog.Errorf("watch chan error: %v", err)
} else {
klog.Warningf("watch chan error: %v", err)
}
}
// startWatching does:
// - get current objects if initialRev=0; set initialRev to current rev
// - watch on given key and send events to process.
func (wc *watchChan) startWatching(watchClosedCh chan struct{}) {
if wc.initialRev == 0 {
if err := wc.sync(); err != nil {
klog.Errorf("failed to sync with latest state: %v", err)
wc.sendError(err)
return
}
}
opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1), clientv3.WithPrevKV()}
if wc.recursive {
opts = append(opts, clientv3.WithPrefix())
}
if wc.progressNotify {
opts = append(opts, clientv3.WithProgressNotify())
}
wch := wc.watcher.client.Watch(wc.ctx, wc.key, opts...)
for wres := range wch {
if wres.Err() != nil {
err := wres.Err()
// If there is an error on server (e.g. compaction), the channel will return it before closed.
logWatchChannelErr(err)
wc.sendError(err)
return
}
if wres.IsProgressNotify() {
wc.sendEvent(progressNotifyEvent(wres.Header.GetRevision()))
metrics.RecordEtcdBookmark(wc.watcher.objectType)
continue
}
for _, e := range wres.Events {
parsedEvent, err := parseEvent(e)
if err != nil {
logWatchChannelErr(err)
wc.sendError(err)
return
}
wc.sendEvent(parsedEvent)
}
}
// When we come to this point, it's only possible that client side ends the watch.
// e.g. cancel the context, close the client.
// If this watch chan is broken and context isn't cancelled, other goroutines will still hang.
// We should notify the main thread that this goroutine has exited.
close(watchClosedCh)
}
// processEvent processes events from etcd watcher and sends results to resultChan.
func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
defer wg.Done()
for {
select {
case e := <-wc.incomingEventChan:
res := wc.transform(e)
if res == nil {
continue
}
if len(wc.resultChan) == outgoingBufSize {
klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize, "objectType", wc.watcher.objectType)
}
// If user couldn't receive results fast enough, we also block incoming events from watcher.
// Because storing events in local will cause more memory usage.
// The worst case would be closing the fast watcher.
select {
case wc.resultChan <- *res:
case <-wc.ctx.Done():
return
}
case <-wc.ctx.Done():
return
}
}
}
func (wc *watchChan) filter(obj runtime.Object) bool {
if wc.internalPred.Empty() {
return true
}
matched, err := wc.internalPred.Matches(obj)
return err == nil && matched
}
func (wc *watchChan) acceptAll() bool {
return wc.internalPred.Empty()
}
// transform transforms an event into a result for user if not filtered.
func (wc *watchChan) transform(e *event) (res *watch.Event) {
curObj, oldObj, err := wc.prepareObjs(e)
if err != nil {
klog.Errorf("failed to prepare current and previous objects: %v", err)
wc.sendError(err)
return nil
}
switch {
case e.isProgressNotify:
if wc.watcher.newFunc == nil {
return nil
}
object := wc.watcher.newFunc()
if err := wc.watcher.versioner.UpdateObject(object, uint64(e.rev)); err != nil {
klog.Errorf("failed to propagate object version: %v", err)
return nil
}
res = &watch.Event{
Type: watch.Bookmark,
Object: object,
}
case e.isDeleted:
if !wc.filter(oldObj) {
return nil
}
res = &watch.Event{
Type: watch.Deleted,
Object: oldObj,
}
case e.isCreated:
if !wc.filter(curObj) {
return nil
}
res = &watch.Event{
Type: watch.Added,
Object: curObj,
}
default:
if wc.acceptAll() {
res = &watch.Event{
Type: watch.Modified,
Object: curObj,
}
return res
}
curObjPasses := wc.filter(curObj)
oldObjPasses := wc.filter(oldObj)
switch {
case curObjPasses && oldObjPasses:
res = &watch.Event{
Type: watch.Modified,
Object: curObj,
}
case curObjPasses && !oldObjPasses:
res = &watch.Event{
Type: watch.Added,
Object: curObj,
}
case !curObjPasses && oldObjPasses:
res = &watch.Event{
Type: watch.Deleted,
Object: oldObj,
}
}
}
return res
}
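// Summary of the mapping implemented by transform above (descriptive note):
// progress-notify events become Bookmark events, creations become Added,
// deletions become Deleted, and updates become Modified; when a selection
// predicate is set, an update that starts or stops matching the predicate is
// reported as Added or Deleted respectively, and events matching neither side
// are dropped.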
func transformErrorToEvent(err error) *watch.Event {
err = interpretWatchError(err)
if _, ok := err.(apierrors.APIStatus); !ok {
err = apierrors.NewInternalError(err)
}
status := err.(apierrors.APIStatus).Status()
return &watch.Event{
Type: watch.Error,
Object: &status,
}
}
func (wc *watchChan) sendError(err error) {
select {
case wc.errChan <- err:
case <-wc.ctx.Done():
}
}
func (wc *watchChan) sendEvent(e *event) {
if len(wc.incomingEventChan) == incomingBufSize {
klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow decoding, user not receiving fast, or other processing logic", "incomingEvents", incomingBufSize, "objectType", wc.watcher.objectType)
}
select {
case wc.incomingEventChan <- e:
case <-wc.ctx.Done():
}
}
func (wc *watchChan) prepareObjs(e *event) (curObj runtime.Object, oldObj runtime.Object, err error) {
if e.isProgressNotify {
		// progressNotify events contain neither the current nor the previous object version.
return nil, nil, nil
}
if !e.isDeleted {
data, _, err := wc.watcher.transformer.TransformFromStorage(e.value, authenticatedDataString(e.key))
if err != nil {
return nil, nil, err
}
curObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, data, e.rev)
if err != nil {
return nil, nil, err
}
}
// We need to decode prevValue, only if this is deletion event or
// the underlying filter doesn't accept all objects (otherwise we
// know that the filter for previous object will return true and
// we need the object only to compute whether it was filtered out
// before).
if len(e.prevValue) > 0 && (e.isDeleted || !wc.acceptAll()) {
data, _, err := wc.watcher.transformer.TransformFromStorage(e.prevValue, authenticatedDataString(e.key))
if err != nil {
return nil, nil, err
}
// Note that this sends the *old* object with the etcd revision for the time at
// which it gets deleted.
oldObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, data, e.rev)
if err != nil {
return nil, nil, err
}
}
return curObj, oldObj, nil
}
func decodeObj(codec runtime.Codec, versioner storage.Versioner, data []byte, rev int64) (_ runtime.Object, err error) {
obj, err := runtime.Decode(codec, []byte(data))
if err != nil {
if fatalOnDecodeError {
// catch watch decode error iff we caused it on
// purpose during a unit test
defer testingDeferOnDecodeError()
// we are running in a test environment and thus an
// error here is due to a coder mistake if the defer
// does not catch it
panic(err)
}
return nil, err
}
// ensure resource version is set on the object we load from etcd
if err := versioner.UpdateObject(obj, uint64(rev)); err != nil {
return nil, fmt.Errorf("failure to version api object (%d) %#v: %v", rev, obj, err)
}
return obj, nil
}
| [
"\"KUBE_PANIC_WATCH_DECODE_ERROR\""
]
| []
| [
"KUBE_PANIC_WATCH_DECODE_ERROR"
]
| [] | ["KUBE_PANIC_WATCH_DECODE_ERROR"] | go | 1 | 0 | |
cmd/entrypoint-wrapper/main.go | package main
import (
"context"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"os/signal"
"path"
"path/filepath"
"syscall"
"time"
"github.com/sirupsen/logrus"
coreapi "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"github.com/openshift/ci-tools/pkg/steps"
"github.com/openshift/ci-tools/pkg/util"
)
var (
coreScheme = runtime.NewScheme()
codecFactory = serializer.NewCodecFactory(coreScheme)
encoder runtime.Encoder
)
func init() {
utilruntime.Must(coreapi.AddToScheme(coreScheme))
encoder = codecFactory.LegacyCodec(coreapi.SchemeGroupVersion)
}
func main() {
flagSet := flag.NewFlagSet("", flag.ExitOnError)
opt := bindOptions(flagSet)
if err := flagSet.Parse(os.Args[1:]); err != nil {
		logrus.WithError(err).Fatal("Failed to parse flagset")
}
opt.cmd = flagSet.Args()
if err := opt.complete(); err != nil {
fmt.Fprintln(os.Stderr, "error:", err)
os.Exit(1)
}
if err := opt.run(); err != nil {
fmt.Fprintln(os.Stderr, "error:", err)
os.Exit(1)
}
}
type options struct {
dry bool
name string
srcPath string
dstPath string
cmd []string
client coreclientset.SecretInterface
}
func bindOptions(flag *flag.FlagSet) *options {
opt := &options{}
flag.BoolVar(&opt.dry, "dry-run", false, "Print the secret instead of creating it")
return opt
}
func (o *options) complete() error {
if len(o.cmd) == 0 {
return fmt.Errorf("a command is required")
}
if o.srcPath = os.Getenv("SHARED_DIR"); o.srcPath == "" {
return fmt.Errorf("environment variable SHARED_DIR is empty")
}
o.dstPath = filepath.Join(os.TempDir(), "secret")
os.Setenv("SHARED_DIR", o.dstPath)
var ns string
if ns = os.Getenv("NAMESPACE"); ns == "" {
return fmt.Errorf("environment variable NAMESPACE is empty")
}
if o.name = os.Getenv("JOB_NAME_SAFE"); o.name == "" {
return fmt.Errorf("environment variable JOB_NAME_SAFE is empty")
}
if !o.dry {
var err error
if o.client, err = loadClient(ns); err != nil {
return err
}
}
return nil
}
func (o *options) run() error {
if err := copyDir(o.dstPath, o.srcPath); err != nil {
return fmt.Errorf("failed to copy secret mount: %w", err)
}
var errs []error
ctx, cancel := context.WithCancel(context.Background())
go uploadKubeconfig(ctx, o.client, o.name, o.dstPath, o.dry)
if err := execCmd(o.cmd); err != nil {
errs = append(errs, fmt.Errorf("failed to execute wrapped command: %w", err))
}
// we will upload the secret from the post-execution state, so we know
	// that the best-effort upload of the kubeconfig can exit now, so as
	// not to race with the post-execution one
cancel()
if err := createSecret(o.client, o.name, o.dstPath, o.dry); err != nil {
errs = append(errs, fmt.Errorf("failed to create/update secret: %w", err))
}
return utilerrors.NewAggregate(errs)
}
func loadClient(namespace string) (coreclientset.SecretInterface, error) {
config, err := rest.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("failed to load cluster config: %w", err)
}
client, err := coreclientset.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to create client: %w", err)
}
return client.Secrets(namespace), nil
}
func copyDir(dst, src string) error {
if err := os.MkdirAll(dst, 0770); err != nil {
return err
}
dir, err := os.Open(src)
if err != nil {
return err
}
files, err := dir.Readdirnames(-1)
if err != nil {
return err
}
for _, f := range files {
srcPath := filepath.Join(src, f)
if stat, err := os.Stat(srcPath); err != nil {
return err
} else if stat.IsDir() {
continue
}
srcFD, err := os.Open(srcPath)
if err != nil {
return err
}
defer srcFD.Close()
dstFD, err := os.Create(filepath.Join(dst, f))
if err != nil {
return err
}
defer dstFD.Close()
_, err = io.Copy(dstFD, srcFD)
if err != nil {
return err
}
}
return nil
}
func execCmd(argv []string) error {
proc := exec.Command(argv[0], argv[1:]...)
proc.Stdout = os.Stdout
proc.Stderr = os.Stderr
if proc.Env == nil {
// the command inherits the environment if it's nil,
// explicitly set it so when we change it, we add to
// the inherited set instead of overwriting
proc.Env = os.Environ()
}
manageHome(proc)
manageCLI(proc)
if err := manageKubeconfig(proc); err != nil {
return err
}
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
for {
select {
case <-ctx.Done():
return
case s := <-sig:
fmt.Fprintf(os.Stderr, "received signal %d, forwarding\n", s)
if err := proc.Process.Signal(s); err != nil {
logrus.WithError(err).Error("Failed to forward signal")
}
}
}
}()
return proc.Run()
}
// manageCLI configures the PATH to include a CLI_DIR if one was provided
func manageCLI(proc *exec.Cmd) {
cliDir, set := os.LookupEnv(steps.CliEnv)
if set {
proc.Env = append(proc.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv("PATH"), cliDir))
}
}
// manageHome provides a writeable home so kubectl discovery can be cached
func manageHome(proc *exec.Cmd) {
home, set := os.LookupEnv("HOME")
needHome := !set
if set {
if err := syscall.Access(home, syscall.O_RDWR); err != nil {
// $HOME is set but not writeable
needHome = true
}
}
if needHome {
// the last of any duplicate keys is used for env
proc.Env = append(proc.Env, "HOME=/alabama")
}
}
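// Note on manageHome above (illustrative; the values are made up): os/exec
// keeps the last value for a duplicated key, so appending "HOME=/alabama"
// overrides any inherited, read-only HOME:
//
//	cmd.Env = []string{"HOME=/root", "HOME=/alabama"} // the child process sees HOME=/alabama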
// manageKubeconfig provides a unique writeable kubeconfig so users can for example set a namespace,
// but changes are not propagated to subsequent steps to limit the amount of possible mistakes
func manageKubeconfig(proc *exec.Cmd) error {
if original, set := os.LookupEnv("KUBECONFIG"); set {
writableCopy, err := ioutil.TempFile("", "kubeconfig-*")
if err != nil {
return fmt.Errorf("could not create unique, writeable $KUBECONFIG copy: %w", err)
}
proc.Env = append(proc.Env, fmt.Sprintf("KUBECONFIG=%s", writableCopy.Name()))
// the source KUBECONFIG may begin to exist if it does not exist at the start, so poll for it
go func() {
if err := wait.PollImmediateInfinite(time.Second, func() (done bool, err error) {
if _, err := os.Stat(original); err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
source, err := os.Open(original)
if err != nil {
return true, err
}
if _, err := io.Copy(writableCopy, source); err != nil {
return true, err
}
if err := writableCopy.Close(); err != nil {
return true, err
}
if err := source.Close(); err != nil {
return true, err
}
return true, nil
}); err != nil {
				logrus.WithError(err).Warn("could not populate unique, writeable $KUBECONFIG copy")
}
}()
}
return nil
}
func createSecret(client coreclientset.SecretInterface, name, dir string, dry bool) error {
if _, err := os.Stat(dir); err != nil {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("failed to stat directory %q: %w", dir, err)
}
secret, err := util.SecretFromDir(dir)
if err != nil {
return fmt.Errorf("failed to generate secret: %w", err)
}
secret.Name = name
if secret.Labels == nil {
secret.Labels = map[string]string{}
}
secret.Labels[steps.SkipCensoringLabel] = "true"
if dry {
err := encoder.Encode(secret, os.Stdout)
if err != nil {
return fmt.Errorf("failed to log secret: %w", err)
}
} else if _, err := client.Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("failed to update secret: %w", err)
}
return nil
}
// uploadKubeconfig will do a best-effort attempt at uploading a kubeconfig
// file if one does not exist at the time we start running but one does get
// created while executing the command
func uploadKubeconfig(ctx context.Context, client coreclientset.SecretInterface, name, dir string, dry bool) {
if _, err := os.Stat(path.Join(dir, "kubeconfig")); err == nil {
// kubeconfig already exists, no need to do anything
return
}
var uploadErr error
if err := wait.PollUntil(time.Second, func() (done bool, err error) {
if _, uploadErr = os.Stat(path.Join(dir, "kubeconfig")); uploadErr != nil {
return false, nil
}
// kubeconfig exists, we can upload it
uploadErr = createSecret(client, name, dir, dry)
return uploadErr == nil, nil // retry errors
}, ctx.Done()); !errors.Is(err, wait.ErrWaitTimeout) {
log.Printf("Failed to upload $KUBECONFIG: %v: %v\n", err, uploadErr)
}
}
| [
"\"SHARED_DIR\"",
"\"NAMESPACE\"",
"\"JOB_NAME_SAFE\"",
"\"PATH\""
]
| []
| [
"NAMESPACE",
"SHARED_DIR",
"PATH",
"JOB_NAME_SAFE"
]
| [] | ["NAMESPACE", "SHARED_DIR", "PATH", "JOB_NAME_SAFE"] | go | 4 | 0 | |
utils/common.py | import numpy as np
import random
import os
import time
import importlib
import cv2
from PIL import Image
import math
import pickle
import torch
from torch import distributed as dist
from torch.utils.data.sampler import Sampler
def load_module(module_type, module_name):
m = importlib.import_module(f'{module_type}.{module_name}')
return m
def return_empty_dict_if_none(x):
return {} if x is None else x
def get_data_sampler(dataset, shuffle=False, is_distributed=False):
if is_distributed:
return torch.utils.data.distributed.DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return torch.utils.data.RandomSampler(dataset)
else:
return torch.utils.data.SequentialSampler(dataset)
def dict2device(d, device, dtype=None):
if isinstance(d, np.ndarray):
d = torch.from_numpy(d)
if torch.is_tensor(d):
d = d.to(device)
if dtype is not None:
d = d.type(dtype)
return d
if isinstance(d, dict):
for k, v in d.items():
d[k] = dict2device(v, device, dtype=dtype)
return d
def setup_environment(seed):
# random
random.seed(seed)
# numpy
np.random.seed(seed)
# cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
# pytorch
os.environ['OMP_NUM_THREADS'] = '1'
torch.set_num_threads(1)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
def squeeze_metrics(d):
metrics = dict()
for k, v in d.items():
if torch.is_tensor(v):
metrics[k] = v.mean().item()
elif isinstance(v, float):
metrics[k] = v
else:
raise NotImplementedError("Unknown datatype for metric: {}".format(type(v)))
return metrics
def reduce_metrics(metrics):
metrics_dict = dict()
for k in metrics[0].keys():
metrics_dict[k] = np.mean([item[k] for item in metrics])
return metrics_dict
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def reduce_loss_dict(loss_dict):
world_size = get_world_size()
if world_size < 2:
return loss_dict
with torch.no_grad():
keys = []
losses = []
for k in sorted(loss_dict.keys()):
keys.append(k)
losses.append(loss_dict[k])
losses = torch.stack(losses, 0)
dist.reduce(losses, dst=0)
if dist.get_rank() == 0:
losses /= world_size
reduced_losses = {k: v for k, v in zip(keys, losses)}
return reduced_losses
def flatten_parameters(parameters):
list_of_flat_parameters = [torch.flatten(p) for p in parameters]
flat_parameters = torch.cat(list_of_flat_parameters).view(-1, 1)
return flat_parameters
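# Note on flatten_parameters above: the result is a single column vector of
# shape (total_parameter_count, 1), i.e. every tensor in `parameters` is
# flattened and concatenated along one dimension.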
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def itt(img):
    tensor = torch.FloatTensor(img)
if len(tensor.shape) == 3:
tensor = tensor.permute(2, 0, 1)
else:
tensor = tensor.unsqueeze(0)
return tensor
def tti(tensor):
tensor = tensor.detach().cpu()
tensor = tensor[0].permute(1, 2, 0)
image = tensor.numpy()
if image.shape[-1] == 1:
image = image[..., 0]
return image
def to_tanh(t):
return t * 2 - 1.
def to_sigm(t):
return (t + 1) / 2
def get_rotation_matrix(angle, axis='x'):
if axis == 'x':
return np.array([
[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]
])
elif axis == 'y':
return np.array([
[np.cos(angle), 0, -np.sin(angle)],
[0, 1, 0],
[np.sin(angle), 0, np.cos(angle)]
])
elif axis == 'z':
return np.array([
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]
])
else:
raise ValueError(f"Unkown axis {axis}")
def rotate_verts(vertices, angle, K, K_inv, axis='y', mean_point=None):
rot_mat = get_rotation_matrix(angle, axis)
rot_mat = torch.FloatTensor(rot_mat).to(vertices.device).unsqueeze(0)
vertices_world = torch.bmm(vertices, K_inv.transpose(1, 2))
if mean_point is None:
mean_point = vertices_world.mean(dim=1)
vertices_rot = vertices_world - mean_point
vertices_rot = torch.bmm(vertices_rot, rot_mat.transpose(1, 2))
vertices_rot = vertices_rot + mean_point
vertices_rot_cam = torch.bmm(vertices_rot, K.transpose(1, 2))
return vertices_rot_cam, mean_point
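# Illustrative usage of rotate_verts above (a sketch; the shapes and the
# identity intrinsics are assumptions): rotate camera-space vertices by 90
# degrees around the vertical axis, reusing the returned pivot for later calls.
#
#   K = torch.eye(3).unsqueeze(0)        # (1, 3, 3) camera intrinsics
#   K_inv = torch.inverse(K)
#   verts = torch.rand(1, 6890, 3)       # (B, N, 3) vertices in camera coordinates
#   rotated, pivot = rotate_verts(verts, math.pi / 2, K, K_inv, axis='y')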
def json2kps(openpose_dict):
list2kps = lambda x: np.array(x).reshape(-1, 3)
keys_to_save = ['pose_keypoints_2d', 'face_keypoints_2d', 'hand_right_keypoints_2d', 'hand_left_keypoints_2d']
kps = openpose_dict['people']
if len(kps) == 0:
kp_stacked = np.ones((137, 2)) * -1
return kp_stacked
kps = kps[0]
kp_parts = [list2kps(kps[key]) for key in keys_to_save]
kp_stacked = np.concatenate(kp_parts, axis=0)
kp_stacked[kp_stacked[:, 2] < 0.1, :] = -1
kp_stacked = kp_stacked[:, :2]
return kp_stacked
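# Note on json2kps above: the output is always a (137, 2) array (25 body, 70
# face and 2x21 hand keypoints in the usual OpenPose layout, an assumption
# about the input format); missing people and keypoints with confidence below
# 0.1 are encoded as -1.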
def segment_img(img, segm):
img = to_sigm(img) * segm
img = to_tanh(img)
return img
def segm2mask(segm):
segm = torch.sum(segm, dim=1, keepdims=True) # Bx3xHxW -> Bx1xHxW
segm = (segm > 0.0).type(torch.float32)
return segm | []
| []
| [
"OMP_NUM_THREADS"
]
| [] | ["OMP_NUM_THREADS"] | python | 1 | 0 | |
online_annotation_system/views.py | # -*- coding: utf-8 -*-
import base64
import os
import logging
from collections import defaultdict
from flask import render_template, jsonify, request, current_app, redirect, session
from flask_login import login_user, logout_user, login_required, current_user
from flask_principal import Identity, identity_changed, AnonymousIdentity
from wtforms import PasswordField, StringField, validators, SubmitField
from flask_wtf import FlaskForm
from werkzeug.utils import secure_filename
from concurrent.futures import ThreadPoolExecutor
from online_annotation_system.models import Object, Size, Annotations, DB
from online_annotation_system.xml_generater import XMLGenerator
from online_annotation_system.utils import get_image_width_and_height, get_no_repeated_save_path_and_filename, count_files_by_category
executor = ThreadPoolExecutor(1)
logger = logging.getLogger("views")
XML_SAVE_PATH = os.environ.get("XML_SAVE_PATH")
IMAGE_SAVE_PATH = os.environ.get("IMAGE_SAVE_PATH")
ALLOWED_CATEGORIES = ["plastique|ๅกๆ", "metal|้ๅฑ", "papier|็บธ", "verre|็ป็", "menage|็ปฟ่ฒๅๅพ", "encombrants|ๅคงไฝ็งฏๅๅพ",
"electroniques|็ตๅญไบงๅ", "piles|็ตๆฑ ",
"ampoule|็ฏๆณก", "vetements|่กฃๆ", "medicaments|่ฏๅ", "carton|็บธๆฟ", "humain|ไบบ็ฑป", "cigarette|้ฆ็"]
EXPECTED_NUMBER = 250
@login_required
def home():
return render_template('home.html',
categories=ALLOWED_CATEGORIES,
current_user=current_user)
class LoginForm(FlaskForm):
username = StringField('Username', validators=[validators.DataRequired()])
password = PasswordField('Password', validators=[validators.DataRequired()])
submit = SubmitField('Sign In')
def login():
form = LoginForm()
if form.validate_on_submit():
user = DB.find_user(username=form.username.data)
if user and form.password.data == user.password and form.username.data == user.username:
login_user(user, remember=True)
logger.info("User " + str(user) + " logged in.")
session['user_id'] = user.id
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.id))
return redirect('/')
return render_template('login.html', title='Sign In', form=form)
@login_required
def logout():
logout_user()
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
return redirect('/login')
@login_required
def upload_annotations_and_photo():
if request.method == 'POST':
logger.info("Receive a request post.")
image = request.json['imageBase64']
if image:
image_data = base64.decodebytes(str.encode(image))
image_width, image_height = get_image_width_and_height(image_data)
annotations = Annotations(
annotations=request.json['annotations'],
image_width=image_width,
image_height=image_height)
logger.info("Annotation information: " + str(annotations))
filename = request.json['filename']
files = []
filename = secure_filename(filename)
files.append({'name': filename})
_upload_image(image_data, filename, annotations, image_width, image_height)
return jsonify(files=files), 201
def _upload_image(image_data, filename, annotations, image_width, image_height):
objects = [Object(name=label, bounding_box=bounding_box) for label, bounding_box in annotations]
label = objects[0].name if len(objects) else None
if label is not None:
save_path, filename = get_no_repeated_save_path_and_filename(
os.path.join(IMAGE_SAVE_PATH, label), filename)
logger.info("Start saving image. in: " + save_path)
_save_image(image_data, save_path)
logger.info("Saved image in: " + save_path)
logger.info("Image size: " + str((image_width, image_height)))
size = Size(width=image_width, height=image_height, depth=3)
xml_generator = XMLGenerator(
folder=label,
filename=filename,
path=save_path,
size=size,
objects=objects)
xml_generator.build_xml_tree()
xml_generator.write_xml_to_path(base_path=XML_SAVE_PATH)
def _save_image(image_data, where_to_save):
with open(where_to_save, "wb") as fh:
fh.write(image_data)
def all_categories():
return jsonify([cat.split('|')[0] for cat in ALLOWED_CATEGORIES])
def categories_status():
counts = defaultdict(lambda: 0)
for cat in map(lambda x: x.split('|')[0], ALLOWED_CATEGORIES):
counts[cat] = count_files_by_category(IMAGE_SAVE_PATH, cat)
return render_template('status.html', counts=sorted(counts.items()), expected_number=EXPECTED_NUMBER)
| []
| []
| [
"XML_SAVE_PATH",
"IMAGE_SAVE_PATH"
]
| [] | ["XML_SAVE_PATH", "IMAGE_SAVE_PATH"] | python | 2 | 0 | |
setup.py | #!/usr/bin/env python
import os
import sys
import glob
import shutil
import subprocess
try:
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
except ImportError:
from distutils.core import setup, Extension
from distutils.command.build_ext import build_ext
def invoke_f2py(files, flags=[], wd=None):
from numpy.f2py import main
olddir = os.path.abspath(os.curdir)
oldargv = list(sys.argv)
try:
if wd is not None:
os.chdir(wd)
sys.argv = ['f2py']
sys.argv.extend(files)
sys.argv.extend(flags)
main()
finally:
sys.argv = oldargv
os.chdir(olddir)
def is_fortran_program(path):
with open(path, 'r') as f:
return any(line.lstrip().upper().startswith('PROGRAM ') for line in f)
class build_fsps(build_ext):
def run(self):
from fsps import check_fsps_version
check_fsps_version()
# Generate the Fortran signature/interface.
files = ['fsps.f90']
flags = "-m _fsps -h fsps.pyf --overwrite-signature".split()
print("Running f2py on {0} with flags {1}".format(files, flags))
        invoke_f2py(files, flags, wd='fsps')
# Compile FSPS
fsps_dir = os.path.join(os.environ["SPS_HOME"], "src")
os.environ['F90FLAGS'] = os.environ.get('F90FLAGS', '') + " -fPIC"
fns = [f.rsplit('.', 1)[0] + '.o'
for f in glob.glob(os.path.join(fsps_dir, '*.f90'))
if not is_fortran_program(f)]
return_code = subprocess.call(['make', '-C'+fsps_dir] + fns)
if return_code != 0:
sys.exit(return_code)
# Add the interface source files to the file list.
fns += ["fsps.f90", "fsps.pyf"]
# Compile the library.
flags = '-c -I{0} --f90flags=-cpp --f90flags=-fPIC'.format(fsps_dir)
flags = flags.split()
print("Running f2py on {0} with flags {1}".format(fns, flags))
invoke_f2py(fns, flags, wd='fsps')
# Move the compiled library to the correct directory.
infn = os.path.abspath(self.get_ext_filename("fsps._fsps"))
outfn = os.path.abspath(self.get_ext_fullpath("fsps._fsps"))
if infn != outfn:
try:
os.makedirs(os.path.dirname(outfn))
except os.error:
pass
print("Copying {0} to {1}".format(infn, outfn))
shutil.copyfile(infn, outfn)
if "publish" in sys.argv[-1]:
os.system("python setup.py sdist upload")
sys.exit()
# Hackishly inject a constant into builtins to enable importing of the
# package before the library is built.
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
builtins.__FSPS_SETUP__ = True
from fsps import __version__ # NOQA
# This is a fake extension that is used to trick distutils into building our
# real library using the `build_fsps` function above even when `install` is
# called.
ext = Extension("fsps._fsps", sources=["fsps/fsps.f90"])
# The final setup command. Note: we override the `build_ext` command with our
# custom version from above.
setup(
name="fsps",
url="https://github.com/dfm/python-fsps",
version=__version__,
author="Dan Foreman-Mackey",
author_email="[email protected]",
description="Python bindings for Charlie Conroy's FSPS.",
long_description=open("README.rst").read(),
packages=["fsps"],
package_data={
"": ["README.rst", "LICENSE.rst", "AUTHORS.rst"],
"fsps": ["_fsps.so"],
},
include_package_data=True,
ext_modules=[ext],
scripts=glob.glob("scripts/*.py"),
cmdclass={
"build_ext": build_fsps,
},
classifiers=[
# "Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
install_requires=[
'numpy<=1.16; python_version=="2.7"',
'numpy; python_version>="3.5"',
'pygit2<=0.28; python_version=="2.7"',
'pygit2; python_version>="3.5"',
],
)
| []
| []
| [
"F90FLAGS",
"SPS_HOME"
]
| [] | ["F90FLAGS", "SPS_HOME"] | python | 2 | 0 | |
kickstarter_django/kickstarter_django/wsgi.py | """
WSGI config for kickstarter_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kickstarter_django.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
example/peer/node.go | // Copyright ยฉ 2020 AMIS Technologies
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package peer
import (
"context"
"fmt"
"math/rand"
"github.com/getamis/sirius/log"
ggio "github.com/gogo/protobuf/io"
"github.com/golang/protobuf/proto"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/helpers"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
"github.com/multiformats/go-multiaddr"
)
// MakeBasicHost creates a LibP2P host.
func MakeBasicHost(port int64) (host.Host, error) {
sourceMultiAddr, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port))
if err != nil {
return nil, err
}
priv, err := generateIdentity(port)
if err != nil {
return nil, err
}
opts := []libp2p.Option{
libp2p.ListenAddrs(sourceMultiAddr),
libp2p.Identity(priv),
}
basicHost, err := libp2p.New(context.Background(), opts...)
if err != nil {
return nil, err
}
return basicHost, nil
}
// getPeerAddr gets peer full address from port.
func getPeerAddr(port int64) (string, error) {
priv, err := generateIdentity(port)
if err != nil {
return "", err
}
pid, err := peer.IDFromPrivateKey(priv)
if err != nil {
return "", err
}
return fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", port, pid), nil
}
// generateIdentity generates a fixed key pair by using port as random source.
func generateIdentity(port int64) (crypto.PrivKey, error) {
// Use the port as the randomness source in this example.
r := rand.New(rand.NewSource(port))
// Generate a key pair for this host.
priv, _, err := crypto.GenerateKeyPairWithReader(crypto.ECDSA, 2048, r)
if err != nil {
return nil, err
}
return priv, nil
}
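// Illustrative property of generateIdentity above (sketch): because the port
// seeds the deterministic random source, the same port always produces the
// same key pair within a given build, so peers can derive each other's
// addresses offline:
//
//	addr1, _ := getPeerAddr(10001)
//	addr2, _ := getPeerAddr(10001)
//	// addr1 == addr2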
// send sends the proto message to specified peer.
func send(ctx context.Context, host host.Host, target string, data proto.Message, protocol protocol.ID) error {
// Turn the destination into a multiaddr.
maddr, err := multiaddr.NewMultiaddr(target)
if err != nil {
log.Warn("Cannot parse the target address", "target", target, "err", err)
return err
}
// Extract the peer ID from the multiaddr.
info, err := peer.AddrInfoFromP2pAddr(maddr)
if err != nil {
log.Warn("Cannot parse addr", "addr", maddr, "err", err)
return err
}
s, err := host.NewStream(ctx, info.ID, protocol)
if err != nil {
log.Warn("Cannot create a new stream", "from", host.ID(), "to", target, "err", err)
return err
}
writer := ggio.NewFullWriter(s)
err = writer.WriteMsg(data)
if err != nil {
log.Warn("Cannot write message to IO", "err", err)
return err
}
err = helpers.FullClose(s)
if err != nil {
log.Warn("Cannot close the stream", "err", err)
return err
}
log.Info("Sent message", "peer", target)
return nil
}
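// Illustrative call of send above (a sketch; the message type and protocol ID
// are assumptions, not defined in this package):
//
//	// err := send(ctx, host, targetAddr, &pb.HelloRequest{}, protocol.ID("/example/hello/1.0.0"))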
// connect connects the host to the specified peer.
func connect(ctx context.Context, host host.Host, target string) error {
// Turn the destination into a multiaddr.
maddr, err := multiaddr.NewMultiaddr(target)
if err != nil {
log.Warn("Cannot parse the target address", "target", target, "err", err)
return err
}
// Extract the peer ID from the multiaddr.
info, err := peer.AddrInfoFromP2pAddr(maddr)
if err != nil {
log.Error("Cannot parse addr", "addr", maddr, "err", err)
return err
}
// Connect the host to the peer.
err = host.Connect(ctx, *info)
if err != nil {
log.Warn("Failed to connect to peer", "err", err)
return err
}
return nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
website/cmd/genblog/render.go | package main
import (
"fmt"
"log"
"os"
"text/template"
"time"
)
// This file contains functions and types for rendering the blog.
// baseDot is the base for all "dot" structures used as the environment of the
// HTML template.
type baseDot struct {
BlogTitle string
Author string
RootURL string
HomepageTitle string
Categories []categoryMeta
CategoryMap map[string]string
BaseCSS string
}
func newBaseDot(bc *blogConf, css string) *baseDot {
b := &baseDot{bc.Title, bc.Author, bc.RootURL,
bc.Index.Title, bc.Categories, make(map[string]string), css}
for _, m := range bc.Categories {
b.CategoryMap[m.Name] = m.Title
}
return b
}
type articleDot struct {
*baseDot
article
}
type categoryDot struct {
*baseDot
Category string
Prelude string
Articles []articleMeta
ExtraCSS string
ExtraJS string
}
type feedDot struct {
*baseDot
Articles []article
LastModified rfc3339Time
}
// rfc3339Time wraps time.Time to provide a RFC3339 String() method.
type rfc3339Time time.Time
func (t rfc3339Time) String() string {
return time.Time(t).Format(time.RFC3339)
}
// contentIs generates a code snippet to fix the free reference "content" in
// the HTML template.
func contentIs(what string) string {
return fmt.Sprintf(
`{{ define "content" }} {{ template "%s-content" . }} {{ end }}`,
what)
}
func newTemplate(name, root string, sources ...string) *template.Template {
t := template.New(name).Funcs(template.FuncMap(map[string]interface{}{
"is": func(s string) bool { return s == name },
"rootURL": func() string { return root },
"getEnv": os.Getenv,
}))
for _, source := range sources {
template.Must(t.Parse(source))
}
return t
}
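// Illustrative usage of newTemplate above (a sketch; the template source is an
// assumption): the registered helpers can branch on the template name, print
// the root URL, or read environment variables.
//
//	t := newTemplate("homepage", "https://example.com",
//		`{{ if is "homepage" }}{{ rootURL }}{{ end }} {{ getEnv "USER" }}`)
//	_ = t.Execute(os.Stdout, nil)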
func openForWrite(fname string) *os.File {
file, err := os.OpenFile(fname, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
log.Fatal(err)
}
return file
}
func executeToFile(t *template.Template, data interface{}, fname string) {
file := openForWrite(fname)
defer file.Close()
err := t.Execute(file, data)
if err != nil {
log.Fatalf("rendering %q: %s", fname, err)
}
}
| []
| []
| []
| [] | [] | go | 0 | 0 | |
logger/adapter.go | package logger
import (
"github.com/viant/datly/shared"
"os"
"time"
)
type (
Adapters []*Adapter
AdapterIndex map[string]*Adapter
Adapter struct {
shared.Reference
Name string
readTime ReadTime
readingData ReadingData
objectReconciling ObjectReconciling
columnsDetection ColumnsDetection
}
)
func (i AdapterIndex) Lookup(name string) (*Adapter, bool) {
adapter, ok := i[name]
return adapter, ok
}
func (i AdapterIndex) Register(adapter *Adapter) {
i[adapter.Name] = adapter
}
func (a Adapters) Index() AdapterIndex {
result := AdapterIndex{}
for i := range a {
result[a[i].Name] = a[i]
}
return result
}
func (l *Adapter) ColumnsDetection(sql, source string) {
if l.columnsDetection == nil {
return
}
l.columnsDetection(sql, source)
}
func (l *Adapter) ObjectReconciling(dst, item, parent interface{}, index int) {
if l.objectReconciling == nil {
return
}
l.objectReconciling(dst, item, parent, index)
}
func (l *Adapter) ReadingData(duration time.Duration, sql string, read int, params []interface{}, err error) {
if l.readingData == nil {
return
}
l.readingData(duration, sql, read, params, err)
}
func (l *Adapter) ReadTime(viewName string, start, end *time.Time, err error) {
if l.readTime == nil {
return
}
l.readTime(viewName, start, end, err)
}
func (l *Adapter) Inherit(adapter *Adapter) {
l.readTime = adapter.readTime
l.readingData = adapter.readingData
l.objectReconciling = adapter.objectReconciling
l.columnsDetection = adapter.columnsDetection
}
func NewLogger(name string, logger Logger) *Adapter {
if logger == nil {
return &Adapter{
Name: name,
}
}
return &Adapter{
Name: name,
Reference: shared.Reference{},
readTime: logger.ViewReadTime(),
readingData: logger.ReadingData(),
objectReconciling: logger.ObjectReconciling(),
columnsDetection: logger.ColumnsDetection(),
}
}
func Default() *Adapter {
if os.Getenv("DATLY_DEBUG") == "" {
return NewLogger("", nil)
}
return NewLogger("", &defaultLogger{})
}
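// Illustrative behaviour of Default above (sketch): it returns a no-op adapter
// unless the DATLY_DEBUG environment variable is set to a non-empty value.
//
//	os.Setenv("DATLY_DEBUG", "1")
//	adapter := Default() // now backed by defaultLogger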
| [
"\"DATLY_DEBUG\""
]
| []
| [
"DATLY_DEBUG"
]
| [] | ["DATLY_DEBUG"] | go | 1 | 0 | |
pipeline.py | '''===================================================================
Copyright 2019 Matthias Komm, Vilius Cepaitis, Robert Bainbridge,
Alex Tapper, Oliver Buchmueller. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS"
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.See the License for the specific language governing
permissions and limitations under the License.
==================================================================='''
import os
import glob
import numpy as np
import tensorflow as tf
from keras import backend as K
import rtf
import ROOT
from features import feature_dict
def create_weight_histograms(input_file_list, features):
    hists_per_class = []
    for name in features['truth']:
        # book one weight histogram per truth class (the binning here is an assumption)
        hists_per_class.append(ROOT.TH1F('weight_' + name, name, 50, 0., 1.))
    return hists_per_class
def input_pipeline(input_file_list, features, batch_size, repeat = 1, max_threads = 6):
with tf.device('/cpu:0'):
file_list_queue = tf.train.string_input_producer(
input_file_list,
num_epochs=repeat,
shuffle=True
)
rootreader_op = []
resamplers = []
        if 'OMP_NUM_THREADS' in os.environ:
try:
max_threads = max(1,int(os.environ["OMP_NUM_THREADS"]))
except Exception:
pass
for _ in range(min(len(input_file_list),max_threads)):
reader_batch = max(10,int(batch_size/20.))
reader = rtf.root_reader(file_list_queue, features, "jets", batch=reader_batch).batch()
rootreader_op.append(reader)
'''
if resample:
weight = classification_weights(
reader["truth"],
reader["globalvars"],
os.path.join(outputFolder, "weights.root"),
branchNameList,
[0, 1]
)
resampled = resampler(
weight,
reader
).resample()
resamplers.append(resampled)
'''
batch = tf.train.shuffle_batch_join(
rootreader_op,
batch_size=batch_size,
capacity=5*batch_size,
min_after_dequeue=2*batch_size,
enqueue_many=True
)
is_signal = batch["truth"][:, 4] > 0.5
batch["gen"] = rtf.fake_background(batch["gen"], is_signal, 0)
return batch
train_batch = input_pipeline(
glob.glob('Samples/QCD_Pt-15to7000_unpacked_train1_[0-9]*.root'),
feature_dict,
batch_size=100
)
test_batch = input_pipeline(
glob.glob('Samples/QCD_Pt-15to7000_unpacked_test1_[0-9]*.root'),
feature_dict,
batch_size=100
)
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
sess = K.get_session()
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
step = 0
try:
while not coord.should_stop():
step += 1
train_batch_value = sess.run(train_batch)
#if step==1:
# print train_batch_value
if step%50==0:
print "step",step
for k in train_batch_value.keys():
print " "*4,k,":",train_batch_value[k].shape
except tf.errors.OutOfRangeError:
    print 'Done reading files for %d steps.' % (step)
finally:
    coord.request_stop()
    coord.join(threads)
| []
| []
| [
"OMP_NUM_THREADS"
]
| [] | ["OMP_NUM_THREADS"] | python | 1 | 0 | |
train.py | import argparse
import logging
import os
import torch
import torch.distributed as dist
from ssd.data.loaders import AdainLoader
from ssd.engine.inference import do_evaluation
from ssd.config import cfg
from ssd.data.build import make_data_loader
from ssd.engine.trainer import do_train
from ssd.modeling.detector import build_detection_model
from ssd.solver.build import make_optimizer, make_lr_scheduler
from ssd.utils import dist_util, mkdir
from ssd.utils.checkpoint import CheckPointer
from ssd.utils.dist_util import synchronize
from ssd.utils.logger import setup_logger
from ssd.utils.misc import str2bool
def train(cfg, args):
logger = logging.getLogger('SSD.trainer')
model = build_detection_model(cfg)
device = torch.device(cfg.MODEL.DEVICE)
model.to(device)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank)
lr = cfg.SOLVER.LR * args.num_gpus # scale by num gpus
optimizer = make_optimizer(cfg, model, lr)
milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
scheduler = make_lr_scheduler(cfg, optimizer, milestones)
arguments = {"iteration": 0}
save_to_disk = dist_util.get_rank() == 0
checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR, save_to_disk, logger)
extra_checkpoint_data = checkpointer.load()
arguments.update(extra_checkpoint_data)
max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
train_loader = make_data_loader(cfg, phase="train", distributed=args.distributed, max_iter=max_iter,
start_iter=arguments['iteration'])
if args.enable_style_transfer:
style_loader = make_data_loader(cfg, phase="style", distributed=args.distributed, max_iter=max_iter,
start_iter=arguments['iteration'])
train_loader = AdainLoader(cfg=cfg, content_loader=train_loader, style_loader=style_loader)
model = do_train(cfg, model, train_loader, optimizer, scheduler, checkpointer, device, arguments, args)
return model
def main():
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training With PyTorch')
parser.add_argument(
"--config-file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('--log_step', default=10, type=int, help='Print logs every log_step')
parser.add_argument('--save_step', default=2500, type=int, help='Save checkpoint every save_step')
parser.add_argument('--eval_step', default=2500, type=int,
help='Evaluate dataset every eval_step, disabled when eval_step < 0')
parser.add_argument('--use_tensorboard', default=True, type=str2bool)
parser.add_argument(
"--skip-test",
dest="skip_test",
help="Do not test the final model",
action="store_true",
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
# Style transfer arguments section
parser.add_argument(
"--enable_style_transfer",
dest="enable_style_transfer",
help="Enable style transferring using AdaIN",
action="store_true"
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = num_gpus > 1
args.num_gpus = num_gpus
if torch.cuda.is_available():
# This flag allows you to enable the inbuilt cudnn auto-tuner to
# find the best algorithm to use for your hardware.
torch.backends.cudnn.benchmark = True
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
synchronize()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
# Check style transfer arguments
if args.enable_style_transfer:
print(cfg.ADAIN)
assert os.path.exists(cfg.ADAIN.IMPL_FOLDER)
assert len(cfg.ADAIN.DATASETS.STYLE) > 0
assert os.path.exists(cfg.ADAIN.MODEL.VGG)
assert os.path.exists(cfg.ADAIN.MODEL.DECODER)
if cfg.OUTPUT_DIR:
mkdir(cfg.OUTPUT_DIR)
logger = setup_logger("SSD", dist_util.get_rank(), cfg.OUTPUT_DIR)
logger.info("Using {} GPUs".format(num_gpus))
logger.info(args)
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, "r") as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
model = train(cfg, args)
if not args.skip_test:
logger.info('Start evaluating...')
torch.cuda.empty_cache() # speed up evaluating after training finished
do_evaluation(cfg, model, distributed=args.distributed)
if __name__ == '__main__':
main()
| []
| []
| [
"WORLD_SIZE"
]
| [] | ["WORLD_SIZE"] | python | 1 | 0 | |
elodie/tests/media/photo_test.py | # -*- coding: utf-8
# Project imports
from __future__ import unicode_literals
import os
import sys
from datetime import datetime
import shutil
import tempfile
import time
from nose.plugins.skip import SkipTest
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))))
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import helper
from elodie.media.media import Media
from elodie.media.photo import Photo
os.environ['TZ'] = 'GMT'
setup_module = helper.setup_module
teardown_module = helper.teardown_module
def test_photo_extensions():
photo = Photo()
extensions = photo.extensions
assert 'arw' in extensions
assert 'cr2' in extensions
assert 'dng' in extensions
assert 'gif' in extensions
assert 'heic' in extensions
assert 'jpg' in extensions
assert 'jpeg' in extensions
assert 'nef' in extensions
assert 'rw2' in extensions
valid_extensions = Photo.get_valid_extensions()
assert extensions == valid_extensions, valid_extensions
def test_empty_album():
photo = Photo(helper.get_file('plain.jpg'))
assert photo.get_album() is None
def test_has_album():
photo = Photo(helper.get_file('with-album.jpg'))
album = photo.get_album()
assert album == 'Test Album', album
def test_is_valid():
photo = Photo(helper.get_file('plain.jpg'))
assert photo.is_valid()
def test_is_not_valid():
photo = Photo(helper.get_file('text.txt'))
assert not photo.is_valid()
def test_get_metadata_of_invalid_photo():
photo = Photo(helper.get_file('invalid.jpg'))
metadata = photo.get_metadata()
assert metadata is None
def test_get_coordinate_default():
photo = Photo(helper.get_file('with-location.jpg'))
coordinate = photo.get_coordinate()
assert helper.isclose(coordinate,37.3667027222), coordinate
def test_get_coordinate_latitude():
photo = Photo(helper.get_file('with-location.jpg'))
coordinate = photo.get_coordinate('latitude')
assert helper.isclose(coordinate,37.3667027222), coordinate
def test_get_coordinate_latitude_minus():
photo = Photo(helper.get_file('with-location-inv.jpg'))
coordinate = photo.get_coordinate('latitude')
assert helper.isclose(coordinate,-37.3667027222), coordinate
def test_get_coordinate_longitude():
photo = Photo(helper.get_file('with-location.jpg'))
coordinate = photo.get_coordinate('longitude')
assert helper.isclose(coordinate,-122.033383611), coordinate
def test_get_coordinate_longitude_plus():
photo = Photo(helper.get_file('with-location-inv.jpg'))
coordinate = photo.get_coordinate('longitude')
assert helper.isclose(coordinate,122.033383611), coordinate
def test_get_coordinates_without_exif():
photo = Photo(helper.get_file('no-exif.jpg'))
latitude = photo.get_coordinate('latitude')
longitude = photo.get_coordinate('longitude')
assert latitude is None, latitude
assert longitude is None, longitude
def test_get_coordinates_with_zero_coordinate():
photo = Photo(helper.get_file('with-location-zero-coordinate.jpg'))
latitude = photo.get_coordinate('latitude')
longitude = photo.get_coordinate('longitude')
assert helper.isclose(latitude,51.55325), latitude
assert helper.isclose(longitude,-0.00417777777778), longitude
def test_get_coordinates_with_null_coordinate():
photo = Photo(helper.get_file('with-null-coordinates.jpg'))
latitude = photo.get_coordinate('latitude')
longitude = photo.get_coordinate('longitude')
assert latitude is None, latitude
assert longitude is None, longitude
def test_get_date_taken():
photo = Photo(helper.get_file('plain.jpg'))
date_taken = photo.get_date_taken()
#assert date_taken == (2015, 12, 5, 0, 59, 26, 5, 339, 0), date_taken
assert date_taken == helper.time_convert((2015, 12, 5, 0, 59, 26, 5, 339, 0)), date_taken
def test_get_date_taken_without_exif():
source = helper.get_file('no-exif.jpg')
photo = Photo(source)
date_taken = photo.get_date_taken()
date_taken_from_file = time.gmtime(min(os.path.getmtime(source), os.path.getctime(source)))
assert date_taken == date_taken_from_file, date_taken
def test_get_camera_make():
photo = Photo(helper.get_file('with-location.jpg'))
make = photo.get_camera_make()
assert make == 'Canon', make
def test_get_camera_make_not_set():
photo = Photo(helper.get_file('no-exif.jpg'))
make = photo.get_camera_make()
assert make is None, make
def test_get_camera_model():
photo = Photo(helper.get_file('with-location.jpg'))
model = photo.get_camera_model()
assert model == 'Canon EOS REBEL T2i', model
def test_get_camera_model_not_set():
photo = Photo(helper.get_file('no-exif.jpg'))
model = photo.get_camera_model()
assert model is None, model
def test_is_valid():
photo = Photo(helper.get_file('with-location.jpg'))
assert photo.is_valid()
def test_is_not_valid():
photo = Photo(helper.get_file('text.txt'))
assert not photo.is_valid()
def test_is_valid_fallback_using_pillow():
photo = Photo(helper.get_file('imghdr-error.jpg'))
assert photo.is_valid()
def test_pillow_not_loaded():
photo = Photo(helper.get_file('imghdr-error.jpg'))
photo.pillow = None
assert photo.is_valid() == False
def test_set_album():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/photo.jpg' % folder
shutil.copyfile(helper.get_file('plain.jpg'), origin)
photo = Photo(origin)
metadata = photo.get_metadata()
assert metadata['album'] is None, metadata['album']
status = photo.set_album('Test Album')
assert status == True, status
photo_new = Photo(origin)
metadata_new = photo_new.get_metadata()
shutil.rmtree(folder)
assert metadata_new['album'] == 'Test Album', metadata_new['album']
def test_set_date_taken_with_missing_datetimeoriginal():
# When datetimeoriginal (or other key) is missing we have to add it gh-74
# https://github.com/jmathai/elodie/issues/74
temporary_folder, folder = helper.create_working_folder()
origin = '%s/photo.jpg' % folder
shutil.copyfile(helper.get_file('no-exif.jpg'), origin)
photo = Photo(origin)
status = photo.set_date_taken(datetime(2013, 9, 30, 7, 6, 5))
assert status == True, status
photo_new = Photo(origin)
metadata = photo_new.get_metadata()
date_taken = metadata['date_taken']
shutil.rmtree(folder)
#assert date_taken == (2013, 9, 30, 7, 6, 5, 0, 273, 0), metadata['date_taken']
assert date_taken == helper.time_convert((2013, 9, 30, 7, 6, 5, 0, 273, 0)), metadata['date_taken']
def test_set_date_taken():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/photo.jpg' % folder
shutil.copyfile(helper.get_file('plain.jpg'), origin)
photo = Photo(origin)
status = photo.set_date_taken(datetime(2013, 9, 30, 7, 6, 5))
assert status == True, status
photo_new = Photo(origin)
metadata = photo_new.get_metadata()
date_taken = metadata['date_taken']
shutil.rmtree(folder)
#assert date_taken == (2013, 9, 30, 7, 6, 5, 0, 273, 0), metadata['date_taken']
assert date_taken == helper.time_convert((2013, 9, 30, 7, 6, 5, 0, 273, 0)), metadata['date_taken']
def test_set_location():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/photo.jpg' % folder
shutil.copyfile(helper.get_file('plain.jpg'), origin)
photo = Photo(origin)
origin_metadata = photo.get_metadata()
# Verify that original photo has different location info that what we
# will be setting and checking
assert not helper.isclose(origin_metadata['latitude'], 11.1111111111), origin_metadata['latitude']
assert not helper.isclose(origin_metadata['longitude'], 99.9999999999), origin_metadata['longitude']
status = photo.set_location(11.1111111111, 99.9999999999)
assert status == True, status
photo_new = Photo(origin)
metadata = photo_new.get_metadata()
shutil.rmtree(folder)
assert helper.isclose(metadata['latitude'], 11.1111111111), metadata['latitude']
assert helper.isclose(metadata['longitude'], 99.9999999999), metadata['longitude']
def test_set_location_minus():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/photo.jpg' % folder
shutil.copyfile(helper.get_file('plain.jpg'), origin)
photo = Photo(origin)
origin_metadata = photo.get_metadata()
# Verify that original photo has different location info that what we
# will be setting and checking
assert not helper.isclose(origin_metadata['latitude'], 11.1111111111), origin_metadata['latitude']
assert not helper.isclose(origin_metadata['longitude'], 99.9999999999), origin_metadata['longitude']
status = photo.set_location(-11.1111111111, -99.9999999999)
assert status == True, status
photo_new = Photo(origin)
metadata = photo_new.get_metadata()
shutil.rmtree(folder)
assert helper.isclose(metadata['latitude'], -11.1111111111), metadata['latitude']
assert helper.isclose(metadata['longitude'], -99.9999999999), metadata['longitude']
def test_set_title():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/photo.jpg' % folder
shutil.copyfile(helper.get_file('plain.jpg'), origin)
photo = Photo(origin)
origin_metadata = photo.get_metadata()
status = photo.set_title('my photo title')
assert status == True, status
photo_new = Photo(origin)
metadata = photo_new.get_metadata()
shutil.rmtree(folder)
assert metadata['title'] == 'my photo title', metadata['title']
def test_set_title_non_ascii():
temporary_folder, folder = helper.create_working_folder()
origin = '%s/photo.jpg' % folder
shutil.copyfile(helper.get_file('plain.jpg'), origin)
photo = Photo(origin)
origin_metadata = photo.get_metadata()
unicode_title = u'ๅฝขๅฃฐๅญ / ๅฝข่ฒๅญ'
status = photo.set_title(unicode_title)
assert status == True, status
photo_new = Photo(origin)
metadata = photo_new.get_metadata()
shutil.rmtree(folder)
assert metadata['title'] == unicode_title, metadata['title']
# This is a test generator that will test reading and writing to
# various RAW formats. Each sample file has a different date which
# is the only information which needs to be added to run the tests
# for that file type.
# https://nose.readthedocs.io/en/latest/writing_tests.html#test-generators
def test_various_types():
types = Photo.extensions
#extensions = ('arw', 'cr2', 'dng', 'gif', 'jpeg', 'jpg', 'nef', 'rw2')
dates = {
'arw': (2007, 4, 8, 17, 41, 18, 6, 98, 0),
'cr2': (2005, 10, 29, 16, 14, 44, 5, 302, 0),
'dng': (2009, 10, 20, 9, 10, 46, 1, 293, 0),
'heic': (2019, 5, 26, 10, 33, 20, 6, 146, 0),
'nef': (2008, 10, 24, 9, 12, 56, 4, 298, 0),
'rw2': (2014, 11, 19, 23, 7, 44, 2, 323, 0)
}
for type in types:
if type in dates:
yield (_test_photo_type_get, type, dates[type])
yield (_test_photo_type_set, type, dates[type])
def _test_photo_type_get(type, date):
temporary_folder, folder = helper.create_working_folder()
photo_name = 'photo.{}'.format(type)
photo_file = helper.get_file(photo_name)
origin = '{}/{}'.format(folder, photo_name)
if not photo_file:
photo_file = helper.download_file(photo_name, folder)
if not photo_file or not os.path.isfile(photo_file):
raise SkipTest('{} file not downloaded'.format(type))
# downloading for each test is costly so we save it in the working directory
file_path_save_as = helper.get_file_path(photo_name)
if os.path.isfile(photo_file):
shutil.copyfile(photo_file, file_path_save_as)
shutil.copyfile(photo_file, origin)
photo = Photo(origin)
metadata = photo.get_metadata()
shutil.rmtree(folder)
assert metadata['date_taken'] == helper.time_convert(date), '{} date {}'.format(type, metadata['date_taken'])
def _test_photo_type_set(type, date):
temporary_folder, folder = helper.create_working_folder()
photo_name = 'photo.{}'.format(type)
photo_file = helper.get_file(photo_name)
origin = '{}/{}'.format(folder, photo_name)
if not photo_file:
photo_file = helper.download_file(photo_name, folder)
if not photo_file or not os.path.isfile(photo_file):
raise SkipTest('{} file not downloaded'.format(type))
shutil.copyfile(photo_file, origin)
photo = Photo(origin)
origin_metadata = photo.get_metadata()
status = photo.set_location(11.1111111111, 99.9999999999)
assert status == True, status
photo_new = Photo(origin)
metadata = photo_new.get_metadata()
shutil.rmtree(folder)
assert metadata['date_taken'] == helper.time_convert(date), '{} date {}'.format(type, metadata['date_taken'])
assert helper.isclose(metadata['latitude'], 11.1111111111), '{} lat {}'.format(type, metadata['latitude'])
assert helper.isclose(metadata['longitude'], 99.9999999999), '{} lon {}'.format(type, metadata['latitude'])
| []
| []
| [
"TZ"
]
| [] | ["TZ"] | python | 1 | 0 | |
visual_model/function.py | import os
import torch
import torchvision.transforms as transforms
from torch.nn.functional import mse_loss
from . import model
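# NOTE: `model` is assumed to be a callable feature extractor exposed by the
# visual_model package (e.g. a truncated CNN) that returns a list of
# intermediate feature maps; extract_feature() and the loss helpers below
# rely on that list interface.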
def extract_feature(img, device=os.getenv("visual_model.device")):
#Input: PIL image
transform = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image = transform(img).to(device)
image = image.unsqueeze(0)
feature = model(image)
return feature
def content_loss(f1, f2):
return mse_loss(f1[2], f2[2])
def style_loss(f1, f2):
f1_gram = [gram(f) for f in f1]
f2_gram = [gram(f) for f in f2]
loss = 0
for i in range(len(f1)):
loss += mse_loss(f1_gram[i], f2_gram[i])
return loss
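# gram() builds the (c x c) Gram matrix of a feature map, normalized by
# c * h * w; this is the second-order statistic consumed by style_loss() above.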
def gram(x):
(b, c, h, w) = x.size()
f = x.view(b, c, w * h)
f_T = f.transpose(1, 2)
G = f.bmm(f_T) / (c * h * w)
return G
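# get_central_feature() averages a list of per-image feature lists element-wise,
# producing one "central" feature per layer.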
def get_central_feature(target_features, device=os.getenv("visual_model.device")):
n_features = len(target_features)
central_feature = []
for feature in target_features[0]:
central_feature.append(torch.zeros(feature.size()).to(device))
for target_feature in target_features:
for i, feature in enumerate(target_feature):
central_feature[i] += feature
# normalize: rebinding the loop variable in `for feature in central_feature`
# would not update the list, so divide via a list comprehension instead
central_feature = [feature / n_features for feature in central_feature]
return central_feature
def get_average_distance(target_features, central_feature, mode):
n_features = len(target_features)
average_distance = 0
for target_feature in target_features:
if mode == "content":
average_distance += content_loss(target_feature, central_feature)
elif mode == "style":
average_distance += style_loss(target_feature, central_feature)
average_distance = average_distance / n_features
return average_distance
| []
| []
| [
"visual_model.device"
]
| [] | ["visual_model.device"] | python | 1 | 0 | |
lambda/src/main/java/Email.java | import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.document.*;
import com.amazonaws.services.dynamodbv2.document.spec.QuerySpec;
import com.amazonaws.services.dynamodbv2.document.utils.ValueMap;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestHandler;
import com.amazonaws.services.lambda.runtime.events.SNSEvent;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailService;
import com.amazonaws.services.simpleemail.AmazonSimpleEmailServiceClientBuilder;
import com.amazonaws.services.simpleemail.model.*;
import java.text.SimpleDateFormat;
import java.util.*;
public class Email implements RequestHandler<SNSEvent,Object>{
public Object handleRequest(SNSEvent req,Context context){
String timeStamp = new SimpleDateFormat("yyyy-MM-dd_HH:mm:ss").format(Calendar.getInstance().getTime());
context.getLogger().log("Invocation started: " + timeStamp);
try {
AmazonDynamoDB dbClient = AmazonDynamoDBClientBuilder.standard().withRegion("us-east-1").build();
AmazonSimpleEmailService sesClient = AmazonSimpleEmailServiceClientBuilder.standard().withRegion("us-east-1").build();
DynamoDB dynamoDB = new DynamoDB(dbClient);
Table table = dynamoDB.getTable(System.getenv("TABLENAME"));
//#TODO env1-----------
//context.getLogger().log(req.getRecords().get(0).getSNS().getMessage());
// String topicmsg=req.getRecords().get(0).getSNS().getMessage();
UUID uuid = UUID.randomUUID();
String token = uuid.toString();
List<SNSEvent.SNSRecord> lstSNSRecord = req.getRecords();
for (SNSEvent.SNSRecord record : lstSNSRecord) {
if (record != null) {
context.getLogger().log("SNSRecord found");
String email = record.getSNS().getMessage();
// String currentTs = new SimpleDateFormat("MMM dd yyyy HH:mm:ss.SSS zzz").format(Calendar.getInstance().getTime());
Date todayCal = Calendar.getInstance().getTime();
SimpleDateFormat crunchifyFor = new SimpleDateFormat("MMM dd yyyy HH:mm:ss.SSS zzz");
String curTime = crunchifyFor.format(todayCal);
Date curDate = crunchifyFor.parse(curTime);
Long epoch = curDate.getTime();
String currentTs=epoch.toString();
context.getLogger().log("Time for resource retrieval " + currentTs);
QuerySpec querySpec = new QuerySpec().withKeyConditionExpression("id = :vid").withFilterExpression("ttl_timestamp > :vtimeStamp")
.withValueMap(new ValueMap().withString(":vid", email).withString(":vtimeStamp", currentTs));
ItemCollection<QueryOutcome> itemcollection = table.query(querySpec);
Iterator<Item> iterator = itemcollection.iterator();
if (iterator.hasNext() == false) {
context.getLogger().log("Entry could not be found for " + email);
Calendar cal = Calendar.getInstance();
cal.add(Calendar.MINUTE, Integer.parseInt(System.getenv("TTL")));
//#TODO env2----------
Date currentDate = cal.getTime();
SimpleDateFormat crunchifyFormat = new SimpleDateFormat("MMM dd yyyy HH:mm:ss.SSS zzz");
String currentTime = crunchifyFormat.format(currentDate);
//String link = "http://assignment1-0.0.1-SNAPSHOT/reset?email=" + email+"@"+System.getenv("DOMAIN_NAME") + "&token=" + token;
// Long epochTime = date.getTime();
//#TODO--->verify url
//#TODO env3-----------
String link = System.getenv("DOMAIN_NAME")+"/"+"reset?email="+email+"&token="+token;
Date date = crunchifyFormat.parse(currentTime);
Long ts = date.getTime();
Item item = new Item();
item.withPrimaryKey("id", email);
item.with("ttl_timestamp", ts.toString());
item.with("Subject", "Password Reset Link");
item.with("link", link);
context.getLogger().log("Logging time:" + ts.toString());
PutItemOutcome outcome = table.putItem(item);
SendEmailRequest request = new SendEmailRequest().withDestination(new Destination().withToAddresses(email)).withMessage(new Message()
.withBody(new Body()
.withText(new Content()
.withCharset("UTF-8").withData("Password reset Link:" + link)))
.withSubject(new Content()
.withCharset("UTF-8").withData("Password Reset Link")))
.withSource(System.getenv("FROM_EMAIL"));
//#TODO env4---------------
sesClient.sendEmail(request);
context.getLogger().log("Email sent to "+email+" !");
}
else{
Item item=iterator.next();
context.getLogger().log("user found");
context.getLogger().log("username:"+item.getString("id"));
context.getLogger().log("ttl timestamp:"+item.getString("ttl_timestamp"));
}
//GetSendStatisticsResult a= sesClient.getSendStatistics();
}
}
}
catch(Exception e){
context.getLogger().log("Error message: " + e.getMessage()+"stack: "+e.getStackTrace()[e.getStackTrace().length -1].getLineNumber());
// context.getLogger().log("Exception: "+e.getMessage());
context.getLogger().log(e.getStackTrace()[e.getStackTrace().length -1].getFileName());
}
timeStamp = new SimpleDateFormat("yyyy-MM-dd_HH:mm:ss").format(Calendar.getInstance().getTime());
context.getLogger().log("Invocation completed: " + timeStamp);
return null;
}
}
| [
"\"TABLENAME\"",
"\"TTL\"",
"\"DOMAIN_NAME\"",
"\"DOMAIN_NAME\"",
"\"FROM_EMAIL\""
]
| []
| [
"TABLENAME",
"FROM_EMAIL",
"DOMAIN_NAME",
"TTL"
]
| [] | ["TABLENAME", "FROM_EMAIL", "DOMAIN_NAME", "TTL"] | java | 4 | 0 | |
utils/distribution_utils.py | # ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import tensorflow as tf
import os
from distutils.version import LooseVersion
def get_tpu_strategy(name=None):
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(name)
tf.config.experimental_connect_to_cluster(cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
if LooseVersion(tf.version.VERSION) < LooseVersion("2.4.0"):
strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
else:
strategy = tf.distribute.TPUStrategy(cluster_resolver)
return strategy
def get_cpu_strategy():
return tf.distribute.OneDeviceStrategy("/cpu:0")
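# Strategy selection: TPUStrategy when use_tpu is set, otherwise a
# MirroredStrategy across the visible GPUs. On Windows the cross-device op is
# forced to HierarchicalCopyAllReduce because NCCL is not available there.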
def get_distribution_strategy(gpu_memory_growth=True, cuda_visible_devices=None, use_tpu=False, tpu_name=None):
if use_tpu:
if tpu_name == "colab":
tpu_name = None
return get_tpu_strategy(tpu_name)
if cuda_visible_devices is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
set_gpu_memory_growth(gpu_memory_growth)
gpu_counts = get_gpu_counts()
cross_device_ops = None
if os.name == "nt":
cross_device_ops = tf.distribute.HierarchicalCopyAllReduce()
if gpu_counts == 1:
# strategy = tf.distribute.OneDeviceStrategy("/gpu:0")
strategy = tf.distribute.MirroredStrategy()
else:
# issue 41539 may be fixed: https://github.com/tensorflow/tensorflow/issues/41539
strategy = tf.distribute.MirroredStrategy(cross_device_ops=cross_device_ops)
# strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(communication= tf.distribute.experimental.CollectiveCommunication.RING)
return strategy
def set_gpu_memory_growth(growth=False):
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, growth)
except RuntimeError as e:
print(e)
def get_gpu_counts():
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
return len(gpus)
else:
return 0
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
test/e2e/image_scp_test.go | package integration
import (
"io/ioutil"
"os"
"github.com/containers/common/pkg/config"
. "github.com/containers/podman/v3/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
)
var _ = Describe("podman image scp", func() {
ConfPath := struct {
Value string
IsSet bool
}{}
var (
tempdir string
podmanTest *PodmanTestIntegration
)
BeforeEach(func() {
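// Preserve any caller-provided CONTAINERS_CONF and point the variable at a
// throwaway file so connection settings written by a test cannot leak out.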
ConfPath.Value, ConfPath.IsSet = os.LookupEnv("CONTAINERS_CONF")
conf, err := ioutil.TempFile("", "containersconf")
if err != nil {
panic(err)
}
os.Setenv("CONTAINERS_CONF", conf.Name())
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
})
AfterEach(func() {
podmanTest.Cleanup()
os.Remove(os.Getenv("CONTAINERS_CONF"))
if ConfPath.IsSet {
os.Setenv("CONTAINERS_CONF", ConfPath.Value)
} else {
os.Unsetenv("CONTAINERS_CONF")
}
f := CurrentGinkgoTestDescription()
processTestResult(f)
})
It("podman image scp quiet flag", func() {
if IsRemote() {
Skip("this test is only for non-remote")
}
scp := podmanTest.Podman([]string{"image", "scp", "-q", ALPINE})
scp.WaitWithDefaultTimeout()
Expect(scp).To(Exit(0))
})
It("podman image scp root to rootless transfer", func() {
SkipIfNotRootless("this is a rootless-only test, transferring from root to rootless using PodmanAsUser")
if IsRemote() {
Skip("this test is only for non-remote")
}
env := os.Environ()
img := podmanTest.PodmanAsUser([]string{"image", "pull", ALPINE}, 0, 0, "", env) // pull image to root
img.WaitWithDefaultTimeout()
Expect(img).To(Exit(0))
scp := podmanTest.PodmanAsUser([]string{"image", "scp", "root@localhost::" + ALPINE, "1000:1000@localhost::"}, 0, 0, "", env) //transfer from root to rootless (us)
scp.WaitWithDefaultTimeout()
Expect(scp).To(Exit(0))
list := podmanTest.Podman([]string{"image", "list"}) // our image should now contain alpine loaded in from root
list.WaitWithDefaultTimeout()
Expect(list).To(Exit(0))
Expect(list.LineInOutputStartsWith("quay.io/libpod/alpine")).To(BeTrue())
})
It("podman image scp bogus image", func() {
if IsRemote() {
Skip("this test is only for non-remote")
}
scp := podmanTest.Podman([]string{"image", "scp", "FOOBAR"})
scp.WaitWithDefaultTimeout()
Expect(scp).To(ExitWithError())
})
It("podman image scp with proper connection", func() {
if IsRemote() {
Skip("this test is only for non-remote")
}
cmd := []string{"system", "connection", "add",
"--default",
"QA",
"ssh://[email protected]:2222/run/podman/podman.sock",
}
session := podmanTest.Podman(cmd)
session.WaitWithDefaultTimeout()
Expect(session).To(Exit(0))
cfg, err := config.ReadCustomConfig()
Expect(err).ShouldNot(HaveOccurred())
Expect(cfg.Engine.ActiveService).To(Equal("QA"))
Expect(cfg.Engine.ServiceDestinations["QA"]).To(Equal(
config.Destination{
URI: "ssh://[email protected]:2222/run/podman/podman.sock",
},
))
scp := podmanTest.Podman([]string{"image", "scp", ALPINE, "QA::"})
scp.Wait(45)
// exit with error because we cannot make an actual ssh connection
// This tests that the input we are given is validated and prepared correctly
// Error: failed to connect: dial tcp: address foo: missing port in address
Expect(scp).To(ExitWithError())
Expect(scp.ErrorToString()).To(ContainSubstring(
"Error: failed to connect: dial tcp 66.151.147.142:2222: i/o timeout",
))
})
})
| [
"\"CONTAINERS_CONF\""
]
| []
| [
"CONTAINERS_CONF"
]
| [] | ["CONTAINERS_CONF"] | go | 1 | 0 | |
train.py | import torch
import argparse
from IO import DataLoader
import models
from torch.autograd import Variable
import math
from random import shuffle
parser = argparse.ArgumentParser()
# Data option
parser.add_argument('-input_img', type=str, default='data/data_img.h5')
parser.add_argument('-input_data', type=str, default='data/visdial_data.h5')
parser.add_argument('-input_json', type=str,
default='data/visdial_params.json')
# Model option
parser.add_argument('-embed_dim', type=int, default=300)
parser.add_argument('-rnn_dim', type=int, default=512)
parser.add_argument('-fact_dim', type=int, default=512)
parser.add_argument('-history_dim', type=int, default=512)
parser.add_argument('-num_layers', type=int, default=2,
help='Number of nlayers in LSTM')
parser.add_argument('-img_norm', type=int, default=1)
parser.add_argument('-img_feature_size', type=int, default=4096)
parser.add_argument('-max_history_len', type=int, default=40)
# Training option
parser.add_argument('-dropout', type=float, default=0)
parser.add_argument('-batch_size', type=int, default=64,
help="""Batch size (number of threads)
(Adjust based on GPU memory)""")
parser.add_argument('-lr', type=float, default=0.001,
help="learning rate.")
parser.add_argument('-max_grad_norm', type=float, default=5,
help="clip gradient at this value")
parser.add_argument('-report_every', type=int, default=50)
parser.add_argument('-save_model', type=str, default='visdialog.pt',
help='trained parameters')
# the following options are extremely useful when the data is small
parser.add_argument('-share_decoder_embeddings', action='store_true',
help='Share the word and out embeddings for decoder.')
parser.add_argument('-share_qa_embeddings', action='store_true',
help='share word embeddings between QBot and ABot')
parser.add_argument('-num_epochs', type=int, default=15, help='Epochs')
parser.add_argument('-cuda', action='store_true',
help='enable training with cuda')
opt = parser.parse_args()
# utils prepare the data
def truncate(x, padding_idx=0):
# cut off all unnecessary padding; this makes generation faster
# because we don't have to predict pad tokens
# x (batch, rounds, length)
masked_pad = x.ne(padding_idx).sum(-1)
max_length = masked_pad.int().max()
return x[:, :, :max_length].contiguous()
# build dataloader
loader = DataLoader(opt, ['train', 'val'])
vocab_size = loader.vocab_size
opt.vocab_size = vocab_size
bos_idx = loader.data['word2ind']['<START>']
eos_idx = loader.data['word2ind']['<EOS>']
def pad(input, bos_idx, eos_idx, padding_idx=0):
"""pad <START> and <EOS>
input (LongTensor) of size batch_size, n_rounds, max_length
"""
# (1) 3D -> 2D tensor
batch_size, n_rounds, length = input.size()
input_2d = input.view(-1, length)
real_lens = input_2d.ne(padding_idx).sum(-1).int()
new_len = length + 2
pad_inp = torch.LongTensor(batch_size * n_rounds, new_len) \
.fill_(padding_idx)
pad_inp[:, 0] = bos_idx
pad_inp[:, 1:length+1] = input_2d # copy shit
idx = [i for i in range(pad_inp.size(0))]
pad_inp[idx, (real_lens + 1).tolist()] = eos_idx
# (2) 2D -> 3D
return pad_inp.view(batch_size, n_rounds, -1)
def prepare(batch, eval=False):
"""Batch is a tuple of (c, q, a), each tensor is batch_first"""
c, img, q, a, *_ = batch
# truncate
q = pad(q, bos_idx, eos_idx)
q = truncate(q)
batch_size, n_rounds, q_len = q.size()
a = pad(a, bos_idx, eos_idx)
a = truncate(a)
ret = [c, img, q, a]
if opt.cuda:
ret = [x.cuda() for x in ret]
# wrap by Variable
ret = [Variable(x, volatile=eval) for x in ret]
return ret
# build models
qbot = models.QBot(opt)
abot = models.ABot(opt)
if opt.share_qa_embeddings:
qbot.embeddings.weight = abot.embeddings.weight
train_data = loader.batchify('train', opt.batch_size)
valid_data = loader.batchify('val', opt.batch_size)
if opt.cuda:
qbot.cuda()
abot.cuda()
def eval(valid_data, bots):
# switch to eval mode
bots.eval()
tot_loss = 0
nsamples = 0
for i, batch in enumerate(valid_data):
c, img, q, a = prepare(batch, True)
nsamples += c.size(0)
loss_q_text, loss_q_img, loss_a_text = bots(c, img, q, a)
tot_loss += loss_q_text + loss_q_img + loss_a_text
loss = tot_loss.data[0] / nsamples
# resume training mode
bots.train()
return loss
def train_bots(opt):
if opt.share_qa_embeddings:
qbot.embeddings.weight = abot.embeddings.weight
bots = models.QABots(qbot, abot)
print('Training bots...')
optimizer = torch.optim.Adam(bots.parameters(), lr=opt.lr)
for e in range(1, opt.num_epochs + 1):
nbatch = len(train_data)
# shuffle training data
shuffle(train_data)
for i, batch in enumerate(train_data):
c, img, q, a, *_ = prepare(batch)
n_q_words = q[:, :, 1:].data.ne(0).int().sum()
batch_size = c.size(0)
bots.zero_grad()
loss_q_text, loss_q_img, loss_a_text = bots(c, img, q, a)
ppl_q = math.exp(loss_q_text.data[0] / n_q_words)
tot_loss = loss_q_text + loss_q_img + loss_a_text
tot_loss.div(batch_size).backward()
torch.nn.utils.clip_grad_norm(bots.parameters(), opt.max_grad_norm)
optimizer.step()
n_a_words = a[:, :, 1:].data.ne(0).int().sum()
ppl_a = math.exp(loss_a_text.data[0] / n_a_words)
if i % opt.report_every == 0 and i > 0:
msg = 'Epoch %d | update %4d / %d | total loss %.1f | ' \
+ 'question ppl %.1f | answer ppl %.1f'
msg = msg % (e, i, nbatch, tot_loss.data[0] / batch_size,
ppl_q, ppl_a)
print(msg)
print('Evaluate!')
val_loss = eval(valid_data, bots)
print('Validation loss: %.3f' % val_loss)
model_state_dict = bots.state_dict()
checkpoint = {'opt': opt,
'params': model_state_dict}
torch.save(checkpoint, opt.save_model)
train_bots(opt)
| []
| []
| []
| [] | [] | python | null | null | null |
pkg/controller/operator.go | package controller
import (
"context"
"fmt"
"os"
"time"
configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
"github.com/openshift/library-go/pkg/controller/controllercmd"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/pkg/version"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"github.com/openshift/insights-operator/pkg/anonymization"
"github.com/openshift/insights-operator/pkg/authorizer/clusterauthorizer"
"github.com/openshift/insights-operator/pkg/config"
"github.com/openshift/insights-operator/pkg/config/configobserver"
"github.com/openshift/insights-operator/pkg/controller/periodic"
"github.com/openshift/insights-operator/pkg/controller/status"
"github.com/openshift/insights-operator/pkg/gather"
"github.com/openshift/insights-operator/pkg/insights/insightsclient"
"github.com/openshift/insights-operator/pkg/insights/insightsreport"
"github.com/openshift/insights-operator/pkg/insights/insightsuploader"
"github.com/openshift/insights-operator/pkg/recorder"
"github.com/openshift/insights-operator/pkg/recorder/diskrecorder"
)
// Operator is the type responsible for controlling the start-up of the Insights Operator
type Operator struct {
config.Controller
}
// Run starts the Insights Operator:
// 1. Gets/Creates the necessary configs/clients
// 2. Starts the configobserver and status reporter
// 3. Initializes the recorder and starts the periodic record pruning
// 4. Starts the periodic gathering
// 5. Creates the insights-client and starts uploader and reporter
func (s *Operator) Run(ctx context.Context, controller *controllercmd.ControllerContext) error { //nolint: funlen
klog.Infof("Starting insights-operator %s", version.Get().String())
initialDelay := 0 * time.Second
cont, err := config.LoadConfig(s.Controller, controller.ComponentConfig.Object, config.ToController)
if err != nil {
return err
}
s.Controller = cont
// these are operator clients
kubeClient, err := kubernetes.NewForConfig(controller.ProtoKubeConfig)
if err != nil {
return err
}
configClient, err := configv1client.NewForConfig(controller.KubeConfig)
if err != nil {
return err
}
// these are gathering configs
gatherProtoKubeConfig := rest.CopyConfig(controller.ProtoKubeConfig)
if len(s.Impersonate) > 0 {
gatherProtoKubeConfig.Impersonate.UserName = s.Impersonate
}
gatherKubeConfig := rest.CopyConfig(controller.KubeConfig)
if len(s.Impersonate) > 0 {
gatherKubeConfig.Impersonate.UserName = s.Impersonate
}
// the metrics client will connect to prometheus and scrape a small set of metrics
// TODO: the oauth-proxy and delegating authorizer do not support Impersonate-User,
// so we do not impersonate gather
metricsGatherKubeConfig := rest.CopyConfig(controller.KubeConfig)
metricsGatherKubeConfig.CAFile = metricCAFile
metricsGatherKubeConfig.NegotiatedSerializer = scheme.Codecs
metricsGatherKubeConfig.GroupVersion = &schema.GroupVersion{}
metricsGatherKubeConfig.APIPath = "/"
metricsGatherKubeConfig.Host = metricHost
// If we fail, it's likely due to the service CA not existing yet. Warn and continue,
// and when the service-ca is loaded we will be restarted.
gatherKubeClient, err := kubernetes.NewForConfig(gatherProtoKubeConfig)
if err != nil {
return err
}
// ensure the insight snapshot directory exists
if _, err = os.Stat(s.StoragePath); err != nil && os.IsNotExist(err) {
if err = os.MkdirAll(s.StoragePath, 0777); err != nil {
return fmt.Errorf("can't create --path: %v", err)
}
}
// configobserver synthesizes all config into the status reporter controller
configObserver := configobserver.New(s.Controller, kubeClient)
go configObserver.Start(ctx)
// the status controller initializes the cluster operator object and retrieves
// the last sync time, if any was set
statusReporter := status.NewController(configClient, gatherKubeClient.CoreV1(), configObserver, os.Getenv("POD_NAMESPACE"))
var anonymizer *anonymization.Anonymizer
if anonymization.IsObfuscationEnabled(configObserver) {
// anonymizer is responsible for anonymizing sensitive data; it can be configured to disable specific anonymizations
anonymizer, err = anonymization.NewAnonymizerFromConfig(ctx, gatherKubeConfig, gatherProtoKubeConfig)
if err != nil {
// in case of an error anonymizer will be nil and anonymization will be just skipped
klog.Errorf(anonymization.UnableToCreateAnonymizerErrorMessage, err)
}
}
// the recorder periodically flushes any recorded data to disk as tar.gz files
// in s.StoragePath, and also prunes files above a certain age
recdriver := diskrecorder.New(s.StoragePath)
rec := recorder.New(recdriver, s.Interval, anonymizer)
go rec.PeriodicallyPrune(ctx, statusReporter)
// the gatherers are periodically called to collect the data from the cluster
// and provide the results for the recorder
gatherers := gather.CreateAllGatherers(
gatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig, anonymizer, &s.Controller,
)
periodicGather := periodic.New(configObserver, rec, gatherers, anonymizer)
statusReporter.AddSources(periodicGather.Sources()...)
// check we can read IO container status and we are not in crash loop
initialCheckTimeout := s.Controller.Interval / 24
initialCheckInterval := 20 * time.Second
baseInitialDelay := s.Controller.Interval / 12
err = wait.PollImmediate(initialCheckInterval, wait.Jitter(initialCheckTimeout, 0.1), isRunning(ctx, gatherKubeConfig))
if err != nil {
initialDelay = wait.Jitter(baseInitialDelay, 0.5)
klog.Infof("Unable to check insights-operator pod status. Setting initial delay to %s", initialDelay)
}
go periodicGather.Run(ctx.Done(), initialDelay)
authorizer := clusterauthorizer.New(configObserver)
insightsClient := insightsclient.New(nil, 0, "default", authorizer, gatherKubeConfig)
// upload results to the provided client - if no client is configured reporting
// is permanently disabled, but if a client does exist the server may still disable reporting
uploader := insightsuploader.New(recdriver, insightsClient, configObserver, statusReporter, initialDelay)
statusReporter.AddSources(uploader)
// TODO: future ideas
//
// * poll periodically for new insights commands to run, then delegate
// * periodically dump crashlooping pod logs / save their messages
// * watch cluster version for an upgrade, go into extra capture mode
// * gather heap dumps from core components when master memory is above
// a threshold
// start reporting status now that all controller loops are added as sources
if err := statusReporter.Start(ctx); err != nil {
return fmt.Errorf("unable to set initial cluster status: %v", err)
}
// start uploading status, so that we
// know any previous last reported time
go uploader.Run(ctx)
reportGatherer := insightsreport.New(insightsClient, configObserver, uploader)
go reportGatherer.Run(ctx)
klog.Warning("started")
<-ctx.Done()
return nil
}
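// isRunning returns a wait.ConditionFunc that reports whether the operator's
// own pod can be read and none of its containers carry a previous
// terminated/waiting state, i.e. the pod is not crash-looping.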
func isRunning(ctx context.Context, kubeConfig *rest.Config) wait.ConditionFunc {
return func() (bool, error) {
c, err := corev1client.NewForConfig(kubeConfig)
if err != nil {
return false, err
}
// check if context hasn't been canceled or done meanwhile
err = ctx.Err()
if err != nil {
return false, err
}
pod, err := c.Pods(os.Getenv("POD_NAMESPACE")).Get(ctx, os.Getenv("POD_NAME"), metav1.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
klog.Errorf("Couldn't get Insights Operator Pod to detect its status. Error: %v", err)
}
return false, nil
}
for _, c := range pod.Status.ContainerStatuses { //nolint: gocritic
// all containers has to be in running state to consider them healthy
if c.LastTerminationState.Terminated != nil || c.LastTerminationState.Waiting != nil {
klog.Info("The last pod state is unhealthy")
return false, nil
}
}
return true, nil
}
}
| [
"\"POD_NAMESPACE\"",
"\"POD_NAMESPACE\"",
"\"POD_NAME\""
]
| []
| [
"POD_NAMESPACE",
"POD_NAME"
]
| [] | ["POD_NAMESPACE", "POD_NAME"] | go | 2 | 0 | |
.ansible/tmp/ansible-tmp-1587780252.95-166760549471698/stat.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
ANSIBALLZ_WRAPPER = True # For test-module script to tell this is an ANSIBALLZ_WRAPPER
import os
import os.path
import sys
import __main__
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import shutil
import zipfile
import tempfile
import subprocess
if sys.version_info < (3,):
bytes = str
PY3 = False
else:
unicode = str
PY3 = True
try:
from io import BytesIO as IOStream
except ImportError:
from StringIO import StringIO as IOStream
ZIPDATA = """UEsDBBQAAAAIAIIQmVDuFwZTYwAAAHkAAAATAAAAYW5zaWJsZS9fX2luaXRfXy5weUsrys9VKMhOLy3JzFHIzC3ILypRSK0oSc1LiS9ILMngigdT8fG2SIIaMEGd+Pi8xNzU+HhNoLqy1KLizPw8oFIlIz0TPSM9AyWgaGJpSUZ+EUjQMa84MyknVUfBMy9ZT4kLAFBLAwQUAAAACACCEJlQncXxazcAAABIAAAAIAAAAGFuc2libGUvbW9kdWxlX3V0aWxzL19faW5pdF9fLnB5SyvKz1UoyE4vLcnMUcjMLcgvKlFIrShJzUuJL0gsyeCKB1Px8bZIghowQZ34+LzE3NT4eE0uAFBLAwQUAAAACACCEJlQ8grsqVMRAADLSQAAFgAAAGFuc2libGVfbW9kdWxlX3N0YXQucHnlPGtv2zqW3/0reBvMysY6ju3Et2kWBra3TTvBtEmQpNsOikBXlmhZE0nUilIc72D++5xzSOphyU6Tpm2ANdBGFg/Pi4fnwYd3ftvLZbo3C+K9ZJUtRNzZYW9EskoDf5Edsa7bY+Ph6GWfvY5lMAs5O0/FP7ibAdj700/sPY956oTsPJ+Fgcs+BC6PJWe3+4Phf7Ku5Jy9OTv/+8npeyZStsiyRB7t7S2Xy4Ef5wOR+nuh6iH3/CTchV6D7C7rdTrzVETMtud5lqfctlkQJSLNmDOTIswzbqvvfeYFt4EMRNxnSRrEGXSI3Qy+d2w74pnjho6U0H3KslXCO53Xp5cnf3w4tj8eX71++/rqNTT800JAz8kc+5aniMs6YtZoMLL6HdbysWTmZLkEoK/4CCoBujydOy63rjd1yRNkl3v2bIXYXZFy61+dztuzN58+Hp9evb46OTsFXlLLsjq7u7udSHh5yI8Y0upotmzH87h3xF6MBvsvOnIBCG2PSzcNEpT4iF3wLA34LWfzAMYJ9E1/5UpmPGKK6061g+J1t+gnGQiRwf/Q1dGdgygInZRlgmULzsIgzu/28ji4U2qwmCuiyIm9gcH1Dvp+DmJPLCXLnNTnmeyzHCwCu3/sLoPYxo49piRkQQzcOdBfEE8SmUqcbKGYa3ILJK4A0zwPQ4JjYk6okds9MUPLRGaBrnpNAom54i/l/5sHKeowS3MOr+YiDMVyC63PCw5oSH4FC+qMQAs3UmF0FwLsV4ItMCsWVp9ZKy4tdq0xzp08hEmETfAGmLIjb/Jt5FIOlh+TENCHyTyqijoouwSgCQ18KmLQ6JzFAmYKvPRzHDtjDdCw5FbKCxPNY7ReJIYDhFS6b2A4RUwW8O7k/HJ3dDDEIU7CwIkzbUqy9yDZ8aUW3l1w9wYkeZAGHGa6VRXAupoCkwtn9CiOGGtOrMMXHVbQs53QF2mQLbYx/NrAIMseB08QBTAKVZ5pwNRAZYsUbMiJGU9TNSQgUDEgCyEzFsi1gZEJd4N5wD1W8NMQF5XQx//H4wP1d/I7/d0/VN8no/G6HrAPvXFgdKXCY9ju15Wgu67razwYvjB2HUR8i5Y+ST1skeNDmACPYYYXHS+4dZGrCRs76PBrpg5PqBPgsFAUtlnYaLE8C8IgW4HJ5oAUbDdCJX4gR/UJHZU22kHFfwC6JY4GyMFmApzIn8i+jSHiT+LNchdOKjn4N9B76Enj/xTPfRy3REiKhyXek5iNB/uKW+XNQIlO7MPAUTSzkIiFqCyjMYuoIWZjzaYHQv0d7PRpDHs8GL1YH2qk32eF4OpxFx8JL7LoZBAYZhBu5ZahfQ+uloa2hIbBCmKfhRJfgSgiJI2lXPI4eyqJ9hsSIbV+lY3rDnhCw/z3BCcnh8woPWJ/pLkLGRCP41XiuDfgp7r/PUvKr70OBvDO8ZfXH88/HF9CRMfvO+xsljmBcuZIACMS2+OZuzcXYuCKeN4nQ6A5B1BOVvo5maGlzngoYl8CKjSMVIjMGrB3ThAyge5yGUgwxF1CrqSlGFqn0cEA6AcgUoqJBYDPAYECj6QPSv28ECKRv+mIsYTMTi6ChC0caewSdb5c8BgRDJDaIFnasRNx9ttU89UBJt8WfhDG3VGBmt8BaUlywgRxTCAdQNTKuBJauUMF7gmO4mIoo55KQ0uA5BIjUaTIBzKMb2BCZmpGo0TwBP2Njx2wS4FqAy1xCGtg54AjygEHIVrqeBOQ4wXDA6ZhwHfYRR6rqYxeOPZAFZmZqxKyBxfdVF99vQmShKPvAZPzBLGcQNpJrn1HD2oCGaJIIwCbrZiFKrTaxgv/7GViTwpIShcwieqDtoo60Mvjs9yvjhspAfiPrcyIwLqFFvEtqbBXGb2a/lB05Fr33UpjjQBp8n7s2zCfb7GOb8BJfeqt30gGFfOdlNh0CtMwlFBafKPZe5CAuplIVwNWsXywyZgrt49mCbgoLlFbYuhBVwb5bO6E4UrjVbMCGBB6BmWWZJJTDow5bqtT2G5kyTePUSFKqbsar2uaq7aRtgTZq6DM06Qb2/iNVovc5+icqNWk02C6iA5TDJX4oPCuE7qQ/Wb8mzBXNdGa/2nMyr1fHF99uijKtRJprRjzAqpDnXQFCXSM7h9DokM5oIoClPpAcga0WQKson+QEIWh7qbMBP4FhCLUWYiuYTAFwRgocxciqHqLYftIZer8TgmhiJrgBx81fOX3Bsva/cK7LIA8DBMYMuE1m8NaArxFDU87V0W6RtzNIA/gTlxrkQ5yfMSuVD2mPmX918rmY+q/7cwqk55XJ2wL/+D2jYWss28VlkRJaQEDeQTfIgnlpwm6DInJjawVOJAvCDdzwu2s9xteBvKZFMw/BljHU+lGizBN1EaW0cvJpGigybptLHgYSrYSeS12173DjxFgmzUpj1zK4C4eKQOWAWBBwI/HbyFf/fWizMKbx4kyCwWkIc9FDAg3jxOjuqjx88WoualAzoO5eJwYmDJDRAySZzAWkMY8TghIhWYCV30xf/r1ckgw8McJQguanoiwRkMs/J7g9uOlyQNviyinecRT0HuAqyi6rMbsoogdVLv9GBmCOGvlfzQc7hcN/qPZ91ORJyYO/moxZPB/28L3JTRjmJ6tcLFBrZpDIgdvcBAgNY9EHmcoDC1yIQAle7SgR1ldyOXPFW9ckS6It2cnJ9jO4jyaAUVRzpmfPCDjl5PDogXi1xaO31J0I0ZJOKAoA4CA0vznMr1fajlG57h9Mmj90raCWWlU/Hch//BUQ+8nq714C4VAtM1MrqCZ2Hck1C3N5P/JWJ2HwtnA7MH4YP/g8NXL8WDyskxeo4dwDql6MA9cKnueCf/ug/jXm5gsTzwse8HZuMAOVXFdjyc89nAesLPLH2RJDxZvmcsHpeUUDWgxcglFOa8UTr84EUwfLQjRfzZy3D1aDn7H3fw5DcnST5MHi2JJnX48DxurZ4Tpd0v0zIztu+V5fkYntq9bVUWibRsJctzy52dqj5TjmRnYI6V4dmYVyO3VYGOqBPGtuMGCChmBKQN1VuRk7oLL2kwKvF9taIHcXijeK5lyBhvkM42/WsowvrGlyFN3ay5H29GmzNJ7UiwWaeSEUG
Z6VD2qIwgR7hpR7UgnG55AvOIUwres7dy7GL63gCIXd51nTro3Ho2G4+Fosjs6OJgc7EM1d7D7anh48HL86uWopiK1If9QFdV20VIeQsJ7q+TAgEcrOubts1PUYPAwLeFn7RzEeHBQljvmaFmr9nCTDaLm4okrebWzt46LpFRnHiVRpb0vgEQuQG3FeciHKWx+eDh3Xo29Q3c+5py78wN36EyGruu++t0busOycqodNGvVx3foot+mCA1hpG6oo6oFBArxQFdxSqzYaSSjunWCEE+APUw7k+HMGb86cL35+NAdepOZ6/Hx5OXw0JtP9vcPfj8cT5yD8atyL0wf3di2QYVy0vEO0NRTrcap3bYYnsMQF8Hp1HFpLkH2MLHxUHFpR356n0zvKSw8c6GWy+UuVvPl5DbntLZIVjlgRysBEDGKM11PJGLDrLeJXHhLOl73pzkUt4TcHc8m9OnITgOjPhdo5fFNDKNj4aKFOsnzMBWev33HPOHmEY+zvvGbbDQYV7wEHfK7T6PlJiAerACl8tgV3jrZ/xc6zeWuI90gKEsFkAQ91aOTOKrkKK+hcw907NfxnihCtWJ5aPqGn0bULY2IircnVoEqCL9bB1vQPLESVM30NFow9dd3i78V0RMrYP2kbKsCQuDRnAev9PiRAiLJVum+siCK1JD1AVEGTlIfvt0o7b46/aRv4oD7iIX54qeJeRTSPCVLzzzSNZbOjrnFo4/XyhgPTkKuRMejHXW/aKAabTzXLQczR+JGoeqmbyB9JIAtnewMBDKdMmHT/lyn0/H4HMsoqBVtkWdJnnVVN6XiPnDZ02fecO9lqo642vilQ6932MzB87nCkKW3ChWA44mvbmVK4IBNsZIvLwUhnSkRKwM7oJ+++MvwQLxgfyFFDS7tk49nb4+RO94rIeksztRAXL49uWhCuIsKxJu/tkDMwpsS4o8Pf2tCpNwvIS6O3zch8NxDCfLu5N1ZEwZKuhLkw2kLHdxpL0Euz940YPLAm6pBgKfytV+89quvcbdWv8fHCincS9Mt8Fw2ePxWv4an8jVt2ekGeu5X5jlEXd1Ez5WBrDRF9Sa30uTWm3ATZIpuiGRn/1FYwOdPlxcVVaSb4C7qcHeb4L7U4XCBvJ3u+4vzKt1NcBd1uLtNcF/qcLhG2k737OqvVbqb4C7qcHeb4L7U4WgRrRXw8tPJ2xqgvwnwfQHYMw7hXJ/bZGqHDb3oPHR8HQVwzUao4+Ux+1rzrjvsEo8C0D2VWkPXAiOhw1oSb0XopwqDFaAbtPMCiqy+FTIF80YwdfqLyohWOGIdAdXDGohm+Q+e3oR8Bb5QrtXvhMPnMWLw6V4oxr123oM0W+BMIO6LLw2KF+DFzi5bOCUZ+uoS0GaBMMiSQOqhDUTMDCp1erQV2Q47u2RfWhRrBgDSzXCz/mkjVqQIaB7bwDbKdF0mFZA6QaKE4nTxTgRZ19fhda+edqiY9FW1jq6vITr5PGv2UmasqwPVScdJXDnrluEQw/W0HoDLUOekPlVZNh5smdbDIH4o6tFrcwFThUXKUaYWNq/rQ122VJ0UFM5IUI6+GjTFS0NrffR58G2d8DpRSy+zDPOIrliF3detby4pTb+qO2B9dResGPCiSreuW0iUmeJDCGEvY/mqdwN585x7lYCk/gV+vC244Ub0+kff7QImVCdmqYuJ5mnyu37aPzTvJqPxpsvT659SRMM/zauqLDVJK4+mjlYDTpldJUHTs4HS7ak2+kECtX8kBzAOXW2oBDSzNZhJL7sqhaSiWoK2cnjwnYzbIrWxoHYz3VNfI24loNo0oLGudtDiBmEJTJftNgJX7KCC35tsQe9NKqDFUuVG+GI4VKemdbV3bcJZRXhVJzLNXQl6maWrmjesXuEuxhnTcSHpvklXDVV56IVDWdcOHjbg+Z3Lkwyc/zFd18VN5xpxPqAyCG8B0cPg+PTs+PSqzRnTLw2oosDSxeW/amBaNQCS2f+QIu7qK3dTgu2TGqYKl9ZPpRfee1O9IulPOUiekikWmkxSQTcdMGrjsTiY07JevtxTGRlE0FeEt7y60SuLZAffUa7TtczKEExOUO2Fffa3Xh/CXLFcot5/Nu/LFQTV8AUbKnFPRzQkAIHrWg2YOm2mR6xP1CHaFayaXbQgnouOHjGFR1keFSlWr0HEKnfsLE0JCQwwyONDw6Sq/dQ2lumHWkAeii6FDSODVMnXlnkT9AxQOCODyRJS1m5R/zTJ6fV7opUszU3MiuGWkuF9xDpttTtaIw4pvI26wmwhTZAFP/ULFvw2FvRyO7Fgug/0y3sY2Sk8hGwfHKhDrR6teFTfF3bVq83E2m8rmM+OunRPl0JFvOUXDeorJ1UPsy4wOsXr0pHB14Y1lGKz/3HCnJPvuB8j/nhDZ12k9o0s07XwuBWOvMDnmCCnIrIxkSxmR9PPFhMFJDaXpiji0O4BcIDpGiDTab7Rcu1QYiEFvFUZDBp+wZ+6RY/virXn0nVBFzfyStYR+yyIid+uut7fqypEd6jrosTyVT9CNrAbgBdRgl/fP7ap20eWKXqX3KR5bOtfN+lqzL1GV1SSi+5/2ERruFO367UulHYGEuwPjPnI6oHDMt/+y2oS2Khj8wVdfZB0t/esDoR+NkSnmoUWLOtz13yqc7hiOpR/FA5FWwu+bFqL+aGdwu7XAapZKw7tdSuELljJvKwqRN2mqJ4qMTYmLEauOwxb3YKxeuKsv2lya7UWCEqdgezGSX6nZwU8dCol1bcG+w7QsMmn4k8ZgbC2jZmRbVu6QKNirfNvUEsDBBQAAAAIAIIQmVDCst0/7w0AAMkwAAAdAAAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvX3RleHQucHntWltv48YVftevGKwRyC60crJbFIEaB5Ulrk1Ullxd1nERwKKokTRdihR4sVZ56G/vd84MyaEum+xmt2mR6MGmyJkz536+c6gzMV6pRPjRXAr833hxKqKFaIeJmgWyIWZZSve9UKhwLjcSf8IUy9ebKMRVs3amCdBG5WeBF4uFCqRIQrXZyLSBnXOR0gr7tojCYNcgwtejrgiUL8NEzonYXTTPApmIXZSJbaxSKbJEhUtNoiC6XSl/RdvleibnczkX813orZXvBcFOzHY5/yCYpCoIxEwGEVGJQEgKL0tXUUxy0rc1H6k5XXs74SWJWob0SGHNNsz5AzGzn8QPJHjbRvE7cI0nnWizi9VylYpz/6IhxlGyUpH4exZ7abRW4juvOfPmSxn/bbn2VNAEhe8b4tXX3/yFdw/lXCVprKBtFYXMSZbAIKFIoiz2Jd+ZqdCLd2IRxesEKlDpSkAI+h/BSJBCLaAAItAASS+WYiPjtUpTqGcTR8+K9JSuvJRlWERBEG1JtX4UzhVtS3jTWqYtZgqfP+1xlpDODEvsMussSUUsU0+FWrOz6JkeGWVoKkKEEZwDKmYrBiBIdOxz2UlspnCqH3hqLePmKVZwpKWSnBVIOs/A3hFucjZyrj6FG2EEnUd+tkYAsL5zgth4SX6FBTFcKZWx8oKkVD7bjHdbg
mj/Gd+6IzEavBk/tIeOwPX9cPDW7Tpdcf2Ih47oDO4fh+7N7VjcDnpdZzgS7X4Xd/vjoXs9GQ9w40V7hJ0v6AEotvuPwvnhfuiMRmIwFO7dfc8FOdAftvtj1xk1hNvv9CZdt3/TECAh+oOx6Ll37hjLxoMGH2u2gV65UQzeiDtn2LnF1/a123PHj8zMG3fcp9Pe4Li2uG8Px25n0msPxf1keD8YOYJE67qjTq/t3jldMqzbx6nCeev0x2J02+71jspK/FckvXbAaPu65+izIGnXHTqdMYlkrph4BxoEj72GGN07HZcunB8ciNQePjYM1ZHzjwkW4aHotu/aN5Dv/MOKAWlYpzMZOnfEN7QxmlyPxu54MnbEzWDQZYWPnOFbt+OM/ip6gxHrbDJyGjhj3OajQQIKw2NcX09GLqmOmR47w+HkfuwO+heQ/wHKAZ9tbO6ylgd9Fhh6GgwfiSxpgo3QEA+3Du4PSa2srzapZAS9dcYgbS3EmVDk2JJU9J2bnnvj9DsOPR0QnQd35FzAau6IFrj64Ic2Tp2w2FgDsuBMf7E8uME2Fe4b0e6+dYl1vVzAD0au8RlWXOfWqJ3ioPbixYtasym2Xhy2Wrqy6PT8hFgJKOP7WRwj7pDmVYgACz3cpWxcBGOzRqH4gOTsoVIhY8tnL8gQjDreOW0hYaAweDMVqHTH8d6+d0WSqeLeTGKNZFJr7x3lgJnnvwNf84TTP44yK5eZF+MkKZOmAMeSSVEd8VdeuJSglG6lDJlULAPpJbyyG1E64jTPfGUhyp6ue5SFt6haSlesTYSajPuxUQWL0GRVQXJ6SDf8pFZbxNEa0nDpa1p6S5qJei/M4vvH10jE8n36lO42VOE5h/KXWq2WxrsWs6ppNoMoepdtnmQcR/F5PYHyo6VHwvreRtYveOlte/Q0mgyHg5v22HFGnfa9I67EOM5kTb735SYVPabiEJHWB7a8QbYkJp46gztKGN0nhMJg+IRE0+1RyrsSEPEnKsbp+XkfCKQhSp6eovgJ+T/wfFlv1MQv+lR3U1L200/ZjKQeFodfXECGuQS6iJ5mu1Qm59HsXw0hQ+gUNr2qZ+ni5bd13CGFJFdakBClB+fT84Q9mmpZ/ULrC8a+894BP2Wx1DXcE3o1ozNBx5gbNd7Q8uKlwLEtQCH6L32OhXVBZH8bXNIN4WGohj65KHtloYitBlFCKq5tHjtQzkEUH5C6jbbyWcYaphRUptPjQk6nBhr4wJkzgnwolh4xmMZQKEXey0UsZUEIpBMCSqjb0ykIkoIvplMd+i0K02Wh7hYHZf6NlMAxFxHtMGHooOOmIlMaFYfty9aVCy8L0kSTosXGoNXj2bbmcLoWSAdzCJxzoDT6tA+FBpATioOZZw+xXGBgcLGRPnAe9JNLBI7a4U4gyam5mOqw1ScWhPKTv1ul6SZpXV4CvSTNzQ7IMWxG8fLy1WWgZjHSwKUJ+1W6Ds74+uUM3vAS6CeBU3w/fSqIUoqbWQw1SVQ4lsceCmsJb64RFVK01gDERLAslSwdy2zXwJ0QFgyfipUMNiTxRkbwkSIJcuLTSXahYnhquo3ovFatErGtI0HdEg/kwqR4eEw1jU2nZAylGx2tyIMMYFTY0OBuq8iAqQ4LQ5OPmU4/wInJD1+cFXMOxUOx+Vr63qkTtxRq3EUB3N6zV7w2RSnJ2DBriVDhvFMQzNdtPyQLVVa98FW5kKE1yFJYl75uh0hT9KNUlr0K12x/Jf13CZfuo0IgA9Pi0j91sYQUuvCRhwp3wRV25aEv8Liks2PpPHJAc2HFkBGjwT7PWThCOV8CyFNIHwT5JlYRh7o+PG9h97izDEROjSBL9+io5LRz20XnMzmVVlGRLLlhOartKAvmZXY+rJjFFt1U6nA1vJpGyMu/r3mksAKM8qHM5ICW6cd041WmSuZSzk1ytNxenOv5gLlxSJDoxFL3fXQd0iwjz7VcsS+0Io6q8pAeAbkNCJIceShyedsdRKQQ5+081MyQQrxqvr44oMnY1yTMHYk7lzSWoBYeyJGKa6l+7fw42UOzv96kRySOjLoK3LDnenNd1ABDCWcbzl6+ar7i7GDpwUpkVrJ7QxW03PVaRCFj5YZudw1xVaVk+2+RrEz5LICCrqCFIsri6ZVriHBZGFVoAQ4AA9Z6t2TBRh548pYLJ/UI2leteGuVK1u2lprGPCnQVMIScv6HcxiHMoALjluxBDtxLNMsDvU29jUbzXDKyw1UsEE23bXEkHfSLI5vVEBfsXaDQo3ymxXLLX7oWVVBrbw6Dj2qIx7Oeu+3pmM0A4zWp8YohmloYbcxxZoLRC7MAZSs2secb4VOwYqmY8CEJ+ZqsZDU4AlqSIQeOTKJIvzZEfYBBGjNduWJuYU4FHW0xEZ7NuIyPoeOEyEs0XGCb8NZRSIDTxdZ6POAbh5Jhmq6KBWFqmSqcEsNy8zQ6Ch4cyk11/MqxT3rPjmLE65np6iBr9ZeuTBJ8snsvip7ElyQJs5VuMnSp1zOeoDONnz5Tf2ikcPai0JJxlGfdGc7h74Q6uV5OrGZcDgR5XvlTc9c54xg7ExRhQN5C8T/qZQh46N9D32pOyurhTX9En2MwbFCs3gmHmQdtWipnuWeH5jnbAqcmljlCy2MFO/CaAv/1BAqz/AAx1Atb41itQQPge6TE6hZX+QMm9sw3KnWtmQby490x62qVfNjDjryElAF5blXV8cb3ZNE9WOLViJPrc2b3top4xSjBss0xZQh/5yJcbzL3xMQVIANOCo0WATIStLKhtK4Te3i53kQ5G11WVLNEGISKlro8HJrFmEpft+OMNjhmKHa61dp5H4mkhUjJHrPAV49f6WFUza48iiHhC8RZd8enIyHRwjvMaFtzkFUbhN7LlE7QmcUkD9nKb/AODxIK7fMGZaWi9nFiVnQh8hUvjd1+FgEC5WeILS3/8DsJwjEVNnyDEB9RYvsM+c3OUmOuZ8ZXUbZckXhzu0BkAYVuiJhCm9Jbzmisss406ihSAxbTi/RJlVr9VPesPvReo09NFPJI6SsixyYJRr5QIDok65EPuw45dyHbn1AyiZHx1bp/SKaWvob9UxCZJtTBitqTd3YhJNSVfwcr9SPJu4Tmxj/WDvOhB5mvGr+mWtzWE+1EWf1+j7ZX8LUQX5kJxIFLDqvgzn9xmlWQmqGLAVRO2UebHfD5xJ2iq90W5tz9mPdYmeDtmgt0RfVxVflbVOT90VicuWksUyFxTySS/4XH0fapfUjxpE2MvtjHGmjvso4sqqmzzuO3IOa9jiSE/b/0TiSXwIl2YYnPF9wIvlx08e9svmrx336gI+aOn5uFswJzVNMVOdU3cj0zAnymrDxisUtgZMDNqbloJJf4O3WSIyAjcVr9YpjJ3afW3BFLVxrevQdGefGZp5Lp6cGNQcj1X11fqZx6v7k6UsNUw/Y/7WD1APG7THq5r8wRv2ZWdbRUVZBwA0rcyy229xKsac624pLWoUKrvkxTldJ2r+PIRiXs0+dge038bz2t5qB7eGX/+EZmA2PPmGIeyIGmh8/KTpO
6YtMiY4NIvZnRL/zWc3xQVqlbabWKpSwHjsdgBSrn7yzUkwq5rQo5b0ydda6rWb3NEAe7q/tHCLFWa+QCNKjVlTRCf8cU08RmkcMmg8YjjRDB0J9xCyAY/yPUUDl8wtGAZlpxL/UBODnj/nNenrymB/rycf39Nyrf6ClP2uh1O0MusD60EvVM6AM7vNnXHSUBOxjxb0bhxa/9OTVJKYuM8wt7muElmdwECvoDcLyLbieZvIPwyG0x8IWCw1L/z6KeAqs07Qovs47kE8hR3oiam7KAwWaHAvpJYqb24JK+nm0YYjlQJrqfxaGujqDh0n+ez6G0T7bNvPTPAWsZZJ4S2nzlciQ3+nqSNM/so719rn0guI1PFRDvyxkdEQJmNEoga+CkmdJoeu/896jjNJq2WYsMoF9gz7fXF5+Xblpgv+fMo66CHDSAPs6dW3ycL+OiLudk0tyXqdhe0a/06QuOh9TtxAc5PmFy57Li4saAvb+8bWOr+KJfmFFBq6V0bf3lL2p9h9QSwMEFAAAAAgAghCZUGziseQNbgAAtbkBAB0AAABhbnNpYmxlL21vZHVsZV91dGlscy9iYXNpYy5wee29a3vbOJIo/D2/gmOfDKVEVm7dvXO84+lxEiXxGd+OZfdl3V6GkmiZE4nUkFQc92zvb3/rBhAAQUlOumdnzvPq6Y4lEigUCkChqlCo2g7Ob9IyGOeTJIC/i7iogvw62M/KdDRLesFoWeHzOAvSbJIsEvgnq6D4fJFn8K3/YJsBYMV0vJzFRXCdzpKgzNLFIql6UHMSVFjCfBzk2eyuh4BfDl8Hs3ScZGUyQWBH+WQ5S8rgLl8Gt0VaJcGyTLMpg9BAb2/S8Q1WT+ajZDJJJsHkLovn6Tieze6C0Z3CHwCWVTqbBaNkliOUHAAlQbysbvIC+4m/5tQkYzqP74K4LNNphq9SKHObKfwAmNTH7s8SwO02Lz4A1vDmVb64K9LpTRV0xt1ecAT4xckseJ28i4F2f5zz7/4kuYHff57O43TWBzB/6gXPnz57vgP/vGhCOc/LmzQP/rIs4iqfp8Efqw/y9c8xd5BgIIhvoPY+dJRql0GRlEnxkUgKL86SSVpWRQqDmeYZdXRZwnhnQZkvi3FCT0ZpFhd3wXVezEugcFrdBEAj/JvDHAAipddAXwTQA5BxkQSLpJinVQXUXxT5xxSHobqJKyLRdT6b5bc4cuM8m6RYraRK86TaJaTg88jBrMQhEZRoRs6XZQV9qeI044Eb5R/xlVCJoQRBlsPcgxGkSTIDgAjHbJfmoIkUtDqexek8KfptqECTBkkUKtDTyRLQ82Cj0FBYfQ42gXR0ko+Xc1hfRG8FECo+wWkLBQqYqVVSpPGsrIlPY0a1jY7wDDh/dzAMhidvzr/fPxsE8P307OS7g9eD18HLH+HlIHh1cvrj2cHbd+fBu5PD14OzYbB//BqeHp+fHby8OD+BB1v7Q6i5hS9wsh3/GAx+OD0bDIfByVlwcHR6eADgAP7Z/vH5wWDYCw6OXx1evD44ftsLAERwfHIeHB4cHZxDsfOTHjUr1QBeXTE4eRMcDc5evYOf+y8PDg/OfyRk3hycH2Nrb6C5/eB0/+z84NXF4f5ZcHpxdnoyHATYtdcHw1eH+wdHg9c4sAfH0Gow+G5wfB4M3+0fHnr7ivhbPX05AET3Xx4OuC3o6euDs8Grc+ySfCPgr4CCgONhLxieDl4d4JfBDwPo0v7Zjz2BOhz83wsoBC+D1/tH+2+hf53VhAHQMDqvLs4GR4g3UGN48XJ4fnB+cT4I3p6cvCaCDwdn3x28Ggz/PTg8GRLNLoaDHrRxvk9NAwggGLyG7y8vhgdIOkL6fHB2dnF6fnBy3IX+fw/EATz3ofJrovLJMXUY6HRy9iOCRUrQIPSC798N4PkZkpXotY8kGQLdXp0DaKMgtAmEPDd6GhwP3h4evB0cvxrg2xOE8/3BcNCFUTsYYoEDbvj7fWj1groNZQAsYMY/jBncozENDt4E+6+/O0DUuXgA82B4IHOGCPfqnZAd18GD4cF/DCKYZjgKe8HfH+CqCn8Md4NnwR//GPzhaY+f/Id68m/qyUA9+UY9OVVPvlZPztWTr9STt+rJC/XkSD15rp78RT15pp68xCe9B788ePDm4HAQ7Z/zxDRR3ociYZYDd5gnoVSL8Vm8wC1aPRrjI9ysYDuA/VU9fsW1x/mtejLhJ5PlfKEevcZHk7Qo77Kxepbgs+RTBYypDA3KhEk2Lu4WVd3EDT4dzfLxhzL9WaOY4tN0Pl9WMexd6ukBPQXB4lNd/6/47K+wE2Swo9ePj7noLM107RIf/ZwUuXowxAeI9U2RZ/lSI1pxH2EvmYXGiDGWRZ5X6ukSny4BH9zi1cMfbFIWsabdf9hvgGTVXUjDB7sk79IiYoCYBbLAKCHBBoiIzD7O7oJZnE2X8TTpB7DjB2UKwkV6fQfVJ8lHkFsWuBHgHnJ6B3txpoD1eBNZZmPZVj5Cz5CuAWwPCTYFEEYoP+G+nMOWAnsGtAaozgFKFZcfygcPoLEcRD6gQQzikvzKS/Wt0M/Km1nySf9YjmDXGUOH9ZM7/bW6WyT1D5ihugxQdFzVAGF/mulfsNXpOsl8gdKi/l3E42QUjz+oB9Niob4ubif66yyucKtWv5OiyHL1YwLbJeFyXeRzoMEMUeF9Xgokf1u2vz2ChQU7NQh2PHf17yFWy1D4kDf1gyFKqvppxbBBoi2qPJ9pyCAXplkPJYsEKPCgKu52aV7VdJ3lU3rybn8YDX8cHp68BUZwXgC2yadxsqiCAyo6KIq82G2WfAMyQmIAJjQALFB5olqRlUbvb+Iykt/r2rGLSkPv9r8bRMPB4cHxxQ/6aaNbCazh5SfB1qqwssUFCOe4sHgpPA9+H7wIbkFmh/k9BbXiGJSSc5h+D9QXgIfTsYO/u1jxOK+S3eD7G1h9UKFC6UuNWOAOPigoFcpZ4xtYuiRcoViVTUtYqAfXqA6ghAcFYEyh8Dz+AArOEtYeYAN1xh9QclR1YCnCr4RfYLP0TppGHJ0RMvDox6OxItxfkrvyuzS5pYIsW7wanP94OoCeduq5ByB+Rp0FZqCq0RWi7m5atducjX8tQRrF39ug2VSAHvGgBQ3GDr4EeXdUoMysNbQ0Q94IOxWqY1z1MM+FNMjqEqqXXpPuEJasYMUlF9YIEBLXKGID0DQDdgHIdrBqf5bHE+SGyHb6b4Qf4uB3UYHx1cB9zl+jbgw/RZyCmmRMQnorc3O/Yhk7Maanv1LbZLY7JwujJklc1vRuA0FrAuZX1Ql/yv6+NS+nW7vBFhfQu08BY5vC7kSkLasJjBFBDuwhUHpwlqSkYdwCAtc5bIS/2+oFW9ewuyQTgF7B+vwl7OrmgZX0k09p1XnWNVEd3oH28mktqmYx1GRGwC3vgsmSFhGNGu7/JpqjRK2eGBSd62vY7GB75DkYfISVCGN5P4SBQdUoln+NBAgsC5otkXoQRf0StmboQN8AB7PSqPPH4DJ8FvaC8Jvwyp5M28F3UuZZ/2sYk1kS4878/On
Tf9t5+mzn2R9gh4Yxwvl6E39kDRNWJOjd0N1FXMSgOeOo0MK6TWCYkonVQOs82L/fPPjTHmD4DfC4480mwoP97/YPDlFdimDjeRftH749OTs4f3eEAuskHVedboORwM5xA80/EI4go/e8/2/9//2YFGT8+vQxs0McarXYUGbqhFrWieLZNAdp6mZeItGNX8ZSrp8CQsD1EVhHMOjVoHsB7xLGwNY17bEcFUn84UGjEPI7hFEXNrr29WM/Qp1wPvkakS9v4mfy9/nzr9S3r7+Rby/+oJ59/ey5zD8ijQJG4qQH4dbhudSlr7yEUW+7rRxMi3Lxg5UtoebC/dvFwn34/5dWFgjk2AB3ohriDX/7qspKNkmiA+2tynjGHC9CObTsL+54o3r+lUIEKBIxRDR6wdYG6y+eRclHkJTa4ZTpJwWgQ42f/vi8J19e8JeR/CHzUoTbED+A9ZtMk4KelPIIrTwgqslPFibMAhXoYwKiuxorePAx0XLnPF6g1InGrB4L99HfliAcrYARYVuqfpVHGezqH9Hslkeju4o0kpzKrKJyXKA9tz/OM2CZVTQCWViBfHlycjjYPx729Lfozf4hWjX07/OzC/iJlRLQbx48ON0fDr8/OXsdHe2fg6q/Bz1Ck+gCWFSnCP+z8+1u//HlTvRTedX9FicAPOCf38K327yY/NfipgA+/F+38PV20v22q0v0H3e//V8h0qh/AEwuOr44ejk4Q3kJp3O1BHbZQetexxq1bvA4uLwGkaS6InnzdQJ64ThGIylPL+CrJxlscR9wosKKRVEHSCVGPSKVqGwgdQKXB7ZfBsEFEq1WKD0zDu2ACWyGMKQPHti4mqijxhcvcNnEC0O+UxJ18Jx+LrMU7a9q3R8DEsZq0oVfmIWRKGoueiH3v2EOSHNlHejn/a/qwgDaWCvtaI+AlrxCNsO8Lg8tmEsLhttc7fDWWvwwEAPYo0Aln9TDSyOFrw5gPpBKNJ2BODMLbvKZ7OFoH8Axp90ctQg6OqHjl6xczip+S8LvdjCHBylMMrV9q5MRNKKPYf+B0jiXcN7cosSZxCAXWGWD23w5m+CsqVClxjULszmeiApYAVYKApvbyxusQCIIng+QejMBiWFczfBYBJgRAFOnQdJJkhu0qRoIggaWBxEs1gNk2/tnb3EK4q4o9qxXJ0dHJ8f44gLtm1pIYP5WjPdYZGDuBvM84Qc4Lnsh2l3kVX6bJYVVeFrky4X1pEygH4XzqADm4TyaoaHFeUYNmo/YXm9ig3wIOMQkuY5hsPZI1ZXC20TFClTCDM+jyFSTzurDJjxXM9c82oZmMzkky0nxxAOHdJqh0ggF532CCzMFzW+MRZZHs3y6hxqzNItWEocGICOM7Y4UyRwYfWTQGvElpjOi469krpS1Ajjbp8XaYhNQ5+e4QtaXpOmUw1Kuh9YujucpwlfK+DqJ6BSwbJCdK8mMHSWG+QtmOdq5NC/FtUeHZuMId0CCrYU+ARzPUuQFe5chvgmvurib8u7yGqdqdDZobC6XO1d/f9p7/guua/j+bYf2E9xEQmL9F9ghlNBkzwPFYz7KZ+mYRxlPz8oHRyevB9HJ6eBs//zkzNPK5eO9nSuAdzEcnA197/9zOc2xAIA48hcobj/9UFZS7AEVjF4e0MJ7mv8bfLSsqmaoHO2VpK2M0qp8MPhh8Cqyaz599uwZVko+JWMUzN0qrwdv9i8Oz6kWV/jmm2+4HVkwNR3Meg8ewGuSvZRBryOyfBiGoP5oI4F6/S3KfTGyod3gEK1KxKL06z7Wk8lcLYusfsFGsE7XaNI8OTOblaru4RoxfQ0fdIEG7GBvLwgJq7AWRS2pl7jNcoESUDIhBHC/04Ai991j1H+K8Q3rOwsyhQfhJPm4jDNDLeXFZqBqwCTbm91Xp5U953f38ulVfxwvUtDH05+Tjt2OGGYah8w5ynvVTT8tcaQ74ZOkGj9hyuyIAhw6JpcvRPsyZPCwgldiLFiH+/P45zwLcds14TZx8uClKjfK2gaFVRBo7+XZocubVjr12Q7eHPxwNNgNjhIQXyZHuF7Q+nyAzHU5T77daNjxecdPFxtlBwBt3vhcVoH5umXtKHvIJmtImU6+fBn5MFgzgbqXz67WzWUN6lea0xuitmpuG0j/uvPFh5vMG2mzfaoYdVdNGVXMmDog+gCfG41nKCuUnfGsrOcNywIiHChhaZZ+SIJ3cTG5RXEY9tjjpEJnnOA6HleBwGF5F9oF6S5A6apuAmX3OJiC+ppxaZauIgOLKFLIo7OSCC4IQoHvs/gOPQ2meLx2S1KxeAhhEZigSdK3+oG+OIJQDVDgsdxZo7gHUEDptlCS5Qp69se0BCFxjxxMOnWRrjRzjiccXMbuOb2/vUlJUGIo9WiSUX5MTiiNdxpuMF4WZHPlTqZos7zltpJJD6VXFDChh9A2aRqIogVGAe9zwU457jrN7NM5Nklu9rgBfaVXsBY/JMkChpH0XjrBwVHVciBVseBS97h/5bhB2eaird/3+WC9U7qoWt1xC8n8MYjPUx5PLLR0o7HAad8LHsXFFP88+nCL3zZZByBoFkgYMhvXJnFbMytJ2wX+RToazDx029IiUjBMGI7SulB0JutipoSreiLzFIS5r/vA5kNDYHvg8gYpYfNea8ZrLZFnAB7nsXceqEHlIhmj+xkKSknBxRE70lD1uS9WBPZtNZuyRd22ytbTfCXzUR+08I/7bWBpJrrv9/aa0hCUqem1ZxOwfe6hZWKsuqYfNgzNn9WlVnS8fWq0+ZmoGsUBsQf2SoHh5XWgSnVhnWbJLSxQ/URWER5WRKi9RWKBipQhsgNsSB2d7IXL6nrnDyAjJ2gHKvfCEthXPo1BA86LCPs3rkJDTDkDdQb2p4/JDHVRslKSgQK4zR07732MZ0tmRdgaGo4e6M4NcaaSZMW6H52oSO9GyTjWdhXaVG4AHLlyIItE+ylaFcWwix9sjdpGtT8GUb/gQ8ugw26o8rCs/S6NkzxpVrYEvXBhbIzzUKCUttoZc0TJaj6Cqi+aovxHCZIN+NiBJmhSu+fxotM+jL3a+t2ZdJWPREe1bzxhBLqtKCB5myjQ1rkOhcmXtEvj6SErGY9/zZZNkUwRuLFMCC42IG39/8vkPsvEsEAbI7odfB8XGa0GdO6qXZWaKwnX2T9mIbkD/T+wkJoo/KMW0pe1vGIh8c+Ip3UnH/1V8Nna2kKpHovyiZgcJYDAkuhVQILwJK5ieLccQ2GQKXmivYHpj9OdHIxIICZbYZKBRInABMCiSMjDRnvh96Fdz0wFvHow7/WpoDVvDQpCLShqb+Z3aTKb1Cd7DGslP3Bo5R0+gmL6G3VtsSVhKRVllwZGJNYsR0YRexTkhUd2577UddtRE1++lVj1efF0Pws70Jy/CMEOGrx72rmta7Ef0kHprGCczxPlZQaKqOajI3I3RD05nuHRQlaVGw+bcXJotOqdJxnoGDTvYaqQ6oGXJULvuiIPKewKHc11wovsQ4bqc+3lgrN1N3gIvP5hGQYPA7L805rr4XB0lQwYsQYpBI/qWx
YdeoJY4RFJJE54dFZDZ6RcMdaCsVpM75IZiKDarZaGeBdwutl9bzX1XlbvLihpvER3STemr34HQNp80GuHtkTmEqAwTmo4NrK7eAhFN2IEALv+0Fgb1QO8noOKb3YnvKIG2OgtWsihOkPCg0l00EOJn0/v4hECy5KyMnyL3P2TvZOoC3QoWskertpA78iKL5PJgWeV1zY7ugqmqFvmDHKOcxbWQJHWTtIx3euq6CwMZiYTNq2nbuzs9nScSkordAYJQgeiCPSODwSe79B2AZ1P+EgtuE4LwzKhlzEfL7x/T02+f29MS3V1p0xwpmmcAHgmLpkWwWge5As+/hPwZHqqYZPFKEfbhaIl69m7IpXsYt+N4thWOY5ncdFTgovxmm8A3eaBdlsx/ICe9YNdUqB23/+3cofuqy/vcVkl40rNMjplBrlrKgOIzfJJOS7rpFRmhb4G/7zvAQEs0Z7U9BRAzEBOkhtLdFQzTnA2iv1BfdT0yHJjP8RNFOkwS+IPilr4OWjQyZ4fNb1otAI8Eb+ry8h6fv++sWYEnJqOMQ1BOkl2kutrtODRNUJjVgukfj+4ZdFwd1cWxHKhbxCiWyPND8Nr+AYtmRO+BMYkpO7UW7axpNhroEATYp9lCMW4e8FNOsWDZTrUNlev5THIC7iGJxvHLL/1VO1TC2II6rHdVEN6//7vIVVA7y75+hwvRdC3F/DtEk+or34JfgFS0pqjduWum2FKJvLJjTsAy0Dfv6ebDurgAAhuvH2Ob3FVXqd4U+TO4DMJ370zCr94/75vMXt7w5MdYwPZaTs4TsjjQ0l7NMH0a36OM543i2DPHUhfuz41HD/0OkpL9r4S/3izAEA7/fF5U8DwoKH1eWlzM9nOlQyksl8h8qHMdwAaOL/YFGfSnDZC2SRxA1aDGTne3izDh9/tH14MIrrR9+Zg8Do6OI6OT6LDk7fR6f7Z/tHgfHBWn9bh9MrnaRXN18L39M191BfO2BGQvSB8FAaPgj/YPbPpi9PfGB0X5tqBMmjcrLuJfQBXZySYu9MGraT3RHflQjAn8a+MrXto2zZID7zismCg7lHYmlaznHNbyOkqGjx1d1HypR/dzip0zTqXV5pJxYFct3N4UGObU6cXWnJW8Lp1q5ok6t0aWlQbk8FV4b6UAiA6d7r/DERoaperCNEs/WsQ4++//DNQgo01dN+tY+iVHv3W3GuZld6Z24FonfdY875dLb12YP8Pbw9okKlLWqg198l7IbNqUBqKgEF6S3jpp2XOupXXZaSh039HlUE2Xopy71fpeUr1AjW1zNMgmVJifDOUb79yb5nj6ABa6RxNLYTMcdwv0hwMTU5LvQSN5UCSP+VUOwYZmTVWJUY21g66tCJj70h/nMaBP9ezuFwzX9mE0OwCO2OYq/xXsIUwuuwd0LQd6MEGTS1CNYy5Ailkex4OsshhwV9XnW4L61OVfUxSLx5o6kNy16MvqEKTQU7a91vlFF2o9CqyKJAbUcbXAuJwKRheCR9EgCvYsjItmt34TPQ/G/XWUVgpk7gdVxuEwrXZjstyPC35LLd2I5PVLbR52rXaGG2jAO0eS/RlR2uEXuXkrl+kH2EtWgb8fFktllrPEDZl7H7Eqm6SZQGaZzrmgYkzGK+fk44sGB4uBrpHF87MMzWLbZHtY4aXZcnNA3VmdDEWXABMME/KMp4CdrV/0St2zpndoXctXkxWNx/QE34XYfz5Os+foB8t6MRs0rqpqsXukydYAh1auRSZyaDEEyguIOpQUTd4JRC1umCRs12Gz/zoIgPeWMF4UGTqAjSlJ3GhLgCjUkC9mZTK3WOUTNNsF8MAcACqWAqo8aIzEb7awRVg6u3iPxsVLpPFLpqzCminuk1gmLGvhDHXlHIL6HEkqLCZUOLxwLAU5InENrAqF0xjAI3OuLiuBUag0akbj7hUREhbgGuA2BcTnBgaF7zFMI+ttw18INOQJyVJ/fSb8EfPsCSTYvi07hrepsG/9BgagN/PDL6PtDIsHW9SsXkucrx4kMYzl+q1zcX1x8SCe4R7v6AAH53wzyCfPe0xAob4yrd1SGZwru0xFse5GmKhW5GUlQrghQ1Y5ZkifWA8SVF1npKUE18+3aVWr2w2wlc5jQ4P7RGgOjSjfF1m8mlvR3lkDDe8hX/1SyYxzn+LzNzyxdnhTlnd8QUwWomP1Zq3SjbobI66RW1Y1ExvGymbAmuIrwdgWeD8R/RUkCriaMZBR3ljFfCAMYfNWKG+jjx1qGOMC/QmxjhoxXqy8ACxsCA0CWX6BY+DF6g+fCY5NBL/DiTI03FSNgvC/ifdgf74NyrVMb7xBxMkn2muQPZvYBYhepsF3rlQg8GCIMye1V7WsJUJM2ysEPW5z0pRn/rys68fjZv51gIiRiL9gA0qmRkGfPVpLKCaW5kf5PVpZltVPYvqVLWmb7H/e+2f2t+AbSDPrpmnQ5NGlfCRfMI1BQk2bzU4Rx8HzxzQHoZts/sw7P81B12aH3BlsrgZEoYhs6tqtibFjx25xPJe5RIi22jnipvlPM46GNwJwzjixR2+/tbD66CVEmx4P8IbroDuS6yrXfy5Uo2fKoVPuVC5vL5OP1FHGRAft+NDdESZpxVHMEQX+U7t4GLE+AK9EiTzvVk8H01iPGCb7wY7+Mc+kQdsOoi1clVk70foGoZCoIYokogqYjl+4sP+coFui3RJgfG7fOoEfzB2GWiNkf8d7LumVyT19vEeUQIAeJRsTREsYsmg4cP+82ulWNP1YxqbbvCEG+tJXXVuTsNX+0tmy/koKfRNxkiPoT24akSVtPlK/L64em22UIewdHDIN3g7yafdIHz+lzDYw3iVX/2BSUr34oiwcTFd1m4Qn1SnHUTDZ0+PQCf+IwBx3jzDxXcUdi0E53wbjrlJJ/zPn8pHnZ8mj37qfwv/dvHX5f7Of8Q7P1897n4L+wKgL7SAqXM9i6flHl7/fnt8cjZ4tT8c6EU2b7q1ssZRbxudLQfDLp5shxVfnYXVXRkRIJluaB7ZghE00BB/f3NrgzfQLR7keZ8uvXaeKQcm5zLIFyHFKCBSQScv0mlK930zZCM1vl1E2MCjF2jMCQsa3L1AFXiuSWiuuBphKW5OxAera6DihNIJvqSDSXXQS4fV8a2anWpOGKsG46UUuCUgtYWCBZ55o0IPWCBMvC4l67s5FLyQ9wKD51xqAFefOyIcYYWdB3iB4QgIZYDifXJ0EGag3EGQIeXXMoH0gsZKvQDDtvBeYSDaR0fNDhpblQ6mrmXWlmGsHiln7fBl6DyM6PojMm3AXCmg5KyJtEe+UUdy8rB8G/oo9LyoW0gr1QALnEQOMdtdswMJaJkZ0QE1vXiMDhudv7zsBUcv9aRDnQhrdoM/mcw3+bRIxmh6JXUa2+MnQE4gPTAq4ax6bHsGjr16zliWLmMmEfEc3ai9SaNBsxWHKvaRoEsy4MW0NdF5vmshW5gXY8hKQzP92RXuSTUkx/79eRO3z1XUxhnjmKUTtVF0FANRE9Ymi2MQttZr8Ej2ZrWnpWXEd5HRtNTBS3q1a
UV8evjCF74K6qLfchuHCCw2fGYeBa9VQKZ4TFYikkDeDPdfHcp9sUfBEV5PQbt5iTGZYJfHANS3cUbBFtDiFGy9ilkcV9emyPSgbk2j54iQGe8TbklMCO0hpWNCVclsBlXJlNJjt2iMPZRpSCNmjmVSuVfOzimSCDsAzrAcCd4lqhfsjQGPMITEDkUTmrGQrm9lY8dCbbpBc5Ts8GIWV5VRGtIRiLEvoLXQXXKMPYneU/M0SxjnO2XDqVFnxyCNvAx4p4NxIfvD6OCHi+FZ8F+B/vn27NT8eXL+rhv8Hi9n4iMe/Et+ex7hHfsr7THIl68oAod9OzWeGOHAy9rriy1bZaUCMUhcj9mdOPrYA4YRxEccnUv84sawEDCANSyIOxWnHCYDAJzrxsjdh40wMnckvqZoTBoVZUmTMGjIbBMTb7zKlWZjiZ4n4UVgoMezvERX/SrlBYo1nNAd+XWNDsUPhWlHYKZLaB/kgyTYPz1Amo/SWVqxlw5Mug5FSbwTdzFtYZNbnmU3uAEehKbGW8bqFgOyo1ceTeucZWMENAV1eplW+bIE2ionQqHe7I55/PUSfad3gDr5Nfcx58gZtmOhYYsMcHKxElPyFoE+harr7CiUT1O5uJShZxciE/NZ0OIOWeldvuRQ8Gj8tJeXBHmxAp6o3caOguK9ijZaop6Mdv4GAFv2x+15tJySJbIAHEsdzQWvCbJJmOIp4Iyb4w6cmFat/Y85cF19PaFkYzeHfWXMsp1ldf0Ho8r3fHWiyJkjUDO0yFmF57UAZLKqyR6LMfagwkdnn5USzh1qVdjvJ32NJoB8YQCFciDrF6OwaZ/Q5Lye9JE4nmv/8IYWg/PKf5Sg4Rlt+6w8XmcmCwB/6dNpddJpP6tOynG8MB1TarkMZameG8HH2MfdDrQ5hlmd4jBAHlptQBCuKz1zQLgRgLjUg6YMzaxYBVukYJ4dATlJTGJZKo7XPicnFDfswy2Bj/MAtL8ZcysUUCK6+wOLh5M3aDTaoidKLKVlpoLLMFYix/yf4ckx3qlV0ZQudLHrdIpenehQjYctJhO/xQMAdr3cOFKl4inWiFq0a7kLyWW6HsqreChU4DJUAwbb5YWMW2jqMBhJdiXB7fiVIDPR/pUbVIeN6DbRDuREcgMWx6JkOxV9I577OUOEfA3kCk+PkJtTO3SiYgXC+oyhc2N/hBuOJUkiSfYxuoatC/lxp/Xad3AIK0IUHUIXqqVFntHmpTYhsrkWU4oAWZiOJShu8XPguVLT69ZSv76ECj7jE8v/EmDsjeB9nFdvUCDXwlWSpYBYRGoH8qvOTF8Y29raOlSPLW/4mDx1RZo6YKG29sVnrYH1hZ44DuO2DYrA9AbGHOQFtJ7bnr3UfjKpD8WQQtrNB3Cq+9U4NpC66nybvUREj2ocWbVFAW4DZJlWpYAQj41mUR2aqlN/FRLqBxH5Nlu9w3c0AXQlexrIayengI2xDV8h7lS5xFJXVkfsekZgD7KhRThvV/aLitndISW7Bemm34fRQV//dRuqS1pXV/ZTsaVTIa0qEFflHbteqfXFP981t6TteiRWk1XlqbfKDUutULyF5vhguV4OqGviemFmS/lh0IzNRiHrvlbXvEWfXt+Je4LDf8hbRcUgxsi7W7QV46Y1A1DZzrOt7oqVJJjXMbelFc8NWBVDz0tzAy9juzjB63MUEacUQ3I+mzQDWtfbkgTUqXsEswCPhvruotYEtbvjPWA0PKBW3bT299t78njBdV6TmNFyAtk4/PKTu/a4uTd9N0TGQoSno1QRv5uDjAUlFSVUE9/wvKG4dGwIbNlfOgN1oUnmG0fztSuxGNDhW0hSjtZxlKJNK+qABnzd0ycNEUYV6QWjO4QVkVVRH2JJZEX+1RTsqTBApa5FCmBJkRh76GG7RPsABg+eLfGOvJylSADuCcwMYI43ScHPm/B1QXgd5ddSPZ5MIlSYIk7hge1qhGVuSz84uGIb9hp6em0d1OHHtJJLphAaNuQEUGv8AfOrLVOOaBpnToYT5BEcYVUD6cQzzCAGWzWmdMO7fZzWTZkaOAcKaO+U5C2uqIjMZBSuDQkqSVR6gSePmDXxhaTSwt4oP7vuKwOyUjnx5AwfdSImZURO4jg/hFdSeZiX6UysIBS+UWSvnO/U2U1Y8wmasn7bRT2jhKpU86ldzSptX6ah99YURl3L/G0X5YmNvor0xddOY1pjoBb/G7t6c97jwU/joV2psSboyM551lKFV4dZgZ+0FE+toqlTbIwhpZcLmhaGLFLPJLa/eAcgQneHlldlPv6AsZHQ6NxwFaICN2ja9b76mBQj9K+7s/xhtoMjNjKq1D54T9hM0FcqC5+pK3A0pzsDSrHMiJXQjZzsY86VHeLVZSKBFi0XKKXQvQIHX7nw2EI+DpVM+NF7ZxlxCFYf2FkyjWcRnTxS1TAStmOsGAyCqR/z5LYe0ejZT2DMrAea2qGHaVqfuo4ksqG1Dhwkui4tkBKnG5mK2xQaR61nnOoHNuUxmVfvgwRNMRsYTirjPCR0h0NYWUQBhT/pSHL2qZJvx2n6M4P88FFL6W5453YfbYD/gdOzZB5G6jenNctdfkDP648u27es/O6CQ8VEBIzS7PF2wOoBmRDMOSf3vjP0dkX63skhZDyNUY4nU2i7GOxMb0aCY9pE8rjTELy0wIMmVscQR7GFjTvenPFLxXugcyA808fwydo+XhtheHOTjYDixtNJQ3s2EcumAaK/GF/E6kI9YDsm+S/8Elq6hnlfiIjh2rS4Q0O0CdUX7cXpmgQCFZWczhBGZK2fJhN7TC33JXX9yxl2obhV1B59HlQ+ESDDeEznaSqAtzrAM7gqXz7G+/B4lGFOpIDdzVn47RzuH7/de9UllRHjF1MEdXr3ZJnVIQslDZuNN/M4fmXia77VO3KnZaf29LPepclAaq57XJWWEOGZ0Ir3Nvb3TvNRA2tcgqKSlB3YFziGuVvq1bvBq79oVhLRdbHo9cHwFJM9DM4ClQ9RfcKyKjDDiIEeOvnj5QibmYbI63wl8blTFBU6X1F87hSlwOSeonS/zS4Ki8tXEh47BcnPx1eUXjiFUcjwlcXnTlGMpO8pCY+dgsgzYBr5CssrT4XNS5Oq7CUaRUFzCqNzoK8sPK+L/vKZU1mLhvVq6rYW1psQRRrbpKDmOGvbV2Jvp/Fkg8osAnec3xtUTI1K6fX6JcsugSZfmSTxTDJHy8ZULkc7TTVIsWIp5Qgd7B2vmbpvvJCB1xKrzcQlEoSZj48cGEjThD0x47TdnHe58HRyfDtRENF6gDKtWA5EvDX1ZduWd6tiwZnJRLq+HihJWRkkFehmUehrJ7z8fv/s+OD47ZU4BjWKN0/rGrZBrFPnANPxXNi+LiZCvJKpYBs00GlOhBAgAfTUAb95ialJESq5lhqmbqAoYrN2/IQADFY/gmy+UxL1rsLLKvJLK2FfD07RtxPzIQcGkZUDltnT7r3IrWm2Cc2hGZPeJL06grdwJB4AOdSzvCD193md/qI01iQptpIkPcnG8aJczmL0zeU2dHW1dutc
7nXiEjrVW1S4sMc3XILdjlSaGA3FzscBAqhCSGTpO7KtlDcUaEh7Vaiu6B+iNkvGHGCCHd7ketajSVKi+kMz0fLBo+rewLViYjJ0zeaYSuPKdpR8WgDy6L7VsR99jIuSfY4MTjRSKr9xEgu78Ia307c5I2aiexBj+hDQNT/0xAkDWVrtq4tvYHCBGgYIrF/B1En0nRssRpGCyIkArzgB5cxbOESzmrDcAJCWmb0TAR+hdbifzrLWnVeF0TMOv6jiPjrXSoMqVE9ptn2ZqInSb+fso6RATkF61ihJ7shOSXqmSxqEFC0fszZSuLOmHZCc+mxo/LDRMCcgapTFh56yFSextcviQ09ZjtrkFqanntK14n/JmPYEtZ40e2Xtcmy/FGvHfFZGCR2XT1zNXgPW0dAZA4OeyrnbxMECrwrI6449Y8jtjIItor9Zh645uhC7Dla4KTWDdqvSl+kV+eiqhsOm8cEu2+wCPK47WB9BOqNRv2gMiBnNtbE49ph1kFV/TjFNOfsV/duTxFf0b88ZDMp/ZY/vnj3Me/zHrcgpseRvr+7/nv7WM7Mo1V9rQNqxXHIF396wgZcP69Sa4k2kDI4OhzsxpnPoS63vOUkBxbdMYtiQ1AFCKaHhCbUnNAtoh5N6ArgPDKvCrWuMEqLkcr5D9zj8gxMBoXLzAFkqk3Oiij2vllTM95kVE1WzlocsUfd8R3d101Oj6X69r/vWD858W3xC3Myk2N6NyzY1pxo2sOXIu0xxhj/zgrIiinkkG7PBRlc+txtlIjXV4kd3gVGaEecnvoXgpYzj6Ue9VdW9bpXq0ynGPfTjoW23q5oyjNodDcgbJ6EYr7i6SbC0XQ1luL2t/VFeVKQCyMa7RKOfmuuY9U0SwkJXJ2Qq78zSkbzf4XdddPDlSzmS//h3W93PnQO/xfjzimaHbrWoJSOxHc1RbuvyQoXZ2Jg9cvCmOb0zi4w9io9A63+vnH5vtDk5W5OXA0sZ1dMDjSyZUNmhLaW7+rIJ9LQXDHpfFchjruvDHLO37r6mZHnN2596+271zqVYd9WSUwnQLQgttHG6rx77nDoYJ5pp87ga32APkNXVAtwGoi53uWH3Phl6vAxacEspKPflU9q9d/yT262zzRc9AsrkjYP5lRFwXrP8Hm8wcVmpy+62ZXCbPd453EQQ7ob1naPSM4KI5bMrlT0cb6C/WD3vzFnTnC3/crPE2A/Rwni/idI2RTwHI9C5pA/AshxnBH3pD45PBsfnPonO4t6mmAWMnLRMtANo76HkE9qMQWFv6i9+V+k1DdSXpXSOKsVohLp2sLL/h2c6SqcRnjDzHVGTLbKCzccDxq2JL9KtcZYQ2DU662p9X3RUQ6WquO6MriC5eu4yJUmn6pdVBN9rPdR4PjWeqxtQUBYD7k4M6xBuP9EcnaeiRY6ndF7OgD8x6KbKoNwQFgwrHVOvPU6oC4skhuZoNJR9PTz3MaFszA7qYCq1SYLIsqFNYpJSkJWmtUEMR7rH3uXGtR64A6ZCqfLrjeLVyuVF5UKgODBR0Du0yssZP2c6qDVHNzdCIHXYHc3ZR7rKsmRchqREK0EdIuT4zRA3BU5QHApiYXBdoiUGL0LSvOtxAco+zfFGiAKkQo2SoCOuaSRl9b3IN7aNa3XXJ3yCx9JPqDVyqSgcHYDnv3heXtPtE7zz5J6vXDcu+/jyJarFZmJsLyRjuam9trEMm2aKGV15zAxk7XY7k+RjiiG7DTA9IDNHHRQrU4+i5nTJQyNLFGsNgChfGXPWnLcWsnsmcM/1KrwWU2ofDI9Di1/5gba4GmPrL4QfBahFUjFtPCbi/tgz5mDJ7LbBdxsL0h5UQ7rSx1lRu6TVU0HwV6u2nyNBMdx1gtxqU5hKm8Y1dKkovY50AB5h7tpoYx7bKUJ461lUULUF7R4F+Gl4j/5GpFkWm8yemizo9uz4NRkwTCP7ASddoFuVlo+Hqq5s5Wz6MWqqE5maNJKeI77jC04iX/Vr6nQsJo9Tt+bJ0q+2bcDuX+rsFzYV7d7XjaxQ8T0mVZNgfuctKqXw/1OQtvIJw3LqmmCNV7/bMwcanrSzFKOHbI41jLC+8nTlwsbCby5qb8DCzJrw5uz83Z5ZtaGbkJfoSnsV5qTmTAyhco3DSn5M8c2lKn5lO076y1yG2nobOt3yohJfw/rYGBMuvRIRKeLiYRDN3tG8Vy6Uwad2/vRj5TNv6XdjUz01LMW2agrCKCgxfDfIQNLxaVurl+JnjTooHluuDrgrB9GmQ906x1CTnHvG95453nvG9x5H9Ylu43KvwSSF4kCw33ktoWv6hcZ6p08SJ8QR6FS2GSfDhbMnWNsWHX+0b1pyOtLcsv6l1Eq8L0+nmquOsd0tE8M1RaQ60jdRMnGkHF1bqymEQd1qY+Gx/opiLmGzcexO7wpmYIvbCVr9F7eghQnU/uLW0o6NBpq3j9WnHjRD82rKj2sm6vgG/TV5au4aoWwoAO9ywWFbeDEKBawhEoJTBJ108s/I+vkcHAsrZP/BPL9GANt2KbSWpbexc+8MQzsMjaie4bQadp55OXfn4ITmVk/x8G4LE/8tppoKhy8wk273ixkjre12xsgnxf8PMEZ24vifZYxTzRip4pcxRgY2LRbIGKdonhKo/WlhmQeNBn57xgj4rGCMPAg8i4UEDc44Zc44/efkjOz3ozmjS2eFw2/HGWsEpv8DnHHnmViYzWKrjuPw8+vPrS+XB5E67VyPHWf+xZkemcnwgMF31mA2Th5zm3BG13hjOu9y7uhGfl3vvBIXPWSEXO8P3gml7zU1IXjBGqDFQHk3H+WzdMyjDWOTj6t41tGEcc+V29pv2eEVkTeY2/hpnd/rdDTqF3qMUOdUFE8M1ILdQauZ6igFBtnaCOAkqdBHYc++fuVjZtTq7/Z0BD2MkUfj1pKQg3MjoL2LatJt6JuYEmUAiYqY0sgC1hyWTimzs1yHkf3NCM1UVGEGCDu8UD1LcZFTnMQ6liFjid606NZKIXwUyXjS1OsMo2+reWfSSM8zPLBzKqH9Xdf73V7A3PqfcMMj31ksGz59+PRFTqfaCvF/8NbnQYWw+NU2we3gzcEPR4NddHNfxEWKVyDVvU1xx49HGA6ezq3U+XhaSoZUiVE5cWAmFM0RI7VsZK+6iUv0kAQW3wtC2H6hC2HLSuP9GQro/bmFn7XmvhFGvomfttPwJu2ubhs/2xgVCdPrqrvpKqBoqVM/sCs7+cONmjOuBjSOi+R6OVOhIiXEZAPeMpskxezOl06ihkXXGPjMGIZpVdkanLHVek/1v4CG+EHj3Oc3RnF7rcqKJylDdMvr9qHz9sHify0gP8ciakWCNOcoHQ00HXlOB2dHu5TjiEKWS9SDhMLvqb2S7kG07DVl8wWdDKiWMMCR6TPUUw0fnpycdltaHhX5h+RLEFiZwWqlBPUb2yigh0oy73kli7Uiib6hvqcDj/Ylyhm86riiyUrZ1iyIU/u+ezN+kDGqqrBC9Jbnia90f+Wj9oBvV0EML/kNFBFTrjCuFNxLqP+nUmIM00hK145M32+
67mYEifMpNapefZuCI8yhg0jYxTFti/6Gz8fzFmdzoBq8bjqZS6Xm9KihXcpXQGCPTSDmGHMfmoeSzvTyccb1UqIU3FxSxM8m0mKz3KV5cwVrtQ6Ef7e6jwhZt75SjGwUa+BY//QSWDslrDtGbFVL8WPeL/BdL5DpsXITl6M11PkAyurNma+Z6i2gwyFQdeq2ilaVsQhUQgiMZfqYLkG0SHSba8f4uYfihp+1ew0Fb/yyzcbuyapNR9OyhZv7uJHXD0/nVDLm50pGMyMtQF+ndBl8g9d4+czOx0nIqNiMxTtPP3t+rrv8UpAjKICuHdK6l093n/s9MJhSlyanuCJv4RK9hVXW+3CHGXkfN6KFJ3a3CUpd+FZwnl5tVM1hEs2gqz5UfcKtT/AyBDwnexY++jNFK5wn1U0+0ZOtzZ41npW9WiLqBVaxFl/Qc77ajb5VZS2RsiQnqi4GsyMWUZLolOiEilwK5sTOIp4aWxLBTLPxbEkx52/ozhLmI90NtpZ7xe3Op8c/9KZ7Bf3N+e+WhZ3+8TliG6bau5UQfJSAU/UKy9XURgcmtnhmNqXU7Oy56vZ2cJoUAdKCL7cnfe5qPLuN70rlRot+teFjnJU7IfLncC90oAzJuz6liNV4l9CWaEFJlUi92NPoBFSY/fOTs+hsIIh5pFRQn7OJ3DGSbBz5IqFUg7b+gE9LH2isC1X9wDF3EElqZVelIqWIRNBUPMJtQnKlcGoQHYi5mdFxOwhV/0KO4BycY8gNTlFBYZEkzR6bC9Xdgn6jtwEaC+74hB2JHIdy4zMErEK7PBaia7XSNGaZ7jztumWi5TwuP0C5jlTANHINAa9+F4ecj0yX9TgtSMvhcpqHLlVfUUgl9qwuONlKOqMYYsZ9CbXWqKMUKbkxNBwWAWOQ4cFmBo1hq6F5c+OOoJP4glPI7dPFcHA2xFlAV7O4+x6rTzOdzyie1MvLsJmqtaVkCe+swlWqQ7vf5tQ7tM4kf1sCCWZJNiWPnCypVxa7jP9tmZRN85qZfoZiDmRJnVxmnBfA8Bc5XaCs14Y9T8ilcfKpJ4aiFIm6nJO3Z0fNHQ9Z7jmSBp6+HRQtFsZYECotFrhfdTwUAWiipZztuMWPW+068WIxwzCTsO/0I5RaaBviTYnid2u2Sr0wz1v4cjk97tVrz78NG3sAtUTNRjyG0DnEBF93GCZxuEsYxquejWhPA2o6fKs3sukijunY3XXXt4tzqtGquAe7mzBasqQKcY89h30Iq6F3Sw9rEV6ldsWz73+4MLIbDS8OXtsCyMwEON0I4FsT4NuVAPONAJ6YAL87/8HlCFQD95NlL5j26DqJtaxpTzDpSQudpyEMYevSSjGzGEwywYj+/CettOjlwfmwzVDUsZr6vQWmC12xBro2VMzcoX3sEMec0GYLm0Pc2RDiTqMPVgsNvWajdfCrrHTj0tlnHZuVEYcPyos7o97w9cGZr56qdhOX0afInFIwyjUCvw8GPwxeRXpioI96HUOX1/8PRnU8uzYRgdFptGDKpW/lYINI0FOXqmi/DjlcBArjKNjwFcvk+hqDYKCIWVLK2bhL2UQMkJK+kk5F0spI9oUGZyU70IRXEkBMQBMjhD3lbQLtMufr+FhlC09uC4wVTJW3eAeBb+YZiJKX8rJPX02ZSj9zGDvSWtVbNhaigdJpTWXOCpmTi0GF12vHdI0CIwcYKtV2cFNVi90nT+B92eewCP28mD55/kSFAKdJcVPNZ9tePokqdWOM7YXGbyjicIPfIZsO/h7+gNEGjRR0v3gij02bJd+enfpK5s2SJ+fvnJKrAlJtiPHT1Wh6X+etr3+pR5J85AgFtWE2cWE8mg1gZM56wvy+3kowsx9vP/IO+2299qB72wLt+9XQvvdD+9QC7YfV0H7wQyuNAcY56SmCET2fep4j7UwWZkoELWPaaSn/thv88Y/Bi5ahbqt1QrW+caYAT557jCmmZ1wxpvD6PmO6Gtr3fmitY7oa2g9+aNaYvr33mLaR+wJ2pT95B2naPhPeftaYvnDHNL/nmAKrWjWm8Po+Y7oa2vd+aK1juhraD35oZcuAVeZYg2T7WWP6zf2Xa8tMyNtnwokxpr+YG+5BVmL6XbVfUCJ2D/O27GeUcUnnyJKq/vDtHlCXmPG6z6kBVKorXQdFzbyIimSy1G6FWN1//1PkZ18j+OzqEh9eNfQ+ga7b0YLqU+e263W55rCWjf+UkG1zd9Ft7RZQ8gWR0rzhiEIf54TVFepDwNX3d22zhkLtkuNgXvXMR8atOgd1DaS7qn3Phap7tC73O5yWFb02w8Bzc+EeGIgf9Rdh0HQivgcC7M32Re23Tc17YGGecWyOyyrnBq0W/RYrx73FvmJ9tsGv72nYK90+xfsnxxizbXDckew6F/SsdGz4MQP9ItcukhIjcte6ooT5JR/YOsUxOb6yfV+F9K1VpcwM30FHu2RrxY2mdCrR69WxehllK1av+ejzY/UyFP3mi11baleVtKxKv4NkHX+n5Z5Q8+Cb0bwMoaa+7OZ7P5X37nUT/41JiS+rrkwCUESte/nUPnBdeydIAAE9qP76llWgXH0pCfDtTD+naQUJ227cN1FUMS4Jlo7XuCZcfVsGv1llVgdhMqF4HI8ti1VZuQnkTSjbRgRdNMTcJTq/oHOY0erl2CSQQo1OZhk3rGAfObIBUUMEznwvgJqT+6GaZEOTW0Z+uo0k3i3Ab+Ji4sL1+Td6qiJzCV3CWYFEOGTv+igiVgNWmAF9lb89hoh/rpTpz4ymMSmGB/8xqBdAs5+ePsajEvhx3Uubr9VGWSvjixMS0+T/F2VSmulqJEY7Jv9KOCuRsiDP7kg6dfLL4IEUlcL0NETfw1fRK0yzYmUOE7ciIzmkiUSDaWxrPyQDNcApDDkYKj5VKc0dhLh6TBl+bynrz0jtSyAkje6kPPIhgaDT4pggpBSgIa/lAfRu//CwZ50EC9eSEh7OtR2oXFEqq1r4KpTyZtAXPp+VNJcOhLQsMS/RiE/XQZkD4UDFxqeEpXfk/IQ0k4NeB0BOWZGXGScV8lBtbZdfOaffRsLnEIefZ+irsL0QQdqk2NFgONx/Oxg2ym7gZOaNqVv3PGFvt7g004eK91vMlw5o4uWceAlD+9STUA5MG8zC+FgeZ5v6L9OadfJ5sfSGIXpEkKQw4G6ujG2+aULLwkjnxQvghqLUwqwhJ0yVjRKTemH+MSunlwYozUdKKkTXNFpS+HwX6htlVah/v8zFr0Tm4Rjmdjx4THbprSlpMMtGqjZLbu1g1rgu+evAK7+ZoWxk/9P5m+3JXCdX+ygeqfygEWVdhot4jy6sor77CquMOHVp9aTpxSeUUfDdWEeqos/pgM5yKJEaRXW6TsfELGh2UPKHaZ5POLIy34PhRGVp0+ug4SCa4iLJ4plK0aZ7hDgpXFFxUGm7jMRgOFDsZPCh0U1F89ZgSt4cxc1LpgKnFwwH//dicPxqgPtPF4/ffIU6RpDKXkDGEopX2erhUlMidCmheqAuX8bitVJwKEWHYVL+TBoiTCTEVVui4XinLF
VpOkU4K/aSfiPj/NAoqoiOCNCKbLu9Cq84LSJ/ZZANe5nTcpOX2ZnyNuNoX8gZvoAncVJE5PYoTWFLsFAmRpxy3KjxlRyeWulvaXCLKSXqpAzRmJuqXM2dcDikIHMGnXWU+YJvnb9JOfuN2FkpA8uNwiaccA8b9WQgOLG1GlZqs8a5yboER6uyf8aUjRSKypDLU0VNAQuUe3unQQ52/5vAMtDpTrsNpqg6sGJOl/dIEYUfSRO1dUqzJcRbGZigSKVjmvSDYcITZa4SxI9L8aoq2FKCWzw0hIxPU9jfVp12an3nGxB+6TaEfjfRU0s6x5aF2LPSlrprU92ft9LMooh2ePI2QrfBWlJbZlqZjXRWzmZizX/4ajdqWglavQCctMFN1vzAWv+1REIRI3mRycrvNq9xf+CUMZ5MxDSxP7ZMZCuXd/P2D+nhDmjhDi0AdUZv+oWpJpM463zsrofLKZHbFxynu74/WMyr3AqVE2XfG2idm7kNspkr++N6gM04t62Qm0U3bMJJ6NwKv7EiNwCuGY8fqFNsM5hmtuo2ZIlZb9Z9zkrd2msrL/om8NyU1ivXmU6r7oPclugeAeqc1Can8NhRvRyyH09IOXHUd8otzx7dIoIyP2sIFB/QAFdUJdrfO3XXfbECJsnMZI0g8Fn6mR8/GwxslECgrYu6aGAwe+KJD8uubJP6Klk9D3pB2FMWNareIc7pb7rrxjVTQR3cVOQe51PA8zEgCigtKVc9oNJHXLaCnT8FW4ZNz4XVbfSX4Ax9/ZVLLrqfbt9IEvyQ3OF24Ca1blotMKOjtU86zB+nWh0XmfEpo7a7iFQK02ZzA+WHFCQgPpaSACgocFRapKFx06krBLyojQgdO1iPoyWGUAjuijw/DRGkRdyn0jCHnppd/cw9ndMRFnMcX2qzsTrU2xZBkZGBAX7mqjr0piFreVJX10rO5+g33JrVISa56BF2aUU7HgeT7ibFG2uGq3kt8rKejTnt1+v1FOeGvGrDhktTNesuz89YnfhpW0ju0LnZjv9lxs1/oVIGDpUiiWhSZ/YEjLXlSAirORM1+K88fDrT9T98AFGGu2wO4eV1mswmV2oQOT0CPtJcyT70zPIs+jkpcoQ25oapJDeBw45u51fuZMDY6aoqOaZ7o6M8rUG1D11zwWsLn6Lu2vWuCLz5pFGt/0oTBz/3njyurrzONhWGYZBklBEcree1IbSW/+DVokjwlM7OqIzu4xSu4vJqsxn5Wyq/m5rPP8N2bRmHtSTcst8KVTw2eZnhUsAzwWXiKro2h0JN2a2eTCQF61cUIT9jrt57jqYb7E3GvCQvHmM5s9VyjFcdESvaxg1i+WfrfVjll83CcuHnsN71Qi/iT5G8jJqyIyFUyp4euCmrBF2cWOWC0ll+5Tn4FyfWniKTmMtMsKXtMtLiouAFxLUd7a6GDUREkTyAQQSND88SssSsH5Q3cqrtgJBh7LHfssGduFVdTRXsu1TROPiu7jVpjkQ0W2jacqH/eukTN5BjheSOctABZZotWfuuCd6/h7QLUmqj8spSBpKrZCrdfZtLeUQml1thb232tdckYqsQ95CskQ9LOcAzRTmSxhmI3pLreaalOtXuv6Bgp7e6ex0aORtzvR/zdXLznjgfnMCCjvkgRW5oZzl6lcV3Rt2NOeI/w1Y9vsnTsXmALQ+8Z9JkwOLy9z58Nc5UBYZ78KoME76S9zh9Jfvd6oNKRVY8qxRJQ1pqX83bwendj/F8hh38mBR4lxD+xdSUHOCEw3bm+azsB5i7V7wmllk8H6XTZb4sZ3eqboAOHb1gAnMnV14WNI1Xx5HUx3dmC2EFasKUt3DMprfkOMRoi4mX1Q2dZgcctYBMev3WBjAwLUoO9ZTQuYTXERHNpbRneoyuq5uIgN+lOO/p1Tgu9aC3B3XCD+bNmsXo1Pny5ORwsH88jN7sHw4HfTJ0lsmYTt43AiVcWCD60lX7PtvBAMMPjyXGSIwHVGsrdRTFeujVKy02IygYmFkkxi1+DYWxMw6R1yadUp/fdHTOzy7+cYPzBXTelB/gRwqhjzduwEpjuKyducZdQzXn0lerOyybOR/Tg2T3sHYTYSsNbuC9YJpX9U7eMzHp6b6spew91W4TxV9R/VafdbZs/PhFZqGZ62aj5rH4WHDWvIelvsOeVuwM6SHkv5IEhI55UQITRoQemjo9dj0sRfqRI4ao9vXbU74iusFt3JDYHwyWe8ABG8jfj/Y23lSsk3Bnt5aGeT+k/dmNOYP+gJgw9S5Aj8kZhQED0HGAGUpZwKqK5RgURb5EdxMvym8bkkSjLx4XLEnBKSj53OuoAL03u1SgK2tcjG86RfjT7U/9n24f/9QJhahNP5zfDhdnNNI5ndO0ocqvA8D2fwJXTxL2kh0dZ7TGZjw95XLmF6DFcH14+RmDvqCJ1fTbDbyBP49cq2jlKiy4PJBdm4vWtjrfa2W1zKHjvAL2RvHlxqTJVzcFzKZYuYuLXzO0RjXDMogQqSjqdAMjYgsDKyTjc5ZnO3E5TlOQQ9+RopQHNP2WVQrir74VJQmfY+1R/MDBFrtv3th1yUOHuvegjye0VnMMvpS6Ej8PdnvDl3/mhYgJTBx/UnlzPcvjFlQva6qYrpPkU3oOuHGQrhD2MZD+kUGMEqVXKG7KkfEeBtgVAdVC4Uk6vheFsfxvQmE1A003hK2/b3n0u9aouIII7pJ9IO+k9DEcGq2WMJr40SwGCumbd/UOa5Ch0/Vuq3Rhs03rhHLrQytTX9zhXmbq2gbiscT7BSqHREnooJG2uGsJg+zng0QLnLnhXogiCnXOY1zDU6iyaVLV76LREi+d+kukWfS3JToI+KyqUiApx/GivYSWoNUMoWCrraq9BthOXhNtbZ9bESp5LYr4YQcfUtR++mmFmmZC86aI1cBYFxH60Z1+ShUQ/hRiYNCttkQe0ogi+7i9AQt4XQXqbAR5DS3a0O8h9sEq7PngE6NhsqhsjtfKiNZUcLUGw9NZjTr9age5fn5zb1dlJWmZbP61tHGnV3d2o04KVyBm9klta3tbveAZa62fcLgY0kbClsOztmq2VJtReyKH4Nzg4Lr/Z3hyHHAUkD1a4Fvdz9z2sL3Ntj20091n28Pyv9K255cHmqnOzJv+2l/ViW5yL+ogkM2ok2b3kglacf91hC7E5ks6ThA26zhJZffp+goxbpPOt1i1maR+sIziF00EArEZQSh4vJ8ebKDSZ2i2TuMIXiok+UZJO1YihMJdXExbcNoOzuSGEUmBKfAuFX8cVJVhDhpNOpeLwGhvKfLZDC8ik1bDdQxYIl9RCCNmLk/omlaZgwwaZyXe1sDrknRDl0ohgODGDK3oH3ZjpI0Z0DLiltBjaBwuC/Y3hRj3+FqZSO++C2sGr2HC3TWG8D4zjOjA5NtsnhXxbcuQrtehWdf0127oCtTFm+U8psC7XNXp6ZrsufcixEtoQDTrzfaltPryrqDPAQJyVJFftV8w7dd1S+7yiX1TemUdc5ons407Q3g0O0niGUeEESh0BbVISPtZjvgA1TlutVrwn
33YRTY6gG05RZF3LRcD7TNYqwH/YextTPFI9VksUrbtIFaVpcgaKPdgY8ZDtgHQRQUGJgH1VcoiXdHDD4QuUlFo70VEkDFpjYiIdMe3A9p8MOScuNm6zy5u+Hr5EDA4n27Xa61QH0UNFOp1jaZ03y7ZGwA89yDUB6eAnOBnukqr5urY06niyl5ocjmBC7ynR9e0UgkcjpEKlI+/5aK1tw11FamKVDiKsmP4lLUkPZHRq+/HMxg3WIEJZ0Xr/nvBmyLhXMdjq6JxpyJ4zM8cpNXrVYj57k72V16dFIx7Xuxa2toW56b6fnwSF0Zob/OTGgmmRneYGYaRXHGUaXbFczVB2EDzjeIIm4wgxXLjO/8lRqKUGyS0DZgeiqsgfWHXfO6766aQC0MzcVJYvqC+bw7frxfag91xsdUvGsNzL/Byv8EFzo+/DHTaBJs2Qd5nMpEh6p6zaTtgrgIMclalwSz5mMy0nNFB8UKu869wGm8IOavmg39fwywyK9zZZJ796t5stBVYQfT+5wIobOqrdh/xaJ0LuV/8UEqt8qBwoUqQ2rW+b249xAUP0tGGL0Ka33NNurjSZWc7OMpLDKx1m1B5DjfB4VQqckf0+3htB2+A0Mx4ODtyzgIbKN+zdJxWmGdE9C16joJIEk+C2J9WdltCP+L5NKrPFOAPw/1mCDyuvcnbmLn2uVnroeR1KlQfPS1C6HrYKNJ6XkQaFy23RGeoi169G7z6S7R/9vbiaHB8HqFn4jB6fTA83T+HV2eX3JhHUFwXgxA/PlENg3CRWEiBH5Trigo7Rcv0YWks3DoyjkykHkhum5hnt3VGbKvjElpMzc6eRAzCskJYp0RYBnQVu4O3JhE9FL8ruvOJzojEa4hHxbM4OIqn6fgo990Vc8jPjTVK6cGFFU+JzFWnw4hukEYRcIDa34p0UVlgruTmnQhGzBoTId/RoQxxR+vIPUOZ7rbkrfQNuemLlJZaMH/I18BxVaN3cX3ip7xG4evDcoNYYvhBfmoo5r1A0c0MNGbp69amyhuOI6f9M0XfUe823TvuFXsL0SkSdanCt5TI1xZjo/MtCQk7qMiHKfn0ZRlCx8fr28J1rdu6NEl42gqYjTjAHNPkyX0f8llG4wpG2Y01NmWVguwwTTjZuNcDeIPttQVHe7YZ+uQ/eYinTWeZDtiop5l6AvOsQ11zeLV6j2bzuEqmGIRDPXPj3OqyFEyqcRqpX3NYz6CRvbgxbjTdmhis9FCg9HbQdzobVIg+222x8timGay31p7R7AZWa7HBrDx3dcjlBdMqHRizt0GhziMLdi949MjBus3pJNhnL/w3Uvw4r96gh+gmNjJaN+jYwrEtSk+A1gCdKyXsNiYI5si1vOuLrVKHC+87qVbF3zNlf1lEDnoyQb+x+QI2jBGFiOkHlMZzipYUdWF8mdExr4ZGuxcFoaIsbDBPnvef0akJiovjZVnlc+VNKqfBGm0joInZL/19O+DoQOjSXaSUGDbn2jTI5IpP0cLRZWQ+maUG+zJWNrm2G5TsWhSeokGdY+MIZ0KHW4sPvdsfRsMfh4cnb537h3zzAkPu8kDvcG72OuSFs2R15B0l33C7PX+EHszGiw/6KnCWc0mUX+aLJCPcARwjBPv/055uzVtJ9beGf3D85oS7XlOHqN9GFCN8k+eoAqGHl/T2KmCq2LAtcqP/8pRWl9oFzJa0GYgtgs0YWapyu2qhS+yJN9eDLxlIm89xZftwz+O1rprgL/1Jgh6ZnXBZXe/8Aa30kp06bET1+eabr78OdsQzmLzbx2NOZVYnCvWpoCaKSOJ7x7Q0nDvQw72+HRqr09LONEe5trulDoZwjBsd+F4017/mS3LYx0MlTn9cUSJfDG6OtZ1qPDmpTBnQiRdKWou75726Cj95sWp0sOurh0bwivhiAYfUUyY7eLZiqHqemIabaWfnJ69PdgOdO0BcxCgG6wTTNxR0sozdO3W6twHGfrRcIp3++MKjwTALYsBGMxt0qq1qH2MobzLVASdMhShV28dJCUSdraOT1xeHg62edjYYxWWCy7UTcVKOqNv1uDHyDREKviV8wb8jmw2qIymMhdlfwvcCXUKR6SoYl/DPlRuyCT+tUofA75cIeIlXekXXVyzFoGIX5Q7iXSZWntZE9Dg4WWGa4GDjgYo2LisNFs0Yg1hPpxLcWi1Y1Gg92jp+mEM6u6ieCJushXUQ7L06zT7mHJ7TJxARv+C84MTGgZFlZghOW7jgNThNMry0kP5MfC1BGaEiKmhZhwQrUqZsDNAGFhiZsbeDYZylFUJa5CUjgOcWt3lRh5zAZOiZInIt9qzYoKxTRUOssUlJQbe1SiRnWqiUyAGUx0yt48/6FCmqS0B7oFnYFXUsyJWBaRuL2/Ju48IebqyXE2FMsd2PT84jkFHeHhy/jU73z/aPBudmFFGmPawyMp/Ei0rZxBXxyyf4bXFTAFHInDVRbs54B55D/juRenm+IpfcHw6/Pzl7HR2hTVDdwZGwPSq0bE0DsVKTcbIb/A6QZ088dd3ZLl1fv26P5YtB2AKhGoHBBOMUvF0naefcOaHqbkgZRpqa/FrKck+bRlUaONAPsk54JLFsUxVWrlKzgU2VKDV5ppp/6VNB3J9sTVzw81kifGfmEYU12NDNyts0MHH928NQneseRqO1ELW+Ff3d2Q+bDTbH6SZZFmlZpWPigaVwGROTlRs+b8mX4cNyj0eoU9sFoaGuaSdEEnTVFkmOSeY+qQwhVngiAO/oRdReeACc8gPmTUCfHm44DFTwCXNzaM4OG4JxbqWVC1trUF9cE+f4duLuE43dGMoElMwHliSWb1gHcdLBaxS7y7IDRUjeeBOd/CX4L/x2Bt82CAzvvS4G0GoyeG6qbKOFeJJrWzsjoa30iMuiyEfxaHancnqgql4uJ3nfAXQODJJ2spwvccVBliwr2Pro8ihtaHi1i82ckt7K1l9Rzb6l+62XHlfO8H+9OzkaoFCM1z1R/EKK4ndMHtT12IxaBaP0+nMIrj5QYnyDbULN9sNqzwCsGgj1QSZr7Phov6fR4at1KLT3JLEGZmt5BHPnEatMOEyzZN4zKssBAA+vOtwbFUn8QVtMllmWIBXiIjVcQIxzu3rCY4514HymGy8tYWWbVufmsP1gor1y7/KqLeEbho8HGQwNf3UgXBz30/3zd/W8OllwtCgjnpZJsx0joiDfh6pBsdGR70ubWSvwTUWHEM2sIwRSYb+rPH5w/mrnJk46lbKliPdq8sabcGArfIpdMLkX46AcrpckNlQ3/x7k6FVxi0vYpLaPXKWiOmkl4RP8jTLAk2VZ2D/oljY/qjmorlk/wpVG60z31uVJk8ZBgpNmznfkTC0pRcZYGozB4z0jww7LJ0gr9iCUCyPSCIjJdW3VezeECB5AZFMYbISClJf7cwERAAfLPtjhIF+alo0uL2prNqLr6fNiXZ8NrDWJ/Y1hUx47rfdoWjqvkKH9bUKLr7GVuAhjLjAtG9rZ1up3aWmEo+605GAzRgH/NN8jWzHnvRWDT9f2n7Y0jzXDN6QT4rAS
t9DQjEX+sKzpK/t/hzhSPYuYWPTD1JxlxSmsah6ntAfN3hoWcbkRgAV13gtU9puOwyuTQML7Wujw3Hhn/AQba6RF/9bGs82yT1kHtXV/lX8+9xfjJqySZIw7sViJirt4XfBV8PuhRonrLMTQ9i5Bon2oNW/nMjK6PiYhpXDlywVZatQZtD25U0nXKQuf/S2NWqUHeeu9XviUn1G3PsmtxhuHK6xyVzf3bdRtJ5LEJpzqg6J7J1a6VVuutTOzqrMlkxphbX0IFVW4nI2UygpoFEc94u+hxN7Ht5h+vFa6frHbQW0Pz4zDthZsnUi1p6tdee/nK9re1lDNOiv8aEj7vN3YnkTFmw3YxGTLk3rrJ2BdVzsNqUc2wczEMfckmlV1NeEmJuHsepscyU58Udgw4tKErgg8X+Naq5PbdCaXTwFRyfqwN7l81hJ+aPWZrQt04+Gt6/hp4Rtns4R/rG0YerzNxzVcfV5tW9/58epzgUWBtxzDnzLjkEnxbFn0Bg8xwvET73j0yJesWQuweCZaWzx72ueD9RPLB5RJ6bLCrv2+ycLcM29QFChnQOepuT/oLeSzscbLnIz1HBWgqePBCnoYuklRGqZ6VfQCr49dsLNDNgWMqPBpMYslF7XEQ6lDn28543sZsu2bpoOdRmcb97BAZ2xkHx86rQY1MJ3eBHWmGBLiyPVHYm4aQDhmyyiegALPpsnYSNLYk3ogvGFMXjk2GyVJFoS6aUuwCXVdZ4Ng9wAaqzFvMN3L51f0tGOc5SL6/LPG/097wYuuf83UrV0Zl9kbeSyrUafRtHlQ+GtPxWfuVMwzHVfV8KeoleOo/V7auWRH1GFn8XSEc6KLzCtuBmTZl0iQ5ANM1zg+prFOFacsNLe1ApElyaSREQwPMTJsU7K2TkEwzFQCdQHmd5kQ1cXplk9MrK2DFl2aiqgNy4i564duHtjze9Il7Tq+A3ILDSW4OdUss6NVYa0Yu0EUcDc0roA2ufEknSZlFZH4a8it+FWS+s2meQHca+7MIrmrfJN8EhhosCB7ALuwkMVVgRfvHEnSKQcUBB5Ksbf2NVcTK4CK1+ybC47OqVD13z+2NPjUzQDeUtfr9sppcsWjGA/P+PbOcm5aau4U6TVkk8FSrwr0MCa/g5v4Y5ovC1xnBd9wqG7SwkiuQxl7YzxEvqnHIeA8gmanoAB53+gyvSCEgWHiu1ck7SHZqwHXmlVDUPEaNl1A+9/tHxzuvzwcRO/2h++i/cO3J2cH5++Ohpe6iSvHaLzW79w3Eq90CA4iDE0bSlfIm6ymEz7rB/sfoTap7/rNpl7I+Ol4VgLGnFZ2/9ZOWw7cI1gYHzAbOpDpm6+CR8Gzp8+/qkcwoz7sBej6pIMK4BZJpk89ldDTYGScrxBUdA2k+lhh0tEt1cU4zTO9WDETVOpKKmeP0obtyOvxLC/NzOI6TovZmJ6dpvPafPK1w378POfo9dftfGdJPLHB1rqGw+BrDk24LCXqsD4XX2YzPIy4y5d8OJHlbDWVuJrI02wKPuvWNuMWn0Or/PNu8Mp8KVO2XvfwaFHkVT7OZ46Do0byFj2cEf/bvPiADjVs2OZmZ3fYe4L65uB0uPPsq6c7z42u490bzptN/o8mXIr3XJoZPvvlTfysLiAXalr36RDGT8torevC4dJ0plRfROiEOLh0sKzWbT8ITtn74C4ollnGUS2oe5STK2xMNRa+3BlQL2RC1AxBCt3caOIN3+0/+4yZZxFqUxwRKQfJ519/symaO1D2H4gptGbiiksBhF5qUWGc2biS+wlFTk125nGBx6pcSznm1rICNqWvdlGeCBAa6OyHZv+STxAx4RDMmGWRWFoVA50gDVCc94kCSo7InI1yWyqXoscoX6z+6cHr/o/w2Tk62nn9+s/v3u0eHe0Oh//tbG3YIkZswegn1/ils/Xwx52H852Hkz8/fLf78Gj34fC/t3pchmhFhegnfXN9r+y+PCz79B+Zoa+znpz4LtIJOnJRMN71ezhLtSKWTKJxvrgjWHVT/ltE5Q1Gy+zLRSLxzbrHLaKw3sRpJtRj/7CsbwrprtXo+K8AGdO1LlrPR2XN5KlY8WGubZZ1JkOzTCsJoSbsG2n2QVfyUexk2GahJgqBnldWE1Df+7cgYsBMqeOMCfL15alANdR+HcoZUbmgUox7AQ1pjcDWFohTizuMq4SMgTYOXTnIb7OkKG/SBYUzJiUCrxdTMDy+AAz1TQFXpgU2+rzD7ZkzaBv+H6Zz4Ot0c88ojVMWE3+AeBCjZT2gkK+LlM0Ht5iSdIfvbIyrngWvEm32rxgowoIoGURY28BH6EWETE4pwCWjYoNjR4SLLP2EG+qc+roIdhb1trft7zBB9/eZSEwXBDQVA4xx0wtmmO1G+UCkyE7pCexrwPuUE0NqptFGeHTxZRarKwPQKN2iLas+3W+goaQBorgNNIp8M3Ja5DCT4iKxwC2z+PoaVArM8Y2wsOQE8cJESXRcEWPcKVbaYTgkZYQhVnjGnViH0cxQLnLIxGnYM0vJFpzQxcFJI9MHV9MxTaW0PMY2PRmnyGOGS0TpNSVRBk0rqzrMSeRV078PI1bg/VXDF7Kx9BFEhGPOJ6Vq9G0sYKG6ZZo8lQxtChiZr1Q1qBEt0wm63ekS6hkeBRrFpp5i8MxjlyBvEuidEMEF3GvCqBXDdbwMnSL6wMVAcAZk6Et/cDo4O2rxJvJNEH3Fpz5NHy8LHLaIX2n/TnQQIa/ouoo9BM16zhP2CsAfEa0n0Osur1bVVwZB541j2MN5V+PknXp2/V7A0a40A4+rfJ6OI7TEu9y7h1fUMaQv7RR1JHyFAYg/XJssrOQcpRiEzPoFKQk1gsxF+K2Kkl2RnKXFq3q5KvWhBD2QblKjCVplYUtL8RXCL5n0AtXaImbrbyHCqHn1CgrXyyxntSYuyItlBhyazd4lIl5kqItRSB52HKVmhRdoFoGcPcbtC09bxW/ZlGtrVmIZh8wFbb0YRUi/vUDHKaOhYH+yvVBfeYjyAq/YUVAqo66Ia7oyk3mz2k25hOFtIpb4+JPUbkYU2aZZEegMy3rj93MPKNchqjjMgur/PsAFH708OB82z9A08/FVb+E+HmyVbzQizRuhMdaN8oaFLId5FI5veLHzcaF6pRsmb2fhBy1eea2OfrqPVN3bS3rTcrq4iayoPmhghWmEUlE4ODk9PjkfXpyeomVqwN/X59pRXSdGTZOSSaJ4+F59sc8oshosfmzfnEYvV56h4of3BqvOJpS5x+7jb2VDWQQ/a+QRe5f3hrv8wpbkUrrTYs3kMM4g55L02MwbjKAxobfJsDUnJ23aSHDJxUEhSn+Zzz5iHJZ8njT5MlqS4o85SCSwVZBFDXl6j7M3GIeByqDP24Tt3qv3F7WGFMrOjOiI7tlTc8OnhBoTQ8xTl8bs6Kmp8sPrwXf6x/6rV4Oh/nX+w/nL4Y/658uL4Y8eB4Vt2DGFVNhj3sRKtVKh9Wd/CDrjAvgUsISP6RhNus+CDh14oGaAZ4DwKHj2IugYqsIE01h5PH/Z4fb5N3xXgMX
+0bK860r+U8lbgdv4x3haoD9ueZeNKQzLbEJpj5HXk4UzTD7lMNRov8uQk4L6Q/DEuHhPnV4uxBGI3aZKb8gylgbbW8dZ9NTZa5zIwqvORpFkeCKhF6rh4wi/ZLI15XMm9UWZyKVNPK3BHHZ8eom75ILOF14d9MSkCmTh4BS+IbM91kjjdq5NhhOdlY9O0TIVUMcDDpY5hqvt1AFscMzJR1gsfhejZVYtn33Tf/pV8N1Rs2tMfpMoNYXaCpfL62uYITX99D3FNgJKxUWRcMVRJ+zLpbYI9BjPrRESkeT2pTeQB6o/hCiJoW2FWuMnqdrXk14DlHb4n3/A6b/oMN57Vi8wFkSx55APWCPRZs+iVOutxo5wrbU2NJciISr62GiasZVAn0AGnYdlV52joo5Ak3PEBgjROyjaVT+QKRjXK9NdDzSYzipt64v2EG27o/k9BesCLPlOo7Lc8JTFvUq5Eyzuqps8e9H/qv8JWVQLuFGCF8g5iaQEw8MFqtcOrAE1fF2M/KA4Rgu4W+JeCec7w42NgsyACvKhNZIZfuToLrK9pXEiYBCexxxt6jHOCr/Mt23QTToDW/oY6MPchmOirZ0PHeXArDd+7LhxFM+qGKqDfTlRWpYUEVNvE+TuwUcDpzwAAW3e5LshT/pf94PmfbrGJzzFPM4JnSdxzecwlICGBoM/phQLueh7Fv91Skm7W30AddfbBUkoZSnKq0VOdsixKrRJHu5nvTzr2Sx1B3obbGo+SddiWf7mR1GDrykt1HqxTh29vwq08iV+QOHEY2LsBc9RtYA5nSwHQVFmg9K8x2WLOoI5iTij/NN16Uo4nds0m6DUeZOvGkP8oN5GZ9fG1rC6xjamK0c5WHdjlNzg7QFaSnhnTd+FKPJ8dYLVtSTDj9hZySykZ6c1lqsRttWo9e1t69MHOnUW4QL2krCYh2TH4dtwmwBC2zCD0W4wpD4ok/+mnWcjc1vv16m8m2pdjcZX2pPXc8SgsRyb9udVlTeaHh5z872mB37+wdZo38cwEjkUu4+x2ve5l4HFoMh9zArup2lmcD8bja2pEDtUWbdB4cd/cHsf6dP8uHurazUy1OTNiOTffj+jn9TXtRuy1ayjxV7oyJVi4P61lVgvyvdSbJs1f42BXX0r7rdS6b+cCitlRd0x5QPQvmNg/jsRnl1bmBl5MlECi8rEU6DiZR6QO5VRN54EeWYpYDrHwXIel7ZKQk+YhdNXvIdgvtfP6d/GO2We58F4PXizf3F4HiG/Cn4f/LenTptXg+LBDIi9S5Ilu5fIryn+WuXv4LORfZ+EGAnqQyzub9A+6hzoCTvCwxJlX+FTIorLzNqsz/wBYhXF8kWPWjSsYI4yiiPCt9jxprqKQ4vdacaWpvvv5hzYRELYVrxhnk5vqtpjGYUa94AbP599Gk2X7my22O7MsR2UdO2iIn9iPL0nAatMq6XI0expzC79iLSKt07KIUXoQycMMmKm1wbceiCyEg3I6qb/VJ3ZLhZFvihSjDTEd1DgJTuU1iuGu2Hc+m9MvHxZqfOyNJNTt4ZVxz9d65rkRMskDW9HHt1Sg6aSREjbr1aPWi1wYgfy0V87XLen23PCoQonIjtfCaTFeU06BXuxlRxO8ivtIYouN3EzEwae+0kDfq6m3jacbQ0IjGqLBk3vGrU/fy9ZbQ6mmUtXavl+NXSbjFR07ozf9DbC40Yk3nwb2VCBlgus8YTdGBdpnZugoB/w93pSsnclkndcpIsqNw+8qAtkvLRPaZ0adKcEQDkeI1yZBLl40nHq9MmjMkfm+r+fPnVYPgrlVFva9rhTUA/6fHnPhd30kiOFS1OlWGaRODvVN9HxxJ0zX4xVpA2aL6AVlxJXu74bL4GPEax8lZhF9ESq01VgsaRyofHthL95hxc4juJ95U0ym2k4MHyLCoZymihAEuUhYp4jD2mv0wXG+QSW456O2rfS5uGPIzKg/qLfqlCr9pfAGVtWE1iY7Gkl3nw14Xfxhj7doKWQICQ5sHsZCldL27b/CHc/FQGUM2L3rDqkR8PmBhXlCKEmUH8VKJV2OZu0kBe9Nwg6BcjgmjotN4c28bT5uU2SOy80AzBLEyg+r/ux++FWz8Xd4PsbShDDwdFms/pqJi47PIVEsQqPsX5Oijw4e2XT47VECrfRphbU7N4NhglGsRkv61uXaEMtlyOYeugO0z+lvaPb1+CsVMgIrV4bnwPO2vUQHK6jXaQreeH17Mt1uXBYcV6EuYehVq7NGWMBM9YmwZR1xVfUKI7SLcYeVvCwXH8V5YyFbeKootTEHIJGx9YwJ3LqiGRkmYaKOiQUBVuR5T1nyTktSKYrcxAVZws8emZguL3aogISA8W0+nCEzqZnUHWUcKQcewrcTswO4CEue8qr6rxWnR6U6cSmhzvNeQa8xyXxvr7dtZqkBo/bDc7wjw5SS2fmJEOQckZl1Jkv9m9kL8dlyRrcBBpFRYUgl+r2JTMtqc1nIHWkIDqOaZCUp7bMvJsYUUrIXZ+OE+hQXyJ4K1de7czftdeIxbV3jUzwCPkRP35kRPGxD1WJ1Mjjd4ML0pkqGlbqL1lg1XURY7hWLDG1RcBoUeDQWx0pieUQdbDU4yC9fEIChejGa5VbZMLjWqaVAKXUMiUvKZ1+leQffcxDCZVI77nLlxY0FUpLrkuj1M21UNTpEVy9v2G3JM55naLBgrZFO+CWOgbCk2N2wyAn4Bo9Zi4YfE0O1JQDsAWNoujZdKSN9degYk0pmaX1ztpKOyQ6PzbdnGkdZBT4WIX+woLv3yu6vX9fT1LaarP6fhh1yALFpxUFryqV5RMqzRuUhyY8Asb79/b40uKbJ3FWqqNR1Qm10XPM5omcz8F7W/x24i5L8DbBEsMD6VuQnforRoKdzSxAakAkEga63gPwxSIvqq7BFxwnhsp7tKplFDSATjabbqUx3yxg2vncmW8if+0G+8GLHUoUjBBULDz0RwSdrQIOWXSVgAYM1JyJjm5hTDGnHPTgRE3Z5z3PlESNe5Q7Xh/mkbQJ4UUbBDrgUktOZ661cRyP80JxFYvLkB2Wj8SMZASKqTAjQYzs2SON5QZuDZ5CTZHqkNQcjXDNr6/tO4GmZuQEOGW9QvI6bxa/nc+3t/ZVvOGQYv3ITqzUFp0nlEPnFdKnLbuftpIKes3zr/+th5kW9hgvVFvrGM1UBbdvaN7eoNH87W7xbpB4RiC9vpPcDoATl3/C0oSAXmIgEFfXaxCsJYKORGzZCrbY7eCyvJkln6K/LfMq6XziqKefJHtvedV1UVQBb80MahzjgcxgFd+bUYnGXSQNAbcNscu6TC8Id8YhK5Z2JFwKRyv5Iogk4qLHCWeC8AlMkyfljU/pVc0YtVe14w2/IwNsye/NotvaVEaRR/ALDcrakbtHkoJtg730v+EwdujiAowCh5UszhJ0U3Qlf6YBB9YLVR93WNTHdK2mMZSi6LdETRJiaxcC7t9mjuw1Ydvi9NstkEdrSwPiI9cELtWpsxLe0Qk4JiNJIV555NNy3hRncErZ8WDR9tnJmyFiP3WdRY
YU/GSGsjSyKmuR3rVqUsIuQ9xfManMcqvDNa8itAnFHFIb+uaUl2FtmTif2VrrRPJnwjOIWyR9UY6cNuDF0cXh+cHhwfHAe2pR0BnmpoEoJEeytI162BJvNsL8KBL23GWpxd6qtkxrHJqhn+qfHmfHsorSTD00LPJHcZYuoDkxsGv9KAnRdJNkOvEh6PKBmBl09Xw2iaAGBvJyEm2Rkx7qkJx3KZW7QSjV8IUeuTNE3IlUb1OCxKXwIbnTUbaJaEbXI1vh82chM5G7BGhXQSOOK7XRTHxXl1L1AIa5yhx907bdO7hvguqXobsRyqZppZ1MHNjWblo9bCNRXWfrYbkrN4uN5nqewtYdyYNrpY0kZbUjGSlIAAV5AiTvniTtneeYfyIdsRGItAI8K5ujCtLvG/dq4f8n1XzxpEr4LhEGdizHRVyNb55QLC90sH0iXsNPpACeVJRPRnGZjvuLOx96KncGuVJzNqTFLL4b5fkHFX9DkJQQ0V6klLPyx/ltcvh/9U/AArrW/zldrEHMgPiKUh+g6M87tAQAFomLwMCq+tmcBuHpj+fvTo5pFCiOsh4ae1Ys7lT0ZWvw6spXsjOGuw5XrWtefqr3NHnadvoiAv4naGpSIi07oYcyfNGqDUYDgB7qsLti8hpdQnfoXbmYKQi3xbz31vddnpu1lrauCZMHKwVDBy2TlGrRnB3zK8v/pXnvmuKOoWJNXsboCNsTQwDueouSJogkukQnZKOqzgNSBxok4cOSSCuJPqfSsOAfc0o19muf+LpKwnAbsCTDNZefGg2tzmjlaUqLiNZBXFP+3iyNhtuAKURa78wrVmYFI8ZcWkY0QJOG3mgkajIzVkh6CjJKWW15yCagPfJea6tNfFUwuvCRfDwynTcyuMof8xpzTEdng/4cmXPHjlutPmWyiNLJJ556fbT8d8I933n8tS76p2DnmV+I9CD/sNzT+MPeBe8udwVQS/TW1kTc7R5jJlktHRE/OJatyUwoMvi6dGrNblmxt60ppnONmFo+FjfnlXemyvGxc2QvkqV5DHVwOvBEhKUETo64rA9+DRXfBk/neKyTO11Wh8j6m1MTj6/2CL3GC+CJew7CjTIgwbeXsSSYssrFpWthZM1AT/P6ephOomLUq93BbsmXSazJBTkDNQ6NzGHAdCNmoH8OQ4h5PZxwFyqHC5WKRyVFgvMopVjVnlIqkim8oa3RTQjS7u7lTTFy34tKTSOb+IWyec2I3odnnsFD4NEie1JelF/7PqCl8yjPOlf75kOkhnjdcMQT57l6i3VJuSqNqCYOpRJlVwI6bQqDx9ayNSuM5xNnidJJcSx5exsh1fGzrQWPZaZiyECb6ZhTMOKBRonBNfm4DkNZ6oDFceVAEikmqqElGFinDgkXlOWNJd5SN9mw7TrKyCs0dPtesQsL7qXQ675yppDvUOnqgUvrJlOTFzi9zENu7zyiw8nHwFd/yppXmpw441B0fXYscfCpQz1Y+QrURzqUZhLqaV0p5aRlFeGYjv787uzEdEv/JvAv3xwHXtTnPx3l7XR5VXs+PfO4v/EoPlZxw13XKdtpqh4zLyQc9PtBwlADHrsmDAwzXSt0sRxq94JJju4kWX7rG1FtDWq9V6ZLqMR40iV9j6d9OgkEfYq8knfpFVLzOzXh9amL+qJtY/zniy6iqWb5S2tZOT/r0PmIQmxrX1m6MCwtYrfMQGuhw3acjeq03fCOQm9S2xkAq+qD1i3fAJP3KSeNogTc5E7ArAHt7rPkutJps/PmzSPmfnV5ioqZVAlGj6RM2x5NVOowiyX3LHTmhnHxJAkFfMgBQ7gVzD36BTOXJwnO3kU+m3W6phm45YKeN04GxlBv6zAS1MATwd+hpa4dXzwJYJ/gTALz4CEU+QzFaaVTyiig7EIBwnoKopsP2vHJ+WA32B9XfHnU7q44KNB5IW8214pQuO3A3Gk6u5Lx2KBok4jtBMRy2A2PByxje942rn1xT9BzS1GYdkLXdaAGiKPREw9rGgDluMC+aMJhYSpMY9e1yR51d5sX5un16K05on83IDMyFuJ1i8fetfp9D/lNiydbfK6vhZTg1dHr3YdlnfxP2QtruaURjLABuF0oNGvyeaxPHpKeaBx+ww60SJNre0X8stmjdWJqs8+GsnGWsJqSZB/RRFhZrhGu1dq0Cvtt1rDYqGzbsnJMX2iVtvUB7ybjt2ObMrYWqB05W8nW+r2tt8LU/t1e8JS5gnIBtSDwoUmLEi6rpiCTWmeD/ObOmNrjAhLKeK/29N1TO6NSOumPz41gmyzeVUPV1GqX0jntzmu3jtadRG3UtsP8xqKEX8GyAFGP1gJqxGDumA7RSpp7wBPsWjxNMUG2N+cAjJaZvEqeGwqxcQTr0Yl1dGQjO96Nuu9hBC6OQ7OAiOPYtvnUYrgU3Bhle0T9BqarSgdDkdB3XSK4RSlc+gOeELhyV4UMhxlRVXesSaC6ZIGqsaGfUR2ajrDJlvMRmpPTEqOMNiIECnZOVbuSwlOwfelgKi/l7QHeannao+gSsoOyV6yZq1zOI6scxLGPCR9yWZkA8daP+fvBgwcqDagcqZARRDoinXADljSi50ccIjKCyfD/AVBLAwQUAAAACACCEJlQ3AEAIsAdAACPdgAAJAAAAGFuc2libGUvbW9kdWxlX3V0aWxzL3NpeC9fX2luaXRfXy5wea09/XPbtpK/66/AKdMx9Z7KS+y0c5OpO1VspdE9f51sN+3leTgUCVmsKZIlSNt6b+5/v90FQIKfkuV4MhEB7C4Wi8Vi8cHlcDi8zYIwyAIu2DJO2VMKz9E982Kfs2zlZizNI8HiiF1tshX8HDI38tnRcDgcvGE3q0BIUPgVWcqfIvYUZCvABCJCEnBTzqI4Yz5fBhH3S1pHzMqjANHHQCuMo/sx45k3Yos8A1S+IVQ3DJlYBTz0AXexYY88FQHQ8FbcexA2k0zAPzcUMTAHpPIEeVkDaOTHKaAtg1C15okfKHbuY2xnFrN17AfLDbIV5ymLn5BCsgmDKPvA/EC4i5Af55Hi/vtHNw0wawBQJ3GySYP7VcYs4Prw7bu338N/P7CPPPrTXQfQTp4BtzFQBOgrnq4DQbwDtyuecmjNfepGGffHbJlyzuIlNMtN7/kYGXOjDUsIn8WLzA0iZNgFeScbIAewGbZcxMvsiQQF/eIKEXuBCxSZH3v5mkeZm2GNKAHBLJAqG14rjOGIqvG5GwI9YBdLdSH1YwwdkXIQZuAhlTEAeWHuIx+6OAzWgaoD0UkcAsgB4VxAO5DbsZIx/HJqXJIvwkCsxihfIA79DZkCMz0eIRa05T9BGwUPkTWggfpJLS45JCisJ0HBZkpUAnOeVvG62poAeVrmaQTVcsLyYxAd1fon9zLMQYRlHIbxkxwBkR9gu8QH6r4bKHUX8SOnJsluBz0CjiUf2BdJ2cWqSKxQfxdcSQ6qBjm7lValyIPIQA8CN2RJnFKl9dbakonPU3Z9+enmy2Q+ZbNrdjW//G12Oj1lw8k1pIdj9mV28/ny9oYBxHxycfMHu/zEJhd/s
H/MLk7HbPr71Xx6fc0u50Bsdn51NptC7uzi5Oz2dHbxK/sImBeXN+xsdj67AbI3l1SlIjabXiO58+n85DMkJx9nZ7ObP3DwfprdXCDdT5dzNmFXk/nN7OT2bDJnV7fzq8vrKbBwCoQvZhef5lDP9Hx6cWNDvZDHpr9Bgl1/npydYWVAbXILbZgjl+zk8uqP+ezXzzfs8+XZ6RQyP06Bu8nHs6msDJp2cjaZnY/Z6eR88uuUsC6BDrYQASWP7MvnKWZinRP4d3Izu7zAxpxcXtzMITmGts5vCuQvs+vpmE3ms2sUy6f55Tk2EwULOJdEBjAvppIOCr3aNwCC6dvraUGSnU4nZ0DtGpFlQzW4PRgsU1BZx1nmWZ5yx2HBGjUBNE7EIQwPR6YHA5W/zCMvi+NQ6IwALE0lIwZldLM41WmxKYqyTcLFYOA4bg6DOIXajtmwYbTYTwuV9UtC9tqO0/ufh4CmTLDEe2e/e2u/HQ7QIN4KvsxDmkgAZgMq7qaCFyYbbMASzB4qOuq4Pbj64xBIAGe2JhlEy/jr2zt2fMwOofiou/gIi9+3ln84vGM/HzPraMzej0BkSwagHwYM/tDcRPcOSQBxs3RM+WDt+T1PiwJIywIvBJtaZOOvzM/4c0bZkgrlLYLITTc6d7HJQMqYfz75/Xr2v1PF69p9FsG/+ICHgrcytXAFl1ntvFnIHM2XoxYWLeJR9rF9gkU38DhqMK2m3hbGsTmy4iUxnIRuBl26tsFIpZlAy28N/3Qf3eFI8o9/b9h/SwfBDZ/cjUDjL9jRIZDOhF1AlZKANljWO/bTT+zo3Yh9z95JDkuhSKKz7ADMagw2dYFTeMxWLlhglF+8tEgE7D+Odfpq4wh8dLJRWSVJh/1uxWTngeOiBP9gWodRF3JQZgtmm6XRIv2XchiSEVO8FsVZuqnCAhHrd2s0KjL5s8eTjF2Cci5hUpmmaZxWUd6AhL4HCVUyt8qoKSdJ68f3O9H68ahGy+ch+x3GL8nC9X0HXAcLDcwYnQglEXD4Jn7dq0AnRZoiHM7oEyIoZtgOUiELAb+auLQ/DngDecityF3zkvpM2iZZNlZSJxcNZkGZy9wlmCbKgE4FjzLOikodTdyRdAdG19GoIwriKxbeAT9SLZwz91+bUy68tKYfUi/A5cqUYoyZwS7+YaaNedBE/DHx7nmJBnRhNCYGJnhUeZjhQEMSDiTj8JFbI4M0yDcjlsZlPWOFOKJxET3GDzDCHEdgXXa3Wr4pHWQcjCy/D8ERQxcbEu5jHPgCfXwlagS7B1cTrFeNCjjU8WMB5KPMggTmF7s2nkLNOqiAtEyO0YjG6JhkygFsGR4JYBtSo76UMig68BwcMv9c6lPZmVt6ETolBJc74k/HF3FU6dQcZk3LICpZH9kFlWojzKnFyAPKKHAk3rQnWKhUxswmEYGWYhl/6hnoBiA0w2ioVqSaHVOCqw0+TWVU01vsvUJYmDAoKdwezX10wxzLFSHL0cOZKDUUvKxlLFFHda4ptzpcVW/LCU4maIbbfeRSJxu0tnSybGxhz1RS6bYuMCv3g7Q5nWArcX7+OlQY4KzDI1YFz3c1uL8DID5JC4MOFaZw8SCrX6OKOq4ePOKuLjgiI5kCPzNfELtgMASs2kJYhMOklAa0xg/kGGuQRF7vqgOtGKwvHGuoBjTe5APmYD00+mR+kWwfjEXFe49HrLl3TDpq7PWPS4Rqoy57p4s8AOk2dwNpPgiqhRH8a1qDFkxdVbMZmrSC3dnItLV6e4MKiC6p1vndasoK+9NpzDSkGgXaDq2NcW5LW1TYlOvg+RzArtxsJX0QXncGtI8xYWsAhGkpW6n1GXojsV6rieDZxkEkaEMA/F5c2Cu/w5aU5IYZ1QtIIUdXCifmq+nV0dtDtgwiH0giehi78Ahr5EwP2QXuPKyhcnSFiRpttel9OfsHuQMUhjC1BoJ28dSyiDYT1J5b0aCuQQutUGJ1ejyeGlQV5iGKnyJVjGbk3/9n1IYOptFttDc0Zn+DdWOIlIRRIZo9nY+mr4Bp0dVKnV9LVv/OhvYQ/te4sHbEGg2G0FmrMKRBm3Poy2oq68B+ba9jTNpUt3wwvMx2N6utCsDgrs4xEq57ptsa23Ahu1tftlXDKrfuH3zT4tClbiA4k6OMiq0hjQip7ODPg7rg3ixWoge7KVLDWUGUl7eEtjk9WF/jiHChYUimta3GkuGFjax4rdKEKpfBEH7ZIqPPAxFEuBHocYsGheGH1talkiz83+aENa05ATpSyoUPU47ZtraqYVITS2XkBMJJXO/Bve/pAW068W8uaWRpDmofyO1NBPd1V9MaRZG0y3X6FwDjXG2xggVexbSBC9IEg5imuHsrEu4xabQFmcUCWZnHI/s9Ay45Wtr3PxiLX5NB1ciVKwrvtKPXyHXDkQuum6GUCIqbKr3ymNdHppTNX3mQ4kFAYAoWRVJMFNw3me3hDvWcPwJGDrPBRo46YQ67ViuBaaQm4jz1lBNPrUFyAk2RK4q8wcApZsDj9jlU+7bGVEsKPcv4WpjutzHNYiYNbZy9YIiST6o7tlzqS8mTg4rMrd30AZlTQoP6Wp1ZQq75sUPvmnbaZpfojlcSQYz/FzmjcSv+MgihsQSvd2AxscghH0YzFZQw6qmX1tKF8dsgWE3UIM1kB+0gSvJMLjkUb3IFYjKauk9OAScfOqlBfVELObAlXThrN+kXkwLAnw4SoH3ek49AsSj/l7l5+diPvmjHL58WXfipG93zLTJ8LoDkQxcpbkxgLSTl4AqDxVCtad6TWacCIk743dT93GsjWxwbdGGKVcifnb/yOCP0JIBFNvUr5uODLOrANodPbSh1YNwKnp4GHmmc+ezFYchpW7GTVQQ/A0dXo+rnHVElfxq5TO2A/vwNFeFfwZZRoQDwp5uEg1vhXLa/x2zUIM2kSVvtsFT4MFvaBuvF0TK4T/C8hyzdCaWvZLodIdmkXIk82Tj43AbnL9bOfSRHNzzjL/zYmNUG7vj5er1xslXKXRrT9XQVoI3EKssSmOTih4D/6aaSQUzgaIQEFttl8RYKQgoDH2vIogN1HTp4RJYpXMyg9AY8jCLHLkA6iZR98fnm/OyqSBF60t0xkvkwgBo0x5WWy5I2TL52g9BZBzDrr3PQFaiESFC+fT47n543sxHcLsG30I3iqIM0+DBd1CtIWyrAI7Iq4ZtKDhEkoC2E8BSvSuhjJYcIEVAbIYT9fHNzdc3TR9ltzRzqDiGTbTROfp1VERoZ2yhck+9ZxWnL20bHuwq8BznPGY+JfGpD+CvnOcH8Dz20gaQ8SZVe4mMrjIi9B56JkndKX3czatgJw2L02IrsgVwhBLtRjz1gjh+4YUw277R4UmW2KutDx1s8JYlPkGqSMWD6SAkvxYnO1/p+rdJa2zW5ClwvQdKLkjupJ03+KnB9BLOAHI4b+aPRMbcXK3sgcPWjsSDZhwWzF93AEbIjK0lNo8zs7eHIlzTUQ9G7Ua8CAQdhnHqrOFZWOns4wZwTndPYXTX4MjD7q1iv8VJT2SMnlKP7qK8K
A7NX+g9VFc0eXqWkyzhSyvipeFIE4o45SKOuuRCwFlzESnvOZfpjXNEmA6q/WXXtzh6q+t0tu636nqch2DE5XQNpvWym/UTaTrZrEApDzuF9JDmu9XtJaghNUqa7SfYR6y3r4TKNF3FWOiu1pGLMzG2j9bwO08Qz3BaZoSYImehzXRS+qE1zv5+fza9OyolOESqnuju8rub77CmI/PhJ0EYUrM49Vuz71y7R4NWlIUAfHQ7lrlBzq+Lveq+iwSXgKZfZUY+qLXeDgXlG2KCp7hnpU9lyH0aevqr7BeUhbXUzUh7Qtu9GFttAtrmzLzGGUgVwB7eoZTTA6ybyuMfgo3miidtK9bzBQJ6vHFe3khpaNxwN2vmiUs3YcGScbtJRhjzVgCLHHHL77FXRtr0+D6oOYDx7GVQq2H23irz4OV2BUINju1Ew1+fgyWf7o0tm/xKvwQ33QQYYWPyk7v2eyH/GQbQnqgn/UlyB4t4TN49eU3Me7V13sfdTWPXd0ZwkzMXLcfNoz0oV4r7VprCwxs3sl6OSdGGRkm72xM0KXX4pZl6ZGHduqwCjhiNorebHFysVEoh4Fsbe3ugA4a73sh6Ebsr75dgp3kwLHreOqLvqTNprp6vTau8k0jLRlhNhL2b71NjL16Bj+tsyzW3xPEdNL1f5uu1uqpmr5bt1xiUP9JvNuNKfrcy4lLX7jHs7P5tWneTDXn/ZQMU9kn1xT2B9AyP1Jo6vV/rUrnXUF1Ta9ba9tbvpLeHupbeE2au37Xy9VG+VsnTpbVXE2/S2WAu1kdiqtykH8ySyb6a5il5Vd1Xm7toLeHHCo3YF1FV0nvGB+x+GDhLgHTq8hQQeGfivIYAHvdi5hwDeqv5b8AHqUNPYB/+eZ0kaP6ut+Jdiz1XxPi2/JKGdBni9ocuCbCGB9ueUL13w9sl+fHYjP9yzI5DWnPvEzmvpyNOQqzT2uBB7tg2wnzevYYS201/ZkCswCE9x6p/ffxMaX4Jspfprzt1wvRfNyUJkqetl0L7Am+TZ6rWN/CaEqLu+CSXdvtMAjy2/RQO/DSVq4bdj6rX4168hgHu2r8J/Hf8nrrfir6RxG9FFxdeKkezm6ywVQKUcUvxxrxkIoLyQu1Ge7IMN3mt99n1BN7qRt3kVBZw7N85ig1cit+G3+69dHs9uHqzC3suHVbi9XmwXdy/1YwvnsdOTrQl6my+rwevebEFmB39WJHH0DTc/NcG6Rytzd3dpQZz6aL1FnVQdHfoIuF4YC74CD2RfAviK8Wtwuz3ZEr1rLHTIatfBINH3HA0Sectw6GDw5eNBK1/PgKhKe/uIUPDNIaEJbR8T5SHUtxsWxsFWdWSUBbsPjjki4fRdXvfZ6TytQ+F6WNhR50oK+6ldid+veT2cvlj5zF7u1r/mgeRWFaz2RDe5bYrY+e4jaNBJyt2MM7ehaXTVXiSup+P6cMHXC3xBB+/h62vyJdTOt60RiDgv38tCSRsX09v2B9Xr/uhf7Yind2IQT81jO2KWs57ElQN+Z+TCPhB22VG7Eqh2LRLpeU1U3cj/ekAYB2N2QO3GB9UM+SiZoueS/sHdC5W9W8F3VGpSVmyOrOtRHrFWYwa4EUYnWeM7G6VSavVqPZNGEGUpiJyqg15Bl7NLLXjAnEpkNRRJpVlR5V0g/Zp6pdbyfZwt76Y3XitCQyZrw171Msf5+uK3oqqVWcMIpJV7K6I7Zt+lQ/Ydo1aPRygP801XB1+KcfBuNwZkcejJcYZGGSqZLMMnLJOFBIkuUZ5yWa4SBbqEwLdAZDE8Vct8uWMhZLlOVWHuw3jhhgpEJRCifFOp2oRgTY9tLYAifOxqgJlua0GR6GpCJaOjDWYap+xCH1z/ES9MOHjrGmPv0Lu2z9lAdf8F9J7R/3LYVDGsIGsagyCzkYw1GuAPEK1jmTx4bhhigDIA04899WsQfOO1WbEbbSzsdNwGdoboGjzgvFRoOb2h+SBfZ40o5AzRgfJ1Gss3f0w91a9I5dEiziPf0ZFDLJXRZEAVyM72aGpzJK56D+xYBbo5pyTOhaWFVfC6NomhApt4oWjWhkUSfVZ2oHTeDFXduxW2UusGhy38tXZHvaklLP1XBiUYvUIIHZXIV/URQ3UG9bqWU0t0HayctLYlrI5RlaXf50dYPR0OOjW5iE/T1gPGjAVW/1eeyZBuqpRhuDp67VPFE9rojilAAAt1lqZyEkBBGhVBBdSi98cBhN5xK8zWyMRSxqobQ7YSMXQFhhFrRTMNWx1T2rYeNACo4RgmrxtPA9VwS0vYjapg2iwAGq0HvhEWvvb9t4enNnsHdHybgAjC0Gcso6Ag2/EVWBuFgC6ObSMgodrww0Bk2/EllIn/GPAnbJYpO6kzqOGANMTSoQEtG9ENL8tNDOK6G4GKAb5q0LZ3CokjLfvkZV0isc0eeVGHSHSjP17UHRLb6I1dO0NDvKhDCpgXdEoBgjiFjdMCh8XiXE/IrPAtwC+UsbCoDWTYfBmn0003Nl6BNOlIlrZQUm3bRos4NY1tH00L2NPRfWCdGKTb6VNPvYz+V8n6XU8VTUu0sFrmP2HLS2HWEO8LRd+/MxdteSsGZWA8vRU6DPA/ZeioIFmaezIqWxBlhxgVUMbYg1z7mn6s4c8fhyMbF9SqptBEQ5RDwDXVB7tAGdy36spu5PNnijlowgEMgkoIgNYA+GxyGcSU0i9tIkRs6xQVfURMXaIS0j3GwEJpdgIzaTb9K3dDdJHredKTVnehK9Ea392xn47ZO+NGscSd01vrc37Pn0uCRmYybGDUYAnq3M28FVeefDUywo4V7VTPsGZP21WLMt6wL3H64KbS88Dwr5kb+W4IDhZbgAoI8KxWW1ROBW+0hJ3yJHQ9bqUH//znwZjhDzxggAIF43DhuYnevii0VD7U1RKzyjYovbMWLRzEqQ/5X9/emaa4UEFrkS/HLOhAy5dfg7tRQyeLl5PxshgGw7WKt1dtfDl8jNgjU2srCmrobqmrOrOqyz0qS3sBhsq+TBt30sTC1i3G6KcisywkwxaSA1qU51SOb2RVytWGS60JKubE39z0XsiJEJ9UD9TCIUnQhhRGVh27UpfR3L0rM2hsq+119XTWYE4C/Jl7jhGuTu7h6NeeQfoIYNr/lNNmjZUlakobs2zREr1HxsFrjYylQ+RliTVqoMA6CG83cLQDGARYhsPJFm0bRhIBg52UOFa2MGJglWCDmnmihltyNwdkht66cFQ4NieMPZWoBi6ZAlKecRXwHabjcrPYNiOTBEtNsV0Cy1TFkIKpALdNKW0ZMVloJCkKxxLcXuolRQUKqyJ2u6OQqfKSDqTrZHDCpeJywy7cQrogqxg1tvq4Z4E48JdJCdOZigTT4pUDWcM7iLCTfsk+rRQPlFFom1wxEvKxjIR8OPrQUh+Sc3DH1FL08NmR3toHPWeXeU1ZmEpWS8ud2BJZMkqSbWX051fxuUPF5gjYgWJjDA0SmEOyXoNBEEO5XTLCLlEoFblh/TK/YaAqAw5Dzkf86XuRbUIuMcrdDPQcithv7yn226H9Q2UgLhN
gVRK2kzihADN4AIrSF5kf51nFAgF4q6pLO1vd2cHvRHDLdzO3trWD8RDBZBlvjyHQ2Ahq3RJiGUGkUyxJVgDesJmMFUVfcVi5GF6PkY8e0BcjyFvXHpGMiRdkdp0py+BoCcMHiY1Qag1mCKHOviLfjbFMbM2TNts1C6r/6DxHGEqE7AxlbqE5DYaWGq83fmRBeyg/2jDsEjX+6IWOwfpYUagyAABGfxdFT26Ee3CB2n/6hOGHikLB68oHOY3moSngiSmwhjYZXQGgZU80BVDj5ybNqzEnyfLUlJNIikqUW/0nhz5ugaoTGeR0nYsMIzEiq/jpBlcFbx8a4dciv9ZyTq+CN1qOgDu2HEC/dcuJ5I4tR053aDlUIpvd5q4Y5IIIDGrg48YFXn1lgJLLSJj4NQ80c9aoShZ5NxtZcyiQnfSefJJG7U1hAkyfMHcRqP5bpNx9MBntZhIMeRhERuR9a/jPaFgdaPJ43oBgw76wfiVJJNVKCSi0DLamusnhSlgtKtqEl0quGCgnHOiIYKz7gkfQq6mbkcRFc54IWhSZzAwwU5WLzAYqhvGhLGBj1On4/ET+xJGq2JHz57GaeUuffutMbE6jYLN7p9FlmItVfdbFPIAnA1mCSoaaSxRDQpIaTu3LHiMJ5pkALXPvUHmSY3k6Locgzpt0Eqii9Y/anca3pfDej0qP5Sl1E2Hh/wnGKYR1VnAfcf+4XLZ/mU+urqZzZ3J9Pfv1Ar/xcl2/RJAnPn6dqAXp9up0cjO9NuSuK8Uo0PUTpGVlv6CDtbGubgT4VXRYaClwutminlvcHtYIqapYMvxJqr7JkFrO0goNAwfTkZmFT9Dd5A+VFxiKazuYrw7XyJVxWYFa+HYqpH8qY0bqwP4xLuuS0I3o2wwfyGVa4JV7FvjcRfXBj125D1gLxcBStAr6NHzRuof8ERZE+DEqdbSrvk6kvvJEl4do10lglGM86ypCbr6hal0PQ08anFOZpFaVRP3EEM8BnxzH8kKh44dj2E+KpgQ5/qjNNSWa8m4EKwDNLQI8aLQ16YKBMTvIOG4k4bdPKOdgzKzRmP37/yq3WioM05PRb9Qmn3ux3JZGEdKRq/pWV39HmhpePYyN0+AeFiPky0FJceJuY6w081MRYSwP8TQ8WagDx6ECxzmo+loEvau3hcBdLoKu9ys93FUHF+5pUvXAEF16xERLfPCCZzSTBcaoKQQCONAyOKj7Ug24J5iVU75sATUURvapFK68BDVWoiYdwmRBuKJN2gBIFZHfRnIO9azvlEG6rYeqstDvxNAWGkvyy3L4KQ9NwSGTD32In2JQEW8Fyyk2eLHkI2K3Zt4RftFDx1DGD/DpoON4kyihDdPqR/y0YkKf34dqXwfbPlY8QVGVCUMK9CUQ3JmkoONJEm70h0GKxskvqhkGQIuAtuAOKxtmB6qiA+mm1i+atDmUv+GiXHmUv/T0AvPcCImC74p8BtxnzWVR5W8IrH8nAMFzc8G1VKMD3VdaKtbIHrLv+mnpduhYuPX8ss+Pizwi3oCUHXHMQne98F2K//tBRwEuqABLalV3kGfL7//roKK5RGogP12I4W0yrr6ug0FVijjD6uNctc87uvI2BTp++tODKmwz3utLMEBznugsdQWaiJABNsDB6YnNOM+NC6ZqXpOHI1fTK4ah8VHN8Pn9D+8Ig5AJqbjCiGGKZYRndvTjj+yXOac4Pf6E3AFsGbo7ahsRBEW+HMg14XhbbtS0i7rMLuL4O4K7qbeiPUT5mT7NtOKN/XKrPxX5m/5S5BumrifCqITRKoLnlu8IoKUNIrqcyzdMKl6SxoC/FrbqCfq45QqNT0TWHAiRGyBvmct7j2Usb/VZQh6pMOfct5l1DYqQkI9COo0fakTSZLLjNQdQV0DnF041Mkr9IyWiHPwi9HPFy6/AVz4N9pmn/EAUNYQws+MHCrgQH9gkkmIZ6olnqD99SIKSOrPGzy0aBGEsL6FNcpalkzSUEdQCDIL5eqJBD+OVhm85pY1QV+nDodgSg15RXaNfjG4x+k9TBAsahvRNMoMYFPIUP8GJ59VaUmRqiw/gmVZRWUC61G1p+FFhMihgVFuA7WHrXlRx+1d+qKEcHrVJnM6YzQ77GlRn8XJpi6BGp+PnHoMII4uP0T8iyZhfwyCLUpMjwzdu7EGlQpsU2beKC8ujwf8DUEsDBBQAAAAIAIIQmVBNwtGG6wEAAOUDAAAsAAAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvcGFyc2luZy9jb252ZXJ0X2Jvb2wucHmFU9+LGjEQft+/YuCQzYKN2nsoCD7Yw5OCqPS8Qp+WuM56OWKyJNlT76/vJOu2a+2PECbJJPPlm2+SO3gw1dnK/Ysfw8fh6FMfptrJrUJYW/OKhU/uYCEL1A7HMF8+wxw1WqFgXW+VLNo9eLsHY0EJjxaYQ4SH1fr7l+U8eF+8r9x4MDgej3yva27sfqCaODfYV+rDPR9yf/KQJUlpzQFEQ4EfzK5WmNdeKsedPIE8VMZ62Eot7Dn35wr74PHk4/QfsXk41EZ7E5dJknxerRaz6fIp33x9nsEECOA9sPKMpee0D+kZXRiMDnYUjLc1xpHMiDof9mFDviz7hfY4XTz9DhcRtIloZRmGYTClUC7iBRdBDQPeY3B2AAnqiimvtTSaXd9H2iU7LGFrjEKh2ZtQNanjvJWFn0SK4wSoyRKkk9p5oQtsj4Woy35oFn1tNcTNJHq1sQeh5Dvu8uglTs3uXyHZz8L0uwXLOtf8AfRSnBYErTXWTVJX02RPrys3Nm9ySjOuzBEty3hwVCxLWjI3uFJfK3iTadAn+lD9L76pLj1rbfxF3hu4WMGGjhWSvseGMp+FXFjacyRWDBZBQrlrS8YBvnXXdE4Xqt7Rx+u5FHqkaFecjF4Ndf5qpGYWK8tkBiXxkl26Gb2jH1BLAwQUAAAACACCEJlQAAAAAAIAAAAAAAAAKAAAAGFuc2libGUvbW9kdWxlX3V0aWxzL3BhcnNpbmcvX19pbml0X18ucHkDAFBLAwQUAAAACACCEJlQxnERJegGAADWDwAAIgAAAGFuc2libGUvbW9kdWxlX3V0aWxzL3B5Y29tcGF0MjQucHmlV9tu48gRfddXFDwPkrIcemaSWWyUTBBaom0isqiQlL3GYiFQYstqDC9CN2lbWOy/51STlCiPvRkkejDJ7q5TVadu7XcUbaWmdZEIwnMXq5KKDTm5lqtUWLSqSl6Pc5J5InYCf/ISx7NdkePN7r2rAVhQrqs0VrSRqSCdy91OlBYkEyr5RHeZijzdWwx8EU4olWuRa5Ew2E2RVKnQtC8qelKyFFRpmT/UEAfQp61cb1lcZCuRJCKhZJ/HmVzHabqn1b61H4C6lGlKK5EWjFIASFBcldtCsZ/8lRmVtaVZvKdYa/mQ85bEmae8tQ9gjTy7nwrY9lSor7AaO+Nit1fyYVvSYD2kTx8+/mhRVOitLOhflYrLIpP09/Jr8/rPuLbPBtI/XpP+bNFNrGSl6UokmcxjbbQEIpG6VBJRkUVuLK40Ap
eTLiq1FmZlheNqT5tCZRpUyXJLcJafBYIJb+UGRDGABchYCdoJlcmyBI07VTxK5rPcxqXxdVOkafHEIVgXeSJZTBuhTJQjYxR+f3phmWZuG5NMamWVLkmJMpZ5HYFV8chbjds1ClFeIIkQChPtFICM09VrkqlrFLSu01hmQtlvmQKVHUpaU+BpUsG8V6xpzWit+l+socbRpFhXGQrF8N0CQvCc8w8HFFKuFErGqT6Sb2JmpDuO1HkWXXshhf5ldOcELuF9Hvi33sSd0MU9Nl0a+/P7wLu6jujan07cICRnNsHqLAq8i0XkY+HMCSF5xhtAdGb35P48D9wwJD8g72Y+9QAH/MCZRZ4bWuTNxtPFxJtdWQQImvkRTb0bL8KxyLeM2kYMeEdB8i/pxg3G1/h0LrypF90bYy69aMbaLqHOobkTRN54MXUCmi+CuR+6xK5NvHA8dbwbd8KB9WbQSu6tO4sovHam01d9ZftPPL1wYahzMXVrXfB04gXuOGKXmjcDPgaDsHFqUTh3xx6/uD+7cMkJ7q0GNXT/vcAhbNLEuXGu4N/gj4kBNKIzXgTuDdsNNsLFRRh50SJy6cr3J4bw0A1uvbEb/o2mfmg4W4SuBR2RY1QDAoRhG+8Xi9Bj6ozRkRsEi3nk+bMh/L8DObDTgfDEsOzPjMPgyQ/uGZaZMEGw6O7axXrAtBq+HKYkBG/jCNCdg9AJIqOOpzRzr6belTsbu7zrM86dF7pDRM0L+YBXK75zoHVh3MYZwMKy+qOTwZaJKXmX5ExuPTa9Pk7Ig9BrcsYQN75uaLd7PYnpgzml97rX6yViQw+iXIrntdhxoQyGox5X2dnZ2ZWom9i6Uorn1uEMQPjIcfblQiSa+zu3dIwnmu/RL3P6ZP8FCKqoHrb0Z/vZQlejJwDGOZoEN99aVQ1MbotvkRid0SDco/KfXaW42g+YwBmix6jXRTF//kgYBr3/ZH8e2kbay3Up4oRNMn2Np8FoVDvHv5fgo8OO2aUvL6nrtdSZJzp2pXIm2saZpcw3xWD4y8dfe71S7Wusd0fDfvzBrGxUkcEJ3BvqMKWY4ipOl+IxTnuNQZ7ZMr61MDE35TReC26X3KpPBOuBxMHRdXvcHeiwjcYWZluWOz06P89imdr1IbtQD+c7yVMOi+f14ntu6OefPnz46/sQFvFFQp1//vzxp58+2NsySxu4401Da9glzQUjRkKV5zv4KBpvcxKxSiXa+QA8DOlRKM2xxMCo1fV166YuYdaRKL5L4GakWrawb/FtqsmsBr6+LdQ3lSXGQaptLZ9bmXq8Lcv9DrNT5qV4EMp8Yf7zAMkf2q9SPJfmvY4z106X5UGOWlgWallLDY/p0mYE/8J4I8ACC1QYXsb7550CQ+wyQ/CFI25U88Tkuc+vcZMsnfxs5WyuRtHKQN7gHEYiX8z40shouhnFXNnH4XvAbPKx8YsRqzXSWOhRgw4e8orjzYRUuMnhydmARyLX/KBVUaQCtFsHVB72M9x57VcpWWpwssQVFFfXL/Rbn0/2R0bAon6kKv7iB74uMev50zx/P0DIDdJMop7jfC1eBOI0jMPTKj49CvUme75BQPKIL30OWn/4nUo5V91DhP6b2tMFmz+PjYgzbYnYoTBKo+cF2reG1OrHCHc5NOy/2Lc5/XBqcMhp67QQNmkRo5bqyzo67g+npfHCgE67O6Kfdsv0LSMjTqK38UyODbJ4N2gZsGod/EcPh9+pZYoUfVsJJ/D/r2OC/H9bB1fH4KBg8HVoHWP6OByapv3Voke+/BrlKMFMD75b+wz184p2HK9dwTZDd4rt28MdczvnfjkA/Pqdtiw4k8Jq9TYb70/y2eZWduqoiiXG8S0nkpl0g34Wp/w/CPpZXSSdSmxt7mJ2OnGvt1xi9iyXKLRB/2Ro99FTuk0coP8BUEsBAhQDFAAAAAgAghCZUO4XBlNjAAAAeQAAABMAAAAAAAAAAAAAAIABAAAAAGFuc2libGUvX19pbml0X18ucHlQSwECFAMUAAAACACCEJlQncXxazcAAABIAAAAIAAAAAAAAAAAAAAAgAGUAAAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvX19pbml0X18ucHlQSwECFAMUAAAACACCEJlQ8grsqVMRAADLSQAAFgAAAAAAAAAAAAAAgAEJAQAAYW5zaWJsZV9tb2R1bGVfc3RhdC5weVBLAQIUAxQAAAAIAIIQmVDCst0/7w0AAMkwAAAdAAAAAAAAAAAAAACAAZASAABhbnNpYmxlL21vZHVsZV91dGlscy9fdGV4dC5weVBLAQIUAxQAAAAIAIIQmVBs4rHkDW4AALW5AQAdAAAAAAAAAAAAAACAAbogAABhbnNpYmxlL21vZHVsZV91dGlscy9iYXNpYy5weVBLAQIUAxQAAAAIAIIQmVDcAQAiwB0AAI92AAAkAAAAAAAAAAAAAACAAQKPAABhbnNpYmxlL21vZHVsZV91dGlscy9zaXgvX19pbml0X18ucHlQSwECFAMUAAAACACCEJlQTcLRhusBAADlAwAALAAAAAAAAAAAAAAAgAEErQAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvcGFyc2luZy9jb252ZXJ0X2Jvb2wucHlQSwECFAMUAAAACACCEJlQAAAAAAIAAAAAAAAAKAAAAAAAAAAAAAAAgAE5rwAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvcGFyc2luZy9fX2luaXRfXy5weVBLAQIUAxQAAAAIAIIQmVDGcREl6AYAANYPAAAiAAAAAAAAAAAAAACAAYGvAABhbnNpYmxlL21vZHVsZV91dGlscy9weWNvbXBhdDI0LnB5UEsFBgAAAAAJAAkAuwIAAKm2AAAAAA=="""
def invoke_module(module, modlib_path, json_params):
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((modlib_path, pythonpath))
else:
os.environ['PYTHONPATH'] = modlib_path
    p = subprocess.Popen(['/usr/bin/python', module],
                         env=os.environ, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate(json_params)
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
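# Debug helper: 'explode' unpacks the zipped module and its args into ./debug_dir, 'execute' runs the unpacked module from there, and 'excommunicate' imports its main() and runs it in-process.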
def debug(command, zipped_mod, json_params):
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, 'ansible_module_stat.py')
if command == 'explode':
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%s' % basedir)
exitcode = 0
elif command == 'execute':
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((basedir, pythonpath))
else:
os.environ['PYTHONPATH'] = basedir
p = subprocess.Popen(['/usr/bin/python', script_path, args_path],
env=os.environ, shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
elif command == 'excommunicate':
sys.argv = ['stat', args_path]
sys.path.insert(0, basedir)
from ansible_module_stat import main
main()
print('WARNING: Module returned to wrapper instead of exiting')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
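# Entry point: write the embedded zip to a temp directory, add a sitecustomize.py that puts the zip on sys.path, invoke the extracted module (or a debug command), then clean up and exit with its return code.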
if __name__ == '__main__':
ANSIBALLZ_PARAMS = '{"ANSIBLE_MODULE_ARGS": {"_ansible_version": "2.4.2.0", "_ansible_selinux_special_fs": ["fuse", "nfs", "vboxsf", "ramfs", "9p"], "_ansible_no_log": false, "_ansible_module_name": "stat", "_ansible_debug": false, "_ansible_verbosity": 0, "_ansible_syslog_facility": "LOG_USER", "_ansible_socket": null, "_ansible_shell_executable": "/bin/sh", "_ansible_diff": false, "get_checksum": false, "_ansible_check_mode": false, "checksum_algo": "sha1", "follow": false, "get_md5": false, "path": "/home/vagrant/.composer/composer.json"}}'
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
temp_path = tempfile.mkdtemp(prefix='ansible_')
zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip')
modlib = open(zipped_mod, 'wb')
modlib.write(base64.b64decode(ZIPDATA))
modlib.close()
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
z = zipfile.ZipFile(zipped_mod, mode='r')
module = os.path.join(temp_path, 'ansible_module_stat.py')
f = open(module, 'wb')
f.write(z.read('ansible_module_stat.py'))
f.close()
z = zipfile.ZipFile(zipped_mod, mode='a')
sitecustomize = u'import sys\nsys.path.insert(0,"%s")\n' % zipped_mod
sitecustomize = sitecustomize.encode('utf-8')
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( 2020, 4, 25, 2, 4, 12)
z.writestr(zinfo, sitecustomize)
z.close()
exitcode = invoke_module(module, zipped_mod, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
pass
sys.exit(exitcode) | []
| []
| [
"PYTHONPATH"
]
| [] | ["PYTHONPATH"] | python | 1 | 0 | |
naturallanguageunderstandingv1/natural_language_understanding_v1_integration_test.go | // +build integration
package naturallanguageunderstandingv1_test
/**
* Copyright 2018 IBM All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import (
"net/http"
"os"
"testing"
"github.com/IBM/go-sdk-core/core"
"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"
"github.com/watson-developer-cloud/go-sdk/naturallanguageunderstandingv1"
)
var service *naturallanguageunderstandingv1.NaturalLanguageUnderstandingV1
var serviceErr error
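// init loads service credentials from ../.env and, when the file is present, configures the client with Watson test headers.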
func init() {
err := godotenv.Load("../.env")
if err == nil {
service, serviceErr = naturallanguageunderstandingv1.
NewNaturalLanguageUnderstandingV1(&naturallanguageunderstandingv1.NaturalLanguageUnderstandingV1Options{
URL: os.Getenv("NATURAL_LANGUAGE_UNDERSTANDING_URL"),
Version: "2018-03-16",
Username: os.Getenv("NATURAL_LANGUAGE_UNDERSTANDING_USERNAME"),
Password: os.Getenv("NATURAL_LANGUAGE_UNDERSTANDING_PASSWORD"),
})
if serviceErr == nil {
customHeaders := http.Header{}
customHeaders.Add("X-Watson-Learning-Opt-Out", "1")
customHeaders.Add("X-Watson-Test", "1")
service.Service.SetDefaultHeaders(customHeaders)
}
}
}
func shouldSkipTest(t *testing.T) {
if service == nil {
t.Skip("Skipping test as service credentials are missing")
}
}
func TestAnalyze(t *testing.T) {
shouldSkipTest(t)
text := `IBM is an American multinational technology company
headquartered in Armonk, New York, United States
with operations in over 170 countries.`
response, responseErr := service.Analyze(
&naturallanguageunderstandingv1.AnalyzeOptions{
Text: &text,
Features: &naturallanguageunderstandingv1.Features{
Emotion: &naturallanguageunderstandingv1.EmotionOptions{
Document: core.BoolPtr(true),
},
Sentiment: &naturallanguageunderstandingv1.SentimentOptions{
Document: core.BoolPtr(true),
},
},
},
)
assert.Nil(t, responseErr)
analyze := service.GetAnalyzeResult(response)
assert.NotNil(t, analyze)
}
func TestListModels(t *testing.T) {
shouldSkipTest(t)
// list models
response, responseErr := service.ListModels(
&naturallanguageunderstandingv1.ListModelsOptions{},
)
assert.Nil(t, responseErr)
listModels := service.GetListModelsResult(response)
assert.NotNil(t, listModels)
}
| [
"\"NATURAL_LANGUAGE_UNDERSTANDING_URL\"",
"\"NATURAL_LANGUAGE_UNDERSTANDING_USERNAME\"",
"\"NATURAL_LANGUAGE_UNDERSTANDING_PASSWORD\""
]
| []
| [
"NATURAL_LANGUAGE_UNDERSTANDING_USERNAME",
"NATURAL_LANGUAGE_UNDERSTANDING_URL",
"NATURAL_LANGUAGE_UNDERSTANDING_PASSWORD"
]
| [] | ["NATURAL_LANGUAGE_UNDERSTANDING_USERNAME", "NATURAL_LANGUAGE_UNDERSTANDING_URL", "NATURAL_LANGUAGE_UNDERSTANDING_PASSWORD"] | go | 3 | 0 | |
test/test_missing_api_key_error.py | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.banned_ip_address_error_details import BannedIpAddressErrorDetails
globals()['BannedIpAddressErrorDetails'] = BannedIpAddressErrorDetails
from cryptoapis.model.missing_api_key_error import MissingApiKeyError
class TestMissingApiKeyError(unittest.TestCase):
"""MissingApiKeyError unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMissingApiKeyError(self):
"""Test MissingApiKeyError"""
# FIXME: construct object with mandatory attributes with example values
# model = MissingApiKeyError() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| []
| []
| []
| [] | [] | python | null | null | null |
csrf/easy2.go | package csrf
import (
"database/sql"
"fmt"
"net/http"
"os"
"text/template"
"github.com/burpOverflow/VulnDoge/pkg/CheckErr"
"github.com/burpOverflow/VulnDoge/pkg/rand"
_ "github.com/go-sql-driver/mysql"
)
var csrfToken string
func Easy2(w http.ResponseWriter, r *http.Request) {
db, err := sql.Open("mysql", os.Getenv("MYSQL_URL"))
CheckErr.Check(err)
defer db.Close()
isSession, _ := SessionExist(r, db)
if isSession == true {
http.Redirect(w, r, "/csrf/easy2/myaccount/", 302)
return
}
tmpl := template.Must(template.ParseFiles("templates/csrf/easy1.html", "templates/base.html"))
tmpl.ExecuteTemplate(w, "easy1.html", struct {
Title string
Desc string
Login bool
User string
Sol bool
Lid string
}{Title: "CSRF token validation depends on token being present", Desc: `<p style="color:green;">This Application validate the token when it is present but is skip validation if token is omitted. Try to hack change password functionality:) </p><div class="container"><h3>Create Account</h3>
<form action='/csrf/easy2/create/' method='POST'>
<div class="mb-3">
<div class="mb-3">
<label for="username" class="form-label">Username</label>
<input type="username" class="form-control" name="username" required>
</div>
<label for="email" class="form-label">Email address</label>
<input type="email" class="form-control" id="exampleInputEmail1" name="email" required>
</div>
<div class="mb-3">
<label for="password" class="form-label">Password</label>
<input type="password" class="form-control" name="password" required>
</div>
<button type="submit" class="btn btn-primary">Submit</button>
</form>or <a href='/csrf/easy2/login/'>Login</a>
</div>`, Login: false, Sol: true, Lid: "a5"})
}
func CreateEasy2(w http.ResponseWriter, r *http.Request) {
Create(w, r, "easy2")
}
func LoginEasy2(w http.ResponseWriter, r *http.Request) {
db, err := sql.Open("mysql", os.Getenv("MYSQL_URL"))
CheckErr.Check(err)
defer db.Close()
Login(w, r, "easy2", "CSRF token validation depends on token being present", db)
}
func MyAccountEasy2(w http.ResponseWriter, r *http.Request) {
db, err := sql.Open("mysql", os.Getenv("MYSQL_URL"))
CheckErr.Check(err)
defer db.Close()
isSession, uname := SessionExist(r, db)
if isSession {
csrfToken = rand.String(20)
tmpl := template.Must(template.ParseFiles("templates/csrf/easy1.html", "templates/base.html"))
tmpl.ExecuteTemplate(w, "easy1.html", struct {
Title string
Desc string
Login bool
User string
Sol bool
LogoutUrl string
Lid string
}{Title: "CSRF token validation depends on token being present", Desc: `<h3>Welcome ` + uname + ` :)</h3><br><br><div class="container"><h4>Change Password</h4>
<form action='/csrf/easy2/changepassword/' method='POST'>
<div class="mb-3">
<input type="hidden" name="csrf-token" value="` + csrfToken + `">
<label for="newpassword" class="form-label">New Password</label>
<input type="password" class="form-control" name="newpassword" required>
</div>
<button type="submit" class="btn btn-primary">Submit</button>
</form>
</div>`, Login: isSession, User: uname, LogoutUrl: "/csrf/easy2/logout/", Sol: false, Lid: "nil"})
} else {
http.Redirect(w, r, "/csrf/easy2/", 302)
}
}
func LogoutEasy2(w http.ResponseWriter, r *http.Request) {
Logout(w, r, "easy2")
}
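// ChangePasswordEasy2 is intentionally vulnerable: it validates the CSRF token only when one is supplied and silently skips validation when the token is omitted from the request.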
func ChangePasswordEasy2(w http.ResponseWriter, r *http.Request) {
newpassword := r.PostFormValue("newpassword")
db, err := sql.Open("mysql", os.Getenv("MYSQL_URL"))
CheckErr.Check(err)
defer db.Close()
clientCsrfToken := r.PostFormValue("csrf-token")
fmt.Println("server: ", csrfToken)
fmt.Println("client: ", clientCsrfToken)
fmt.Println()
if clientCsrfToken == "" {
fmt.Println("client csrf empty")
_, uname := SessionExist(r, db)
DBUpdatePassword(uname, newpassword, db)
http.Redirect(w, r, "/csrf/easy2/", 302)
return
} else {
if clientCsrfToken == csrfToken {
fmt.Println("client csrf empty")
_, uname := SessionExist(r, db)
DBUpdatePassword(uname, newpassword, db)
http.Redirect(w, r, "/csrf/easy2/", 302)
return
} else {
fmt.Fprintf(w, "Invalid CSRF-TOKEN")
return
}
}
// _, uname := SessionExist(r, db)
// DBUpdatePassword(uname, newpassword, db)
// http.Redirect(w, r, "/csrf/easy2/", 302)
}
| [
"\"MYSQL_URL\"",
"\"MYSQL_URL\"",
"\"MYSQL_URL\"",
"\"MYSQL_URL\""
]
| []
| [
"MYSQL_URL"
]
| [] | ["MYSQL_URL"] | go | 1 | 0 | |
hackerrank/Algorithms/Simple Array Sum/solution.py | #!/bin/python3
import os
#
# Complete the simpleArraySum function below.
#
def simpleArraySum(ar):
#
# Write your code here.
#
return sum(ar)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
ar_count = int(input())
ar = list(map(int, input().rstrip().split()))
result = simpleArraySum(ar)
fptr.write(str(result) + '\n')
fptr.close()
| []
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | python | 1 | 0 | |
http/attestationpool_test.go | // Copyright ยฉ 2021 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package http_test
import (
"context"
"os"
"testing"
"time"
client "github.com/attestantio/go-eth2-client"
"github.com/attestantio/go-eth2-client/http"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/stretchr/testify/require"
)
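// TestAttestationPool fetches the attestation pool for the current slot, derived from the node's genesis time and slot duration.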
func TestAttestationPool(t *testing.T) {
tests := []struct {
name string
slot int64 // -1 for current
}{
{
name: "Good",
slot: -1,
},
}
service, err := http.New(context.Background(),
http.WithTimeout(timeout),
http.WithAddress(os.Getenv("HTTP_ADDRESS")),
)
require.NoError(t, err)
// Need to fetch current slot for attestation pools.
genesis, err := service.(client.GenesisProvider).Genesis(context.Background())
require.NoError(t, err)
slotDuration, err := service.(client.SlotDurationProvider).SlotDuration(context.Background())
require.NoError(t, err)
for _, test := range tests {
var slot phase0.Slot
if test.slot == -1 {
slot = phase0.Slot(uint64(time.Since(genesis.GenesisTime).Seconds()) / uint64(slotDuration.Seconds()))
} else {
slot = phase0.Slot(uint64(test.slot))
}
t.Run(test.name, func(t *testing.T) {
attestationPool, err := service.(client.AttestationPoolProvider).AttestationPool(context.Background(), slot)
require.NoError(t, err)
require.NotNil(t, attestationPool)
})
}
}
| [
"\"HTTP_ADDRESS\""
]
| []
| [
"HTTP_ADDRESS"
]
| [] | ["HTTP_ADDRESS"] | go | 1 | 0 | |
src/controls.py | import spotipy
import configparser
import os
from spotipy.oauth2 import SpotifyOAuth
class Controller:
def __init__(self, config:object):
"""Initiazlization for Controller object
Args:
config (object): configuration object
""" ''''''
self.config = config
self.auth = SpotifyOAuth(
scope=config['SETTINGS']['SCOPE'],
client_id= config['CREDENTIALS']['CLIENT_ID'],
client_secret=config['CREDENTIALS']['CLIENT_SECRET'],
redirect_uri=config['CREDENTIALS']['REDIRECT_URI']
)
self.device_id = self.config['SETTINGS']['DEVICE_ID']
self.sp = spotipy.Spotify(auth_manager = self.auth)
#self.sp.pause_playback()
#self.sp.start_playback()
#playlist = self.sp.current_user_playlists()
#print(playlist)
def play(self):
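        """Start playback of the first playlist in the user's library."""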
user_playlists = self.sp.current_user_playlists()
self.start_playback(user_playlists['items'][0]['uri'])
def currently_playing(self, device_id:str = None):
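        """Return the currently playing track if the configured device is active, else None."""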
device_id = device_id if device_id else self.device_id
device_info = self.get_device_status(self.device_id)
if device_info and device_info['is_active']:
return self.sp.current_user_playing_track()
return None
def pause_playback(self, device_id:str=None ):
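        """Pause playback if the device is active; raise if the device exists but is not active."""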
device_id = device_id if device_id else self.device_id
device_info = self.get_device_status(device_id)
if device_info:
if device_info['is_active']:
self.sp.pause_playback(device_id=device_id)
else:
raise Exception('Device not available')
def get_device_status(self, device_id:str)->object:
"""Get device info from spotify
Args:
device_id (str): device id
Returns:
object: device object
"""
devices = self.sp.devices()['devices']
devices = [ d for d in devices if d['id'] == device_id]
if len(devices)>0:
return devices[0]
else:
return None
def start_playback(self, context_uri:str=None, device_id:str = None ):
"""[summary]
Args:
uri (str): Context URI to play ( playlist, artist, etc)
device_id (str, optional): device id to start playback. Defaults to None.
Raises:
Exception: Device not available
"""
device_id = device_id if device_id else self.device_id
device_info = self.get_device_status(device_id)
if device_info:
if not device_info['is_active']:
self.sp.transfer_playback(device_id)
self.sp.start_playback(context_uri=context_uri, device_id=device_id)
else:
raise Exception('Device not available')
def get_auth(self)->object:
"""Get Spotify OAuth object
Returns:
            object: Spotify OAuth object
"""
return self.auth
def get_new_access_token(self):
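        """Run the authorization-code flow and exchange the resulting code for a fresh access token."""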
auth_code = self.auth.get_authorization_code()
self.auth.get_access_token(auth_code)
#config = configparser.ConfigParser()
#config.read("config/settings.ini")
#os.environ['SPOTIPY_CLIENT_ID'] = config['CREDENTIALS']['CLIENT_ID']
#os.environ['SPOTIPY_CLIENT_SECRET'] = config['CREDENTIALS']['CLIENT_SECRET']
#os.environ['SPOTIPY_REDIRECT_URI'] = config['CREDENTIALS']['REDIRECT_URI']
#ctrl = Controller(config) | []
| []
| [
"SPOTIPY_CLIENT_SECRET",
"SPOTIPY_CLIENT_ID",
"SPOTIPY_REDIRECT_URI"
]
| [] | ["SPOTIPY_CLIENT_SECRET", "SPOTIPY_CLIENT_ID", "SPOTIPY_REDIRECT_URI"] | python | 3 | 0 | |
tests/suites/external-service/external_service_test.go | package externalservice
import (
"fmt"
"os"
"strconv"
"testing"
"time"
"github.com/kudobuilder/test-tools/pkg/client"
"github.com/kudobuilder/test-tools/pkg/debug"
"github.com/kudobuilder/test-tools/pkg/kubernetes"
"github.com/kudobuilder/test-tools/pkg/kudo"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
"github.com/spf13/afero"
"github.com/mesosphere/kudo-cassandra-operator/tests/cassandra"
"github.com/mesosphere/kudo-cassandra-operator/tests/suites"
)
var (
TestName = "ext-service-test"
OperatorName = os.Getenv("OPERATOR_NAME")
TestNamespace = fmt.Sprintf("%s-namespace", TestName)
TestInstance = fmt.Sprintf("%s-instance", OperatorName)
KubeConfigPath = os.Getenv("KUBECONFIG")
KubectlPath = os.Getenv("KUBECTL_PATH")
OperatorDirectory = os.Getenv("OPERATOR_DIRECTORY")
NodeCount = 1
Client = client.Client{}
Operator = kudo.Operator{}
)
var _ = BeforeSuite(func() {
Client, _ = client.NewForConfig(KubeConfigPath)
_ = kubernetes.CreateNamespace(Client, TestNamespace)
})
var _ = AfterEach(func() {
debug.CollectArtifacts(Client, afero.NewOsFs(), GinkgoWriter, TestNamespace, KubectlPath)
})
var _ = AfterSuite(func() {
_ = Operator.Uninstall()
_ = kubernetes.DeleteNamespace(Client, TestNamespace)
})
func TestService(t *testing.T) {
RegisterFailHandler(Fail)
junitReporter := reporters.NewJUnitReporter(fmt.Sprintf(
"%s-junit.xml", TestName,
))
RunSpecsWithDefaultAndCustomReporters(t, TestName, []Reporter{junitReporter})
}
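// The spec below installs the operator, enables the external service with native transport and RPC ports, verifies the exposed ports, and finally disables the service again.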
var _ = Describe("external service", func() {
It("Installs the operator from the current directory", func() {
var err error
parameters := map[string]string{
"NODE_COUNT": strconv.Itoa(NodeCount),
}
suites.SetSuitesParameters(parameters)
Operator, err = kudo.InstallOperator(OperatorDirectory).
WithNamespace(TestNamespace).
WithInstance(TestInstance).
WithParameters(parameters).
Do(Client)
Expect(err).To(BeNil())
err = Operator.Instance.WaitForPlanComplete("deploy")
Expect(err).To(BeNil())
suites.AssertNumberOfCassandraNodes(Client, Operator, NodeCount)
By("Allowing external access to the cassandra cluster")
nativeTransportPort := 9043
parameters = map[string]string{
"EXTERNAL_SERVICE": "true",
"EXTERNAL_NATIVE_TRANSPORT": "true",
"EXTERNAL_NATIVE_TRANSPORT_PORT": strconv.Itoa(nativeTransportPort),
}
suites.SetSuitesParameters(parameters)
err = Operator.Instance.UpdateParameters(parameters)
Expect(err).To(BeNil())
err = Operator.Instance.WaitForPlanComplete("deploy")
Expect(err).To(BeNil())
suites.AssertNumberOfCassandraNodes(Client, Operator, NodeCount)
log.Infof("Verify that external service is started and has 1 open port")
Eventually(func() bool {
svc, err := kubernetes.GetService(Client, fmt.Sprintf("%s-svc-external", TestInstance), TestNamespace)
if err != nil {
log.Infof("External Service Error: %v", err)
} else {
log.Infof("External Service, NumPorts: %d", len(svc.Spec.Ports))
if len(svc.Spec.Ports) > 0 {
log.Infof("External Service, Port 0: %+v", svc.Spec.Ports[0])
}
}
return err == nil && len(svc.Spec.Ports) == 1 && svc.Spec.Ports[0].Name == "native-transport" && svc.Spec.Ports[0].Port == int32(nativeTransportPort)
}, 2*time.Minute, 10*time.Second).Should(BeTrue())
By("Opening a second port if rpc is enabled")
rpcPort := 9161
parameters = map[string]string{
"START_RPC": "true",
"EXTERNAL_RPC": "true",
"EXTERNAL_RPC_PORT": strconv.Itoa(rpcPort),
}
suites.SetSuitesParameters(parameters)
err = Operator.Instance.UpdateParameters(parameters)
Expect(err).To(BeNil())
err = Operator.Instance.WaitForPlanComplete("deploy")
Expect(err).To(BeNil())
suites.AssertNumberOfCassandraNodes(Client, Operator, NodeCount)
log.Infof("Verify that external service is started and has 2 open ports")
Eventually(func() bool {
svc, err := kubernetes.GetService(Client, fmt.Sprintf("%s-svc-external", TestInstance), TestNamespace)
if err != nil {
log.Infof("External Service Error: %v", err)
} else {
log.Infof("External Service, NumPorts: %d", len(svc.Spec.Ports))
if len(svc.Spec.Ports) > 1 {
log.Infof("External Service, Port 1: %v", svc.Spec.Ports[1])
}
}
return err == nil && len(svc.Spec.Ports) == 2 && svc.Spec.Ports[1].Name == "rpc" && svc.Spec.Ports[1].Port == int32(rpcPort)
}, 2*time.Minute, 10*time.Second).Should(BeTrue())
By("Disabling the external service again")
parameters = map[string]string{
"START_RPC": "false",
"EXTERNAL_SERVICE": "false",
"EXTERNAL_RPC": "false",
"EXTERNAL_NATIVE_TRANSPORT": "false",
}
suites.SetSuitesParameters(parameters)
err = Operator.Instance.UpdateParameters(parameters)
Expect(err).To(BeNil())
err = Operator.Instance.WaitForPlanComplete("deploy")
Expect(err).To(BeNil())
suites.AssertNumberOfCassandraNodes(Client, Operator, NodeCount)
Eventually(func() error {
_, err := kubernetes.GetService(Client, fmt.Sprintf("%s-svc-external", TestInstance), TestNamespace)
log.Infof("External Service Error: %v", err)
return err
}, 2*time.Minute, 10*time.Second).Should(Not(BeNil()))
})
It("Uninstalls the operator", func() {
err := cassandra.Uninstall(Client, Operator)
Expect(err).To(BeNil())
})
})
| [
"\"OPERATOR_NAME\"",
"\"KUBECONFIG\"",
"\"KUBECTL_PATH\"",
"\"OPERATOR_DIRECTORY\""
]
| []
| [
"OPERATOR_DIRECTORY",
"OPERATOR_NAME",
"KUBECTL_PATH",
"KUBECONFIG"
]
| [] | ["OPERATOR_DIRECTORY", "OPERATOR_NAME", "KUBECTL_PATH", "KUBECONFIG"] | go | 4 | 0 | |
go-restful/src/github.com/kataras/iris/plugins/iriscontrol/control_panel.go | // Copyright (c) 2016, Gerasimos Maropoulos
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL JULIEN SCHMIDT BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package iriscontrol
import (
"github.com/kataras/iris"
"os"
"strconv"
"time"
)
var pathSeperator = string(os.PathSeparator)
var pluginPath = os.Getenv("GOPATH") + pathSeperator + "src" + pathSeperator + "github.com" + pathSeperator + "kataras" + pathSeperator + "iris" + pathSeperator + "plugins" + pathSeperator + "iriscontrol" + pathSeperator
var assetsUrl = "https://github.com/iris-contrib/iris-control-assets/archive/master.zip"
var assetsFolderName = "iris-control-assets-master"
var installationPath = pluginPath + assetsFolderName + pathSeperator
// for the plugin server
func (i *irisControlPlugin) startControlPanel() {
// install the assets first
if err := i.installAssets(); err != nil {
i.pluginContainer.Printf("[%s] %s Error %s: Couldn't install the assets from the internet,\n make sure you are connecting to the internet the first time running the iris-control plugin", time.Now().UTC().String(), Name, err.Error())
i.Destroy()
return
}
i.server = iris.Custom(iris.StationOptions{Cache: false, Profile: false, PathCorrection: true})
i.server.Templates(installationPath + "templates/*")
i.setPluginsInfo()
i.setPanelRoutes()
go i.server.Listen(strconv.Itoa(i.options.Port))
i.pluginContainer.Printf("[%s] %s is running at port %d with %d authenticated users", time.Now().UTC().String(), Name, i.options.Port, len(i.auth.authenticatedUsers))
}
type DashboardPage struct {
ServerIsRunning bool
Routes []RouteInfo
Plugins []PluginInfo
}
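// setPluginsInfo caches the name and description of every registered plugin
// so they can be rendered on the dashboard page.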
func (i *irisControlPlugin) setPluginsInfo() {
plugins := i.pluginContainer.GetAll()
i.plugins = make([]PluginInfo, 0, len(plugins))
for _, plugin := range plugins {
i.plugins = append(i.plugins, PluginInfo{Name: plugin.GetName(), Description: plugin.GetDescription()})
}
}
// installAssets checks if must install ,if yes download the zip and unzip it, returns error.
func (i *irisControlPlugin) installAssets() (err error) {
//we already know the name of the zip's folder, so we can check if it exists; if so, don't install the assets again.
if i.pluginContainer.GetDownloader().DirectoryExists(installationPath) {
return
}
//set the installationPath; although we already know it, set it here too from the downloader's result
installationPath, err = i.pluginContainer.GetDownloader().Install(assetsUrl, pluginPath)
return err
}
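// setPanelRoutes registers the static asset handler, the login/logout
// endpoints and the authenticated dashboard and control routes on the
// embedded control-panel server.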
func (i *irisControlPlugin) setPanelRoutes() {
i.server.Get("/public/*assets", iris.Static(installationPath+"static"+pathSeperator, "/public/"))
i.server.Get("/login", func(ctx *iris.Context) {
ctx.RenderFile("login.html", nil)
})
i.server.Post("/login", func(ctx *iris.Context) {
i.auth.login(ctx)
})
i.server.Use(i.auth)
i.server.Get("/", func(ctx *iris.Context) {
ctx.RenderFile("index.html", DashboardPage{ServerIsRunning: i.station.Server.IsRunning, Routes: i.routes, Plugins: i.plugins})
})
i.server.Post("/logout", func(ctx *iris.Context) {
i.auth.logout(ctx)
})
//the controls
i.server.Post("/start_server", func(ctx *iris.Context) {
})
i.server.Post("/stop_server", func(ctx *iris.Context) {
})
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "drf_table_ex.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
keras_retinanet/bin/train.py | #!/usr/bin/env python
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
import warnings
import keras
import keras.preprocessing.image
import tensorflow as tf
# Allow relative imports when being executed as script.
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import keras_retinanet.bin # noqa: F401
__package__ = "keras_retinanet.bin"
# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from .. import layers # noqa: F401
from .. import losses
from .. import models
from ..callbacks import RedirectModel
from ..callbacks.eval import Evaluate
from ..models.retinanet import retinanet_bbox
from ..preprocessing.csv_generator import CSVGenerator
from ..preprocessing.kitti import KittiGenerator
from ..preprocessing.open_images import OpenImagesGenerator
from ..preprocessing.pascal_voc import PascalVocGenerator
from ..utils.anchors import make_shapes_callback
from ..utils.config import read_config_file, parse_anchor_parameters
from ..utils.keras_version import check_keras_version
from ..utils.model import freeze as freeze_model
from ..utils.transform import random_transform_generator
from ..utils.image import random_visual_effect_generator
def makedirs(path):
# Intended behavior: try to create the directory,
    # pass if the directory exists already, fail otherwise.
# Meant for Python 2.7/3.n compatibility.
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def get_session():
""" Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def model_with_weights(model, weights, skip_mismatch):
""" Load weights for model.
Args
model : The model to load weights for.
weights : The weights to load.
skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model.
"""
if weights is not None:
model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)
return model
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0,
freeze_backbone=False, lr=1e-5, config=None):
""" Creates three models (model, training_model, prediction_model).
Args
backbone_retinanet : A function to call to create a retinanet model with a given backbone.
num_classes : The number of classes to train.
weights : The weights to load into the model.
multi_gpu : The number of GPUs to use for training.
freeze_backbone : If True, disables learning for the backbone.
config : Config parameters, None indicates the default configuration.
Returns
model : The base model. This is also the model that is saved in snapshots.
training_model : The training model. If multi_gpu=0, this is identical to model.
prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).
"""
modifier = freeze_model if freeze_backbone else None
# load anchor parameters, or pass None (so that defaults will be used)
anchor_params = None
num_anchors = None
if config and 'anchor_parameters' in config:
anchor_params = parse_anchor_parameters(config)
num_anchors = anchor_params.num_anchors()
# Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
# optionally wrap in a parallel model
if multi_gpu > 1:
from keras.utils import multi_gpu_model
with tf.device('/cpu:0'):
model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)
training_model = multi_gpu_model(model, gpus=multi_gpu)
else:
model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)
training_model = model
# make prediction model
prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)
# compile model
    # each loss function is plugged in at this compile step
training_model.compile(
loss={
'regression' : losses.smooth_l1(),
'classification': losses.focal()
},
optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001)
)
return model, training_model, prediction_model
def create_callbacks(model, training_model, prediction_model, validation_generator, args):
""" Creates the callbacks to use during training.
Args
model: The base model.
training_model: The model that is used for training.
prediction_model: The model that should be used for validation.
validation_generator: The generator for creating validation data.
args: parseargs args object.
Returns:
A list of callbacks used for training.
"""
callbacks = []
tensorboard_callback = None
if args.tensorboard_dir:
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir = args.tensorboard_dir,
histogram_freq = 0,
batch_size = args.batch_size,
write_graph = True,
write_grads = False,
write_images = False,
embeddings_freq = 0,
embeddings_layer_names = None,
embeddings_metadata = None
)
callbacks.append(tensorboard_callback)
if args.evaluation and validation_generator:
if args.dataset_type == 'coco':
from ..callbacks.coco import CocoEval
# use prediction model for evaluation
evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)
else:
evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback, weighted_average=args.weighted_average)
evaluation = RedirectModel(evaluation, prediction_model)
callbacks.append(evaluation)
# save the model
if args.snapshots:
# ensure directory created first; otherwise h5py will error after epoch.
makedirs(args.snapshot_path)
checkpoint = keras.callbacks.ModelCheckpoint(
os.path.join(
args.snapshot_path,
'{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)
),
verbose=1,
# save_best_only=True,
# monitor="mAP",
# mode='max'
)
checkpoint = RedirectModel(checkpoint, model)
callbacks.append(checkpoint)
callbacks.append(keras.callbacks.ReduceLROnPlateau(
monitor = 'loss',
factor = 0.1,
patience = 2,
verbose = 1,
mode = 'auto',
min_delta = 0.0001,
cooldown = 0,
min_lr = 0
))
return callbacks
def create_generators(args, preprocess_image):
""" Create generators for training and validation.
Args
args : parseargs object containing configuration for generators.
preprocess_image : Function that preprocesses an image for the network.
"""
common_args = {
'batch_size' : args.batch_size,
'config' : args.config,
'image_min_side' : args.image_min_side,
'image_max_side' : args.image_max_side,
'preprocess_image' : preprocess_image,
}
# create random transform generator for augmenting training data
if args.random_transform:
transform_generator = random_transform_generator(
min_rotation=-0.1,
max_rotation=0.1,
min_translation=(-0.1, -0.1),
max_translation=(0.1, 0.1),
min_shear=-0.1,
max_shear=0.1,
min_scaling=(0.9, 0.9),
max_scaling=(1.1, 1.1),
flip_x_chance=0.5,
flip_y_chance=0.5,
)
visual_effect_generator = random_visual_effect_generator(
contrast_range=(0.9, 1.1),
brightness_range=(-.1, .1),
hue_range=(-0.05, 0.05),
saturation_range=(0.95, 1.05)
)
else:
transform_generator = random_transform_generator(flip_x_chance=0.5)
visual_effect_generator = None
if args.dataset_type == 'coco':
# import here to prevent unnecessary dependency on cocoapi
from ..preprocessing.coco import CocoGenerator
train_generator = CocoGenerator(
args.coco_path,
'train2017',
transform_generator=transform_generator,
visual_effect_generator=visual_effect_generator,
**common_args
)
validation_generator = CocoGenerator(
args.coco_path,
'val2017',
shuffle_groups=False,
**common_args
)
elif args.dataset_type == 'pascal':
train_generator = PascalVocGenerator(
args.pascal_path,
'trainval',
transform_generator=transform_generator,
visual_effect_generator=visual_effect_generator,
**common_args
)
validation_generator = PascalVocGenerator(
args.pascal_path,
'test',
shuffle_groups=False,
**common_args
)
elif args.dataset_type == 'csv':
train_generator = CSVGenerator(
args.annotations,
args.classes,
transform_generator=transform_generator,
visual_effect_generator=visual_effect_generator,
**common_args
)
if args.val_annotations:
validation_generator = CSVGenerator(
args.val_annotations,
args.classes,
shuffle_groups=False,
**common_args
)
else:
validation_generator = None
elif args.dataset_type == 'oid':
train_generator = OpenImagesGenerator(
args.main_dir,
subset='train',
version=args.version,
labels_filter=args.labels_filter,
annotation_cache_dir=args.annotation_cache_dir,
parent_label=args.parent_label,
transform_generator=transform_generator,
visual_effect_generator=visual_effect_generator,
**common_args
)
validation_generator = OpenImagesGenerator(
args.main_dir,
subset='validation',
version=args.version,
labels_filter=args.labels_filter,
annotation_cache_dir=args.annotation_cache_dir,
parent_label=args.parent_label,
shuffle_groups=False,
**common_args
)
elif args.dataset_type == 'kitti':
train_generator = KittiGenerator(
args.kitti_path,
subset='train',
transform_generator=transform_generator,
visual_effect_generator=visual_effect_generator,
**common_args
)
validation_generator = KittiGenerator(
args.kitti_path,
subset='val',
shuffle_groups=False,
**common_args
)
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return train_generator, validation_generator
def check_args(parsed_args):
""" Function to check for inherent contradictions within parsed arguments.
For example, batch_size < num_gpus
Intended to raise errors prior to backend initialisation.
Args
parsed_args: parser.parse_args()
Returns
parsed_args
"""
if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:
raise ValueError(
"Batch size ({}) must be equal to or higher than the number of GPUs ({})".format(parsed_args.batch_size,
parsed_args.multi_gpu))
if parsed_args.multi_gpu > 1 and parsed_args.snapshot:
raise ValueError(
"Multi GPU training ({}) and resuming from snapshots ({}) is not supported.".format(parsed_args.multi_gpu,
parsed_args.snapshot))
if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:
raise ValueError("Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.")
if 'resnet' not in parsed_args.backbone:
warnings.warn('Using experimental backbone {}. Only resnet50 has been properly tested.'.format(parsed_args.backbone))
return parsed_args
def parse_args(args):
""" Parse the arguments.
"""
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
subparsers.required = True
coco_parser = subparsers.add_parser('coco')
coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')
pascal_parser = subparsers.add_parser('pascal')
pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')
kitti_parser = subparsers.add_parser('kitti')
kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. /tmp/kitti).')
def csv_list(string):
return string.split(',')
oid_parser = subparsers.add_parser('oid')
oid_parser.add_argument('main_dir', help='Path to dataset directory.')
oid_parser.add_argument('--version', help='The current dataset version is v4.', default='v4')
oid_parser.add_argument('--labels-filter', help='A list of labels to filter.', type=csv_list, default=None)
oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')
oid_parser.add_argument('--parent-label', help='Use the hierarchy children of this label.', default=None)
csv_parser = subparsers.add_parser('csv')
csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')
csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')
csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')
group = parser.add_mutually_exclusive_group()
group.add_argument('--snapshot', help='Resume training from a snapshot.')
group.add_argument('--imagenet-weights', help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)
group.add_argument('--weights', help='Initialize the model with weights from a file.')
group.add_argument('--no-weights', help='Don\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)
parser.add_argument('--backbone', help='Backbone model used by retinanet.', default='resnet50', type=str)
parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)
parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')
parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)
parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)
parser.add_argument('--lr', help='Learning rate.', type=float, default=1e-5)
parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='./snapshots')
parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')
parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')
parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)
parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)
parser.add_argument('--config', help='Path to a configuration parameters .ini file.')
parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')
parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss', action='store_true')
# Fit generator arguments
parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')
parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)
parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int, default=10)
return check_args(parser.parse_args(args))
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# create object that stores backbone information
backbone = models.backbone(args.backbone)
# make sure keras is the minimum required version
check_keras_version()
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
keras.backend.tensorflow_backend.set_session(get_session())
# optionally load config parameters
if args.config:
args.config = read_config_file(args.config)
# create the generators
train_generator, validation_generator = create_generators(args, backbone.preprocess_image)
# create the model
if args.snapshot is not None:
print('Loading model, this may take a second...')
model = models.load_model(args.snapshot, backbone_name=args.backbone)
training_model = model
anchor_params = None
if args.config and 'anchor_parameters' in args.config:
anchor_params = parse_anchor_parameters(args.config)
prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)
else:
weights = args.weights
# default to imagenet if nothing else is specified
if weights is None and args.imagenet_weights:
weights = backbone.download_imagenet()
print('Creating model, this may take a second...')
model, training_model, prediction_model = create_models(
backbone_retinanet=backbone.retinanet,
num_classes=train_generator.num_classes(),
weights=weights,
multi_gpu=args.multi_gpu,
freeze_backbone=args.freeze_backbone,
lr=args.lr,
config=args.config
)
# print model summary
print(model.summary())
# this lets the generator compute backbone layer shapes using the actual backbone model
if 'vgg' in args.backbone or 'densenet' in args.backbone:
train_generator.compute_shapes = make_shapes_callback(model)
if validation_generator:
validation_generator.compute_shapes = train_generator.compute_shapes
# create the callbacks
callbacks = create_callbacks(
model,
training_model,
prediction_model,
validation_generator,
args,
)
if not args.compute_val_loss:
validation_generator = None
# start training
return training_model.fit_generator(
generator=train_generator,
steps_per_epoch=args.steps,
epochs=args.epochs,
verbose=1,
callbacks=callbacks,
workers=args.workers,
use_multiprocessing=args.multiprocessing,
max_queue_size=args.max_queue_size,
validation_data=validation_generator
)
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
dtrans/wsgi.py | """
WSGI config for project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = ".settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.local")
os.environ.setdefault("DJANGO_CONFIGURATION", "Local")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
sdk/keyvault/azkeys/internal/generated/time_unix.go | //go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package generated
import (
"encoding/json"
"fmt"
"reflect"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)
type timeUnix time.Time
func (t timeUnix) MarshalJSON() ([]byte, error) {
return json.Marshal(time.Time(t).Unix())
}
func (t *timeUnix) UnmarshalJSON(data []byte) error {
var seconds int64
if err := json.Unmarshal(data, &seconds); err != nil {
return err
}
*t = timeUnix(time.Unix(seconds, 0))
return nil
}
func (t timeUnix) String() string {
return fmt.Sprintf("%d", time.Time(t).Unix())
}
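// populateTimeUnix writes t into m under key k as a Unix timestamp,
// emitting an explicit null for azcore null values and skipping nil pointers.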
func populateTimeUnix(m map[string]interface{}, k string, t *time.Time) {
if t == nil {
return
} else if azcore.IsNullValue(t) {
m[k] = nil
return
} else if reflect.ValueOf(t).IsNil() {
return
}
m[k] = (*timeUnix)(t)
}
func unpopulateTimeUnix(data json.RawMessage, t **time.Time) error {
if data == nil || strings.EqualFold(string(data), "null") {
return nil
}
var aux timeUnix
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
*t = (*time.Time)(&aux)
return nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ODMC_exam_implementation.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/nfn-agent/nfn-agent.go | /*
* Copyright 2020 Intel Corporation, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"context"
"fmt"
"io"
"os"
"os/signal"
cs "ovn4nfv-k8s-plugin/internal/pkg/cniserver"
pb "ovn4nfv-k8s-plugin/internal/pkg/nfnNotify/proto"
"ovn4nfv-k8s-plugin/internal/pkg/ovn"
chaining "ovn4nfv-k8s-plugin/internal/pkg/utils"
"strings"
"syscall"
"time"
"google.golang.org/grpc"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
kexec "k8s.io/utils/exec"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
//"google.golang.org/grpc/keepalive"
"ovn4nfv-k8s-plugin/cmd/ovn4nfvk8s-cni/app"
"google.golang.org/grpc/status"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
)
var log = logf.Log.WithName("nfn-agent")
var errorChannel chan string
var inSync bool
var pnCreateStore []*pb.Notification_ProviderNwCreate
// subscribe Notifications
func subscribeNotif(client pb.NfnNotifyClient) error {
log.Info("Subscribe Notification from server")
ctx := context.Background()
var n pb.SubscribeContext
n.NodeName = os.Getenv("NFN_NODE_NAME")
for {
stream, err := client.Subscribe(ctx, &n, grpc.WaitForReady(true))
if err != nil {
log.Error(err, "Subscribe", "client", client, "status", status.Code(err))
continue
}
log.Info("Subscribe Notification success")
for {
in, err := stream.Recv()
if err == io.EOF {
// read done.
shutDownAgent("Stream closed")
return err
}
if err != nil {
log.Error(err, "Stream closed from server")
shutDownAgent("Stream closed from server")
return err
}
log.Info("Got message", "msg", in)
handleNotif(in)
}
}
}
func createVlanProvidernetwork(payload *pb.Notification_ProviderNwCreate) error {
var err error
vlanID := payload.ProviderNwCreate.GetVlan().GetVlanId()
ln := payload.ProviderNwCreate.GetVlan().GetLogicalIntf()
pn := payload.ProviderNwCreate.GetVlan().GetProviderIntf()
name := payload.ProviderNwCreate.GetProviderNwName()
if ln == "" {
ln = name + "." + vlanID
}
err = ovn.CreateVlan(vlanID, pn, ln)
if err != nil {
log.Error(err, "Unable to create VLAN", "vlan", ln)
return err
}
err = ovn.CreatePnBridge("nw_"+name, "br-"+name, ln)
if err != nil {
log.Error(err, "Unable to create vlan direct bridge", "vlan", pn)
return err
}
return nil
}
func createDirectProvidernetwork(payload *pb.Notification_ProviderNwCreate) error {
var err error
pn := payload.ProviderNwCreate.GetDirect().GetProviderIntf()
name := payload.ProviderNwCreate.GetProviderNwName()
err = ovn.CreatePnBridge("nw_"+name, "br-"+name, pn)
if err != nil {
log.Error(err, "Unable to create direct bridge", "direct", pn)
return err
}
return nil
}
func deleteVlanProvidernetwork(payload *pb.Notification_ProviderNwRemove) {
ln := payload.ProviderNwRemove.GetVlanLogicalIntf()
name := payload.ProviderNwRemove.GetProviderNwName()
ovn.DeleteVlan(ln)
ovn.DeletePnBridge("nw_"+name, "br-"+name)
}
func deleteDirectProvidernetwork(payload *pb.Notification_ProviderNwRemove) {
ln := payload.ProviderNwRemove.GetVlanLogicalIntf()
name := payload.ProviderNwRemove.GetProviderNwName()
ovn.DeleteVlan(ln)
ovn.DeletePnBridge("nw_"+name, "br-"+name)
}
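// inSyncVlanProvidernetwork reconciles the VLAN provider networks buffered
// before the InSync notification with the VLAN interfaces and provider
// bridges currently configured on the node: missing ones are created and
// stale ones are deleted.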
func inSyncVlanProvidernetwork() {
var err error
// Read config from node
vlanList := ovn.GetVlan()
pnBridgeList := ovn.GetPnBridge("nfn")
diffVlan := make(map[string]bool)
diffPnBridge := make(map[string]bool)
VLAN:
for _, pn := range pnCreateStore {
if pn.ProviderNwCreate.GetVlan() == nil {
continue
}
id := pn.ProviderNwCreate.GetVlan().GetVlanId()
ln := pn.ProviderNwCreate.GetVlan().GetLogicalIntf()
pn := pn.ProviderNwCreate.GetVlan().GetProviderIntf()
if ln == "" {
ln = pn + "." + id
}
for _, vlan := range vlanList {
if vlan == ln {
// VLAN already present
diffVlan[vlan] = true
continue VLAN
}
}
// Vlan not found
err = ovn.CreateVlan(id, pn, ln)
if err != nil {
log.Error(err, "Unable to create VLAN", "vlan", ln)
return
}
}
PRNETWORK:
for _, pn := range pnCreateStore {
if pn.ProviderNwCreate.GetVlan() == nil {
continue
}
ln := pn.ProviderNwCreate.GetVlan().GetLogicalIntf()
name := pn.ProviderNwCreate.GetProviderNwName()
for _, br := range pnBridgeList {
pnName := strings.Replace(br, "br-", "", -1)
if name == pnName {
diffPnBridge[br] = true
continue PRNETWORK
}
}
// Provider Network not found
ovn.CreatePnBridge("nw_"+name, "br-"+name, ln)
}
// Delete VLAN not in the list
for _, vlan := range vlanList {
if diffVlan[vlan] == false {
ovn.DeleteVlan(vlan)
}
}
// Delete Provider Bridge not in the list
for _, br := range pnBridgeList {
if diffPnBridge[br] == false {
name := strings.Replace(br, "br-", "", -1)
ovn.DeletePnBridge("nw_"+name, "br-"+name)
}
}
}
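// inSyncDirectProvidernetwork performs the same reconciliation for direct
// (non-VLAN) provider networks.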
func inSyncDirectProvidernetwork() {
// Read config from node
pnBridgeList := ovn.GetPnBridge("nfn")
diffPnBridge := make(map[string]bool)
DIRECTPRNETWORK:
for _, pn := range pnCreateStore {
if pn.ProviderNwCreate.GetDirect() == nil {
continue
}
pr := pn.ProviderNwCreate.GetDirect().GetProviderIntf()
name := pn.ProviderNwCreate.GetProviderNwName()
for _, br := range pnBridgeList {
pnName := strings.Replace(br, "br-", "", -1)
if name == pnName {
diffPnBridge[br] = true
continue DIRECTPRNETWORK
}
}
// Provider Network not found
ovn.CreatePnBridge("nw_"+name, "br-"+name, pr)
}
// Delete Provider Bridge not in the list
for _, br := range pnBridgeList {
if diffPnBridge[br] == false {
name := strings.Replace(br, "br-", "", -1)
ovn.DeletePnBridge("nw_"+name, "br-"+name)
}
}
}
func createNodeOVSInternalPort(payload *pb.Notification_InSync) error {
nodeIntfIPAddr := strings.Trim(strings.TrimSpace(payload.InSync.GetNodeIntfIpAddress()), "\"")
nodeIntfMacAddr := strings.Trim(strings.TrimSpace(payload.InSync.GetNodeIntfMacAddress()), "\"")
nodeName := os.Getenv("NFN_NODE_NAME")
err := app.CreateNodeOVSInternalPort(nodeIntfIPAddr, nodeIntfMacAddr, nodeName)
if err != nil {
return err
}
return nil
}
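// handleNotif dispatches a notification from nfn-operator according to its
// payload type: provider network create/remove, container route insert, or
// the in-sync marker that triggers reconciliation.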
func handleNotif(msg *pb.Notification) {
switch msg.GetCniType() {
case "ovn4nfv":
switch payload := msg.Payload.(type) {
case *pb.Notification_ProviderNwCreate:
if !inSync {
// Store Msgs
pnCreateStore = append(pnCreateStore, payload)
return
}
if payload.ProviderNwCreate.GetVlan() != nil {
err := createVlanProvidernetwork(payload)
if err != nil {
return
}
}
if payload.ProviderNwCreate.GetDirect() != nil {
err := createDirectProvidernetwork(payload)
if err != nil {
return
}
}
case *pb.Notification_ProviderNwRemove:
if !inSync {
// Unexpected Remove message
return
}
if payload.ProviderNwRemove.GetVlanLogicalIntf() != "" {
deleteVlanProvidernetwork(payload)
}
if payload.ProviderNwRemove.GetDirectProviderIntf() != "" {
deleteDirectProvidernetwork(payload)
}
case *pb.Notification_ContainterRtInsert:
id := payload.ContainterRtInsert.GetContainerId()
pid, err := chaining.GetPidForContainer(id)
if err != nil {
log.Error(err, "Failed to get pid", "containerID", id)
return
}
err = chaining.ContainerAddRoute(pid, payload.ContainterRtInsert.GetRoute())
if err != nil {
return
}
case *pb.Notification_InSync:
inSyncVlanProvidernetwork()
inSyncDirectProvidernetwork()
pnCreateStore = nil
inSync = true
if payload.InSync.GetNodeIntfIpAddress() != "" && payload.InSync.GetNodeIntfMacAddress() != "" {
err := createNodeOVSInternalPort(payload)
if err != nil {
return
}
}
}
// Add other Types here
default:
log.Info("Not supported cni type", "cni type", msg.GetCniType())
}
}
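// shutdownHandler blocks until a termination signal or an internal error is
// received and then logs the shutdown reason.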
func shutdownHandler(errorChannel <-chan string) {
// Register to receive term/int signal.
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
signal.Notify(signalChan, syscall.SIGINT)
signal.Notify(signalChan, syscall.SIGHUP)
var reason string
select {
case sig := <-signalChan:
if sig == syscall.SIGHUP {
log.Info("Received a SIGHUP")
}
reason = fmt.Sprintf("Received OS signal %v", sig)
case reason = <-errorChannel:
log.Info("Error", "reason", reason)
}
log.Info("nfn-agent is shutting down", "reason", reason)
}
func shutDownAgent(reason string) {
// Send a failure message and give few seconds complete shutdown.
log.Info("shutDownAgent received")
errorChannel <- reason
time.Sleep(10 * time.Second)
// The graceful shutdown failed, terminate the process.
panic("Shutdown failed. Panicking.")
}
func main() {
logf.SetLogger(zap.Logger(true))
log.Info("nfn-agent Started")
serverAddr := os.Getenv("NFN_OPERATOR_SERVICE_HOST") + ":" + os.Getenv("NFN_OPERATOR_SERVICE_PORT")
// Setup ovn utilities
exec := kexec.New()
err := ovn.SetExec(exec)
if err != nil {
log.Error(err, "Unable to setup OVN Utils")
return
}
conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
if err != nil {
log.Error(err, "fail to dial")
return
}
defer conn.Close()
client := pb.NewNfnNotifyClient(conn)
errorChannel = make(chan string)
// creates the in-cluster config
config, err := rest.InClusterConfig()
if err != nil {
log.Error(err, "Unable to create in-cluster config")
return
}
// creates the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
log.Error(err, "Unable to create clientset for in-cluster config")
return
}
cniserver := cs.NewCNIServer("", clientset)
err = cniserver.Start(cs.HandleCNIcommandRequest)
if err != nil {
log.Error(err, "Unable to start cni server")
return
}
// Run client in background
go subscribeNotif(client)
shutdownHandler(errorChannel)
}
| [
"\"NFN_NODE_NAME\"",
"\"NFN_NODE_NAME\"",
"\"NFN_OPERATOR_SERVICE_HOST\"",
"\"NFN_OPERATOR_SERVICE_PORT\""
]
| []
| [
"NFN_OPERATOR_SERVICE_HOST",
"NFN_OPERATOR_SERVICE_PORT",
"NFN_NODE_NAME"
]
| [] | ["NFN_OPERATOR_SERVICE_HOST", "NFN_OPERATOR_SERVICE_PORT", "NFN_NODE_NAME"] | go | 3 | 0 | |
tests/util/test_package.py | """Test Home Assistant package util methods."""
import asyncio
import logging
import os
from subprocess import PIPE
import sys
from asynctest import MagicMock, call, patch
import pkg_resources
import pytest
import homeassistant.util.package as package
RESOURCE_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "resources")
)
TEST_NEW_REQ = "pyhelloworld3==1.0.0"
TEST_ZIP_REQ = "file://{}#{}".format(
os.path.join(RESOURCE_DIR, "pyhelloworld3.zip"), TEST_NEW_REQ
)
@pytest.fixture
def mock_sys():
"""Mock sys."""
with patch("homeassistant.util.package.sys", spec=object) as sys_mock:
sys_mock.executable = "python3"
yield sys_mock
@pytest.fixture
def deps_dir():
"""Return path to deps directory."""
return os.path.abspath("/deps_dir")
@pytest.fixture
def lib_dir(deps_dir):
"""Return path to lib directory."""
return os.path.join(deps_dir, "lib_dir")
@pytest.fixture
def mock_popen(lib_dir):
"""Return a Popen mock."""
with patch("homeassistant.util.package.Popen") as popen_mock:
popen_mock.return_value.communicate.return_value = (
bytes(lib_dir, "utf-8"),
b"error",
)
popen_mock.return_value.returncode = 0
yield popen_mock
@pytest.fixture
def mock_env_copy():
"""Mock os.environ.copy."""
with patch("homeassistant.util.package.os.environ.copy") as env_copy:
env_copy.return_value = {}
yield env_copy
@pytest.fixture
def mock_venv():
"""Mock homeassistant.util.package.is_virtual_env."""
with patch("homeassistant.util.package.is_virtual_env") as mock:
mock.return_value = True
yield mock
@asyncio.coroutine
def mock_async_subprocess():
"""Return an async Popen mock."""
async_popen = MagicMock()
@asyncio.coroutine
def communicate(input=None):
"""Communicate mock."""
stdout = bytes("/deps_dir/lib_dir", "utf-8")
return (stdout, None)
async_popen.communicate = communicate
return async_popen
def test_install(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install attempt on a package that doesn't exist."""
env = mock_env_copy()
assert package.install_package(TEST_NEW_REQ, False)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[mock_sys.executable, "-m", "pip", "install", "--quiet", TEST_NEW_REQ],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_upgrade(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an upgrade attempt on a package."""
env = mock_env_copy()
assert package.install_package(TEST_NEW_REQ)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--upgrade",
],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_target(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install with a target."""
target = "target_folder"
env = mock_env_copy()
env["PYTHONUSERBASE"] = os.path.abspath(target)
mock_venv.return_value = False
mock_sys.platform = "linux"
args = [
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--user",
"--prefix=",
]
assert package.install_package(TEST_NEW_REQ, False, target=target)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
args, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_target_venv(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install with a target in a virtual environment."""
target = "target_folder"
with pytest.raises(AssertionError):
package.install_package(TEST_NEW_REQ, False, target=target)
def test_install_error(caplog, mock_sys, mock_popen, mock_venv):
"""Test an install with a target."""
caplog.set_level(logging.WARNING)
mock_popen.return_value.returncode = 1
assert not package.install_package(TEST_NEW_REQ)
assert len(caplog.records) == 1
for record in caplog.records:
assert record.levelname == "ERROR"
def test_install_constraint(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test install with constraint file on not installed package."""
env = mock_env_copy()
constraints = "constraints_file.txt"
assert package.install_package(TEST_NEW_REQ, False, constraints=constraints)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--constraint",
constraints,
],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_find_links(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test install with find-links on not installed package."""
env = mock_env_copy()
link = "https://wheels-repository"
assert package.install_package(TEST_NEW_REQ, False, find_links=link)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--find-links",
link,
"--prefer-binary",
],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
@asyncio.coroutine
def test_async_get_user_site(mock_env_copy):
"""Test async get user site directory."""
deps_dir = "/deps_dir"
env = mock_env_copy()
env["PYTHONUSERBASE"] = os.path.abspath(deps_dir)
args = [sys.executable, "-m", "site", "--user-site"]
with patch(
"homeassistant.util.package.asyncio.create_subprocess_exec",
return_value=mock_async_subprocess(),
) as popen_mock:
ret = yield from package.async_get_user_site(deps_dir)
assert popen_mock.call_count == 1
assert popen_mock.call_args == call(
*args,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
env=env,
)
assert ret == os.path.join(deps_dir, "lib_dir")
def test_check_package_global():
"""Test for an installed package."""
installed_package = list(pkg_resources.working_set)[0].project_name
assert package.is_installed(installed_package)
def test_check_package_zip():
"""Test for an installed zip package."""
assert not package.is_installed(TEST_ZIP_REQ)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/android/FilePath.java | package com.hiddentao.cordova.filepath;
import android.text.TextUtils;
import android.Manifest;
import android.content.ContentUris;
import android.content.Context;
import android.content.pm.PackageManager;
import android.net.Uri;
import android.provider.OpenableColumns;
import android.util.Log;
import android.database.Cursor;
import android.os.Build;
import android.os.Environment;
import android.provider.DocumentsContract;
import android.provider.MediaStore;
import org.apache.cordova.CallbackContext;
import org.apache.cordova.CordovaInterface;
import org.apache.cordova.CordovaPlugin;
import org.apache.cordova.CordovaWebView;
import org.apache.cordova.PermissionHelper;
import org.json.JSONArray;
import org.json.JSONObject;
import org.json.JSONException;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.util.List;
import java.io.File;
public class FilePath extends CordovaPlugin {
private static final String TAG = "[FilePath plugin]: ";
private static final int INVALID_ACTION_ERROR_CODE = -1;
private static final int GET_PATH_ERROR_CODE = 0;
private static final String GET_PATH_ERROR_ID = null;
private static final int GET_CLOUD_PATH_ERROR_CODE = 1;
private static final String GET_CLOUD_PATH_ERROR_ID = "cloud";
private static final int RC_READ_EXTERNAL_STORAGE = 5;
private static CallbackContext callback;
private static String uriStr;
public static final int READ_REQ_CODE = 0;
public static final String READ = Manifest.permission.READ_EXTERNAL_STORAGE;
protected void getReadPermission(int requestCode) {
PermissionHelper.requestPermission(this, requestCode, READ);
}
public void initialize(CordovaInterface cordova, final CordovaWebView webView) {
super.initialize(cordova, webView);
}
/**
* Executes the request and returns PluginResult.
*
* @param action The action to execute.
* @param args JSONArry of arguments for the plugin.
* @param callbackContext The callback context through which to return stuff to caller.
* @return A PluginResult object with a status and message.
*/
@Override
public boolean execute(String action, JSONArray args, CallbackContext callbackContext) throws JSONException {
this.callback = callbackContext;
this.uriStr = args.getString(0);
if (action.equals("resolveNativePath")) {
if (PermissionHelper.hasPermission(this, READ)) {
resolveNativePath();
}
else {
getReadPermission(READ_REQ_CODE);
}
return true;
}
else {
JSONObject resultObj = new JSONObject();
resultObj.put("code", INVALID_ACTION_ERROR_CODE);
resultObj.put("message", "Invalid action.");
callbackContext.error(resultObj);
}
return false;
}
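/**
 * Resolve the stored content:// Uri to a native filesystem path and report
 * the result through the success or error callback.
 */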
public void resolveNativePath() throws JSONException {
JSONObject resultObj = new JSONObject();
/* content:///... */
Uri pvUrl = Uri.parse(this.uriStr);
Log.d(TAG, "URI: " + this.uriStr);
Context appContext = this.cordova.getActivity().getApplicationContext();
String filePath = getPath(appContext, pvUrl);
//check result; send error/success callback
if (filePath == GET_PATH_ERROR_ID) {
resultObj.put("code", GET_PATH_ERROR_CODE);
resultObj.put("message", "Unable to resolve filesystem path.");
this.callback.error(resultObj);
}
else if (filePath.equals(GET_CLOUD_PATH_ERROR_ID)) {
resultObj.put("code", GET_CLOUD_PATH_ERROR_CODE);
resultObj.put("message", "Files from cloud cannot be resolved to filesystem, download is required.");
this.callback.error(resultObj);
}
else {
Log.d(TAG, "Filepath: " + filePath);
this.callback.success("file://" + filePath);
}
}
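/**
 * Handle the result of the READ_EXTERNAL_STORAGE permission request and
 * retry the path resolution once the permission has been granted.
 */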
public void onRequestPermissionResult(int requestCode, String[] permissions, int[] grantResults) throws JSONException {
for (int r:grantResults) {
if (r == PackageManager.PERMISSION_DENIED) {
JSONObject resultObj = new JSONObject();
resultObj.put("code", 3);
resultObj.put("message", "Filesystem permission was denied.");
this.callback.error(resultObj);
return;
}
}
if (requestCode == READ_REQ_CODE) {
resolveNativePath();
}
}
/**
* @param uri The Uri to check.
* @return Whether the Uri authority is ExternalStorageProvider.
*/
private static boolean isExternalStorageDocument(Uri uri) {
return "com.android.externalstorage.documents".equals(uri.getAuthority());
}
/**
* @param uri The Uri to check.
* @return Whether the Uri authority is DownloadsProvider.
*/
private static boolean isDownloadsDocument(Uri uri) {
return "com.android.providers.downloads.documents".equals(uri.getAuthority());
}
/**
* @param uri The Uri to check.
* @return Whether the Uri authority is MediaProvider.
*/
private static boolean isMediaDocument(Uri uri) {
return "com.android.providers.media.documents".equals(uri.getAuthority());
}
/**
* @param uri The Uri to check.
* @return Whether the Uri authority is Google Photos.
*/
private static boolean isGooglePhotosUri(Uri uri) {
return ("com.google.android.apps.photos.content".equals(uri.getAuthority())
|| "com.google.android.apps.photos.contentprovider".equals(uri.getAuthority()));
}
/**
* @param uri The Uri to check.
* @return Whether the Uri authority is Google Drive.
*/
private static boolean isGoogleDriveUri(Uri uri) {
return "com.google.android.apps.docs.storage".equals(uri.getAuthority()) || "com.google.android.apps.docs.storage.legacy".equals(uri.getAuthority());
}
/**
* Get the value of the data column for this Uri. This is useful for
* MediaStore Uris, and other file-based ContentProviders.
*
* @param context The context.
* @param uri The Uri to query.
* @param selection (Optional) Filter used in the query.
* @param selectionArgs (Optional) Selection arguments used in the query.
* @return The value of the _data column, which is typically a file path.
*/
private static String getDataColumn(Context context, Uri uri, String selection,
String[] selectionArgs) {
Cursor cursor = null;
final String column = "_data";
final String[] projection = {
column
};
try {
cursor = context.getContentResolver().query(uri, projection, selection, selectionArgs,
null);
if (cursor != null && cursor.moveToFirst()) {
final int column_index = cursor.getColumnIndexOrThrow(column);
return cursor.getString(column_index);
}
} finally {
if (cursor != null)
cursor.close();
}
return null;
}
/**
* Get content:// from segment list
* In the new Uri Authority of Google Photos, the last segment is not the content:// anymore
* So let's iterate through all segments and find the content uri!
*
* @param segments The list of segment
*/
private static String getContentFromSegments(List<String> segments) {
String contentPath = "";
for(String item : segments) {
if (item.startsWith("content://")) {
contentPath = item;
break;
}
}
return contentPath;
}
/**
* Check if a file exists on device
*
* @param filePath The absolute file path
*/
private static boolean fileExists(String filePath) {
File file = new File(filePath);
return file.exists();
}
/**
* Get full file path from external storage
*
* @param pathData The storage type and the relative path
*/
private static String getPathFromExtSD(String[] pathData) {
final String type = pathData[0];
final String relativePath = "/" + pathData[1];
String fullPath;
// on my Sony devices (4.4.4 & 5.1.1), `type` is a dynamic string
// something like "71F8-2C0A", some kind of unique id per storage
// don't know any API that can get the root path of that storage based on its id.
//
// so no "primary" type, but let the check here for other devices
if ("primary".equalsIgnoreCase(type)) {
fullPath = Environment.getExternalStorageDirectory() + relativePath;
if (fileExists(fullPath)) {
return fullPath;
}
}
fullPath = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DOCUMENTS) + relativePath;
if (fileExists(fullPath)) {
return fullPath;
}
// Environment.isExternalStorageRemovable() is `true` for external and internal storage
// so we cannot rely on it.
//
// instead, for each possible path, check if file exists
// we'll start with secondary storage as this could be our (physically) removable sd card
fullPath = System.getenv("SECONDARY_STORAGE") + relativePath;
if (fileExists(fullPath)) {
return fullPath;
}
fullPath = System.getenv("EXTERNAL_STORAGE") + relativePath;
if (fileExists(fullPath)) {
return fullPath;
}
return fullPath;
}
/**
* Get a file path from a Uri. This will get the the path for Storage Access
* Framework Documents, as well as the _data field for the MediaStore and
* other file-based ContentProviders.<br>
* <br>
* Callers should check whether the path is local before assuming it
* represents a local file.
*
* @param context The context.
* @param uri The Uri to query.
*/
private static String getPath(final Context context, final Uri uri) {
Log.d(TAG, "File - " +
"Authority: " + uri.getAuthority() +
", Fragment: " + uri.getFragment() +
", Port: " + uri.getPort() +
", Query: " + uri.getQuery() +
", Scheme: " + uri.getScheme() +
", Host: " + uri.getHost() +
", Segments: " + uri.getPathSegments().toString()
);
final boolean isKitKat = Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT;
// DocumentProvider
if (isKitKat && DocumentsContract.isDocumentUri(context, uri)) {
// ExternalStorageProvider
if (isExternalStorageDocument(uri)) {
final String docId = DocumentsContract.getDocumentId(uri);
final String[] split = docId.split(":");
final String type = split[0];
String fullPath = getPathFromExtSD(split);
if (!fullPath.isEmpty()) {
return fullPath;
}
else {
return null;
}
}
// DownloadsProvider
else if (isDownloadsDocument(uri)) {
// thanks to https://github.com/hiddentao/cordova-plugin-filepath/issues/34#issuecomment-430129959
Cursor cursor = null;
try {
cursor = context.getContentResolver().query(uri, new String[]{MediaStore.MediaColumns.DISPLAY_NAME}, null, null, null);
if (cursor != null && cursor.moveToFirst()) {
String fileName = cursor.getString(0);
String path = Environment.getExternalStorageDirectory().toString() + "/Download/" + fileName;
if (!TextUtils.isEmpty(path)) {
return path;
}
}
} finally {
if (cursor != null)
cursor.close();
}
//
final String id = DocumentsContract.getDocumentId(uri);
try {
final Uri contentUri = ContentUris.withAppendedId(
Uri.parse("content://downloads/public_downloads"), Long.valueOf(id));
return getDataColumn(context, contentUri, null, null);
} catch(NumberFormatException e) {
//In Android 8 and Android P the id is not a number
return uri.getPath().replaceFirst("^/document/raw:", "").replaceFirst("^raw:", "");
}
}
// MediaProvider
else if (isMediaDocument(uri)) {
final String docId = DocumentsContract.getDocumentId(uri);
final String[] split = docId.split(":");
final String type = split[0];
Uri contentUri = null;
if ("image".equals(type)) {
contentUri = MediaStore.Images.Media.EXTERNAL_CONTENT_URI;
} else if ("video".equals(type)) {
contentUri = MediaStore.Video.Media.EXTERNAL_CONTENT_URI;
} else if ("audio".equals(type)) {
contentUri = MediaStore.Audio.Media.EXTERNAL_CONTENT_URI;
}
final String selection = "_id=?";
final String[] selectionArgs = new String[] {
split[1]
};
return getDataColumn(context, contentUri, selection, selectionArgs);
}
else if(isGoogleDriveUri(uri)){
return getDriveFilePath(uri,context);
}
}
// MediaStore (and general)
else if ("content".equalsIgnoreCase(uri.getScheme())) {
// Return the remote address
if (isGooglePhotosUri(uri)) {
String contentPath = getContentFromSegments(uri.getPathSegments());
if (!contentPath.isEmpty()) {
return getPath(context, Uri.parse(contentPath));
}
else {
return null;
}
}
if(isGoogleDriveUri(uri)){
return getDriveFilePath(uri,context);
}
return getDataColumn(context, uri, null, null);
}
// File
else if ("file".equalsIgnoreCase(uri.getScheme())) {
return uri.getPath();
}
return null;
}
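/**
 * Copy the content behind a Google Drive Uri into the application cache
 * directory and return the path of the local copy.
 *
 * @param uri The Google Drive content Uri.
 * @param context The context used to resolve the content.
 */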
private static String getDriveFilePath(Uri uri,Context context){
Uri returnUri =uri;
Cursor returnCursor = context.getContentResolver().query(returnUri, null, null, null, null);
/*
 * Get the column indexes of the data in the Cursor,
 * move to the first row in the Cursor, get the data,
 * and display it.
 */
int nameIndex = returnCursor.getColumnIndex(OpenableColumns.DISPLAY_NAME);
int sizeIndex = returnCursor.getColumnIndex(OpenableColumns.SIZE);
returnCursor.moveToFirst();
String name = (returnCursor.getString(nameIndex));
String size = (Long.toString(returnCursor.getLong(sizeIndex)));
File file = new File(context.getCacheDir(),name);
try {
InputStream inputStream = context.getContentResolver().openInputStream(uri);
FileOutputStream outputStream = new FileOutputStream(file);
int read = 0;
int maxBufferSize = 1 * 1024 * 1024;
int bytesAvailable = inputStream.available();
//int bufferSize = 1024;
int bufferSize = Math.min(bytesAvailable, maxBufferSize);
final byte[] buffers = new byte[bufferSize];
while ((read = inputStream.read(buffers)) != -1) {
outputStream.write(buffers, 0, read);
}
Log.e("File Size","Size " + file.length());
inputStream.close();
outputStream.close();
Log.e("File Path","Path " + file.getPath());
Log.e("File Size","Size " + file.length());
}catch (Exception e){
Log.e("Exception",e.getMessage());
}
return file.getPath();
}
}
| [
"\"SECONDARY_STORAGE\"",
"\"EXTERNAL_STORAGE\""
]
| []
| [
"SECONDARY_STORAGE",
"EXTERNAL_STORAGE"
]
| [] | ["SECONDARY_STORAGE", "EXTERNAL_STORAGE"] | java | 2 | 0 | |
staging/src/k8s.io/legacy-cloud-providers/vsphere/vsphere_test.go | // +build !providerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"flag"
"io/ioutil"
"log"
"net/url"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"testing"
"github.com/vmware/govmomi/find"
lookup "github.com/vmware/govmomi/lookup/simulator"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/simulator"
"github.com/vmware/govmomi/simulator/vpx"
sts "github.com/vmware/govmomi/sts/simulator"
"github.com/vmware/govmomi/vapi/rest"
vapi "github.com/vmware/govmomi/vapi/simulator"
"github.com/vmware/govmomi/vapi/tags"
"github.com/vmware/govmomi/vim25/mo"
vmwaretypes "github.com/vmware/govmomi/vim25/types"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/klog/v2"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/legacy-cloud-providers/vsphere/vclib"
)
// localhostCert was generated from crypto/tls/generate_cert.go with the following command:
// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
var localhostCert = `-----BEGIN CERTIFICATE-----
MIIDGDCCAgCgAwIBAgIQTKCKn99d5HhQVCLln2Q+eTANBgkqhkiG9w0BAQsFADAS
MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEA1Z5/aTwqY706M34tn60l8ZHkanWDl8mM1pYf4Q7qg3zA9XqWLX6S
4rTYDYCb4stEasC72lQnbEWHbthiQE76zubP8WOFHdvGR3mjAvHWz4FxvLOTheZ+
3iDUrl6Aj9UIsYqzmpBJAoY4+vGGf+xHvuukHrVcFqR9ZuBdZuJ/HbbjUyuNr3X9
erNIr5Ha17gVzf17SNbYgNrX9gbCeEB8Z9Ox7dVuJhLDkpF0T/B5Zld3BjyUVY/T
cukU4dTVp6isbWPvCMRCZCCOpb+qIhxEjJ0n6tnPt8nf9lvDl4SWMl6X1bH+2EFa
a8R06G0QI+XhwPyjXUyCR8QEOZPCR5wyqQIDAQABo2gwZjAOBgNVHQ8BAf8EBAMC
AqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAuBgNVHREE
JzAlggtleGFtcGxlLmNvbYcEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATANBgkqhkiG
9w0BAQsFAAOCAQEAThqgJ/AFqaANsOp48lojDZfZBFxJQ3A4zfR/MgggUoQ9cP3V
rxuKAFWQjze1EZc7J9iO1WvH98lOGVNRY/t2VIrVoSsBiALP86Eew9WucP60tbv2
8/zsBDSfEo9Wl+Q/gwdEh8dgciUKROvCm76EgAwPGicMAgRsxXgwXHhS5e8nnbIE
Ewaqvb5dY++6kh0Oz+adtNT5OqOwXTIRI67WuEe6/B3Z4LNVPQDIj7ZUJGNw8e6L
F4nkUthwlKx4yEJHZBRuFPnO7Z81jNKuwL276+mczRH7piI6z9uyMV/JbEsOIxyL
W6CzB7pZ9Nj1YLpgzc1r6oONHLokMJJIz/IvkQ==
-----END CERTIFICATE-----`
// localhostKey is the private key for localhostCert.
// Fake value for testing.
var localhostKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA1Z5/aTwqY706M34tn60l8ZHkanWDl8mM1pYf4Q7qg3zA9XqW
LX6S4rTYDYCb4stEasC72lQnbEWHbthiQE76zubP8WOFHdvGR3mjAvHWz4FxvLOT
heZ+3iDUrl6Aj9UIsYqzmpBJAoY4+vGGf+xHvuukHrVcFqR9ZuBdZuJ/HbbjUyuN
r3X9erNIr5Ha17gVzf17SNbYgNrX9gbCeEB8Z9Ox7dVuJhLDkpF0T/B5Zld3BjyU
VY/TcukU4dTVp6isbWPvCMRCZCCOpb+qIhxEjJ0n6tnPt8nf9lvDl4SWMl6X1bH+
2EFaa8R06G0QI+XhwPyjXUyCR8QEOZPCR5wyqQIDAQABAoIBAFAJmb1pMIy8OpFO
hnOcYWoYepe0vgBiIOXJy9n8R7vKQ1X2f0w+b3SHw6eTd1TLSjAhVIEiJL85cdwD
MRTdQrXA30qXOioMzUa8eWpCCHUpD99e/TgfO4uoi2dluw+pBx/WUyLnSqOqfLDx
S66kbeFH0u86jm1hZibki7pfxLbxvu7KQgPe0meO5/13Retztz7/xa/pWIY71Zqd
YC8UckuQdWUTxfuQf0470lAK34GZlDy9tvdVOG/PmNkG4j6OQjy0Kmz4Uk7rewKo
ZbdphaLPJ2A4Rdqfn4WCoyDnxlfV861T922/dEDZEbNWiQpB81G8OfLL+FLHxyIT
LKEu4R0CgYEA4RDj9jatJ/wGkMZBt+UF05mcJlRVMEijqdKgFwR2PP8b924Ka1mj
9zqWsfbxQbdPdwsCeVBZrSlTEmuFSQLeWtqBxBKBTps/tUP0qZf7HjfSmcVI89WE
3ab8LFjfh4PtK/LOq2D1GRZZkFliqi0gKwYdDoK6gxXWwrumXq4c2l8CgYEA8vrX
dMuGCNDjNQkGXx3sr8pyHCDrSNR4Z4FrSlVUkgAW1L7FrCM911BuGh86FcOu9O/1
Ggo0E8ge7qhQiXhB5vOo7hiVzSp0FxxCtGSlpdp4W6wx6ZWK8+Pc+6Moos03XdG7
MKsdPGDciUn9VMOP3r8huX/btFTh90C/L50sH/cCgYAd02wyW8qUqux/0RYydZJR
GWE9Hx3u+SFfRv9aLYgxyyj8oEOXOFjnUYdY7D3KlK1ePEJGq2RG81wD6+XM6Clp
Zt2di0pBjYdi0S+iLfbkaUdqg1+ImLoz2YY/pkNxJQWQNmw2//FbMsAJxh6yKKrD
qNq+6oonBwTf55hDodVHBwKBgEHgEBnyM9ygBXmTgM645jqiwF0v75pHQH2PcO8u
Q0dyDr6PGjiZNWLyw2cBoFXWP9DYXbM5oPTcBMbfizY6DGP5G4uxzqtZHzBE0TDn
OKHGoWr5PG7/xDRrSrZOfe3lhWVCP2XqfnqoKCJwlOYuPws89n+8UmyJttm6DBt0
mUnxAoGBAIvbR87ZFXkvqstLs4KrdqTz4TQIcpzB3wENukHODPA6C1gzWTqp+OEe
GMNltPfGCLO+YmoMQuTpb0kECYV3k4jR3gXO6YvlL9KbY+UOA6P0dDX4ROi2Rklj
yh+lxFLYa1vlzzi9r8B7nkR9hrOGMvkfXF42X89g7lx4uMtu2I4q
-----END RSA PRIVATE KEY-----`
func configFromEnv() (cfg VSphereConfig, ok bool) {
var InsecureFlag bool
var err error
cfg.Global.VCenterIP = os.Getenv("VSPHERE_VCENTER")
cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT")
cfg.Global.User = os.Getenv("VSPHERE_USER")
cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD")
cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER")
cfg.Network.PublicNetwork = os.Getenv("VSPHERE_PUBLIC_NETWORK")
cfg.Global.DefaultDatastore = os.Getenv("VSPHERE_DATASTORE")
cfg.Disk.SCSIControllerType = os.Getenv("VSPHERE_SCSICONTROLLER_TYPE")
cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
cfg.Global.VMName = os.Getenv("VSPHERE_VM_NAME")
if os.Getenv("VSPHERE_INSECURE") != "" {
InsecureFlag, err = strconv.ParseBool(os.Getenv("VSPHERE_INSECURE"))
} else {
InsecureFlag = false
}
if err != nil {
log.Fatal(err)
}
cfg.Global.InsecureFlag = InsecureFlag
ok = (cfg.Global.VCenterIP != "" &&
cfg.Global.User != "")
return
}
// configFromSim starts a vcsim instance and returns config for use against the vcsim instance.
// The vcsim instance is configured with an empty tls.Config.
func configFromSim() (VSphereConfig, func()) {
return configFromSimWithTLS(new(tls.Config), true)
}
// configFromSimWithTLS starts a vcsim instance and returns config for use against the vcsim instance.
// The vcsim instance is configured with a tls.Config. The returned client
// config can be configured to allow/decline insecure connections.
func configFromSimWithTLS(tlsConfig *tls.Config, insecureAllowed bool) (VSphereConfig, func()) {
var cfg VSphereConfig
model := simulator.VPX()
err := model.Create()
if err != nil {
log.Fatal(err)
}
model.Service.TLS = tlsConfig
s := model.Service.NewServer()
// STS simulator
path, handler := sts.New(s.URL, vpx.Setting)
model.Service.ServeMux.Handle(path, handler)
// vAPI simulator
path, handler = vapi.New(s.URL, vpx.Setting)
model.Service.ServeMux.Handle(path, handler)
// Lookup Service simulator
model.Service.RegisterSDK(lookup.New())
cfg.Global.InsecureFlag = insecureAllowed
cfg.Global.VCenterIP = s.URL.Hostname()
cfg.Global.VCenterPort = s.URL.Port()
cfg.Global.User = s.URL.User.Username()
cfg.Global.Password, _ = s.URL.User.Password()
cfg.Global.Datacenter = vclib.TestDefaultDatacenter
cfg.Network.PublicNetwork = vclib.TestDefaultNetwork
cfg.Global.DefaultDatastore = vclib.TestDefaultDatastore
cfg.Disk.SCSIControllerType = os.Getenv("VSPHERE_SCSICONTROLLER_TYPE")
cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
cfg.Global.VMName = os.Getenv("VSPHERE_VM_NAME")
if cfg.Global.WorkingDir == "" {
cfg.Global.WorkingDir = "vm" // top-level Datacenter.VmFolder
}
uuid := simulator.Map.Any("VirtualMachine").(*simulator.VirtualMachine).Config.Uuid
getVMUUID = func() (string, error) { return uuid, nil }
return cfg, func() {
getVMUUID = GetVMUUID
s.Close()
model.Remove()
}
}
// configFromEnvOrSim returns config from configFromEnv if set, otherwise returns configFromSim.
func configFromEnvOrSim() (VSphereConfig, func()) {
cfg, ok := configFromEnv()
if ok {
return cfg, func() {}
}
return configFromSim()
}
func TestReadConfig(t *testing.T) {
_, err := readConfig(nil)
if err == nil {
t.Errorf("Should fail when no config is provided: %s", err)
}
// Fake values for testing.
cfg, err := readConfig(strings.NewReader(`
[Global]
server = 0.0.0.0
port = 443
user = user
password = password
insecure-flag = true
datacenter = us-west
vm-uuid = 1234
vm-name = vmname
ca-file = /some/path/to/a/ca.pem
`))
if err != nil {
t.Fatalf("Should succeed when a valid config is provided: %s", err)
}
if cfg.Global.VCenterIP != "0.0.0.0" {
t.Errorf("incorrect vcenter ip: %s", cfg.Global.VCenterIP)
}
if cfg.Global.Datacenter != "us-west" {
t.Errorf("incorrect datacenter: %s", cfg.Global.Datacenter)
}
if cfg.Global.VMUUID != "1234" {
t.Errorf("incorrect vm-uuid: %s", cfg.Global.VMUUID)
}
if cfg.Global.VMName != "vmname" {
t.Errorf("incorrect vm-name: %s", cfg.Global.VMName)
}
if cfg.Global.CAFile != "/some/path/to/a/ca.pem" {
t.Errorf("incorrect ca-file: %s", cfg.Global.CAFile)
}
}
func TestNewVSphere(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
}
_, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
}
func TestVSphereLogin(t *testing.T) {
cfg, cleanup := configFromEnvOrSim()
defer cleanup()
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create vSphere client
vcInstance, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vcInstance.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
vcInstance.conn.Logout(ctx)
}
func TestVSphereLoginByToken(t *testing.T) {
cfg, cleanup := configFromSim()
defer cleanup()
// Configure for SAML token auth
cfg.Global.User = localhostCert
cfg.Global.Password = localhostKey
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
ctx := context.Background()
// Create vSphere client
vcInstance, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vcInstance.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
vcInstance.conn.Logout(ctx)
}
func TestVSphereLoginWithCaCert(t *testing.T) {
caCertPEM, err := ioutil.ReadFile("./vclib/testdata/ca.pem")
if err != nil {
t.Fatalf("Could not read ca cert from file")
}
serverCert, err := tls.LoadX509KeyPair("./vclib/testdata/server.pem", "./vclib/testdata/server.key")
if err != nil {
t.Fatalf("Could not load server cert and server key from files: %#v", err)
}
certPool := x509.NewCertPool()
if ok := certPool.AppendCertsFromPEM(caCertPEM); !ok {
t.Fatalf("Cannot add CA to CAPool")
}
tlsConfig := tls.Config{
Certificates: []tls.Certificate{serverCert},
RootCAs: certPool,
}
cfg, cleanup := configFromSimWithTLS(&tlsConfig, false)
defer cleanup()
cfg.Global.CAFile = "./vclib/testdata/ca.pem"
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
ctx := context.Background()
// Create vSphere client
vcInstance, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vcInstance.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
vcInstance.conn.Logout(ctx)
}
func TestZonesNoConfig(t *testing.T) {
_, ok := new(VSphere).Zones()
if ok {
t.Fatalf("Zones() should return false without VCP configured")
}
}
func TestZones(t *testing.T) {
// Any context will do
ctx := context.Background()
// Create a vcsim instance
cfg, cleanup := configFromSim()
defer cleanup()
// Configure for SAML token auth
cfg.Global.User = localhostCert
cfg.Global.Password = localhostKey
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
// Configure region and zone categories
vs.cfg.Labels.Region = "k8s-region"
vs.cfg.Labels.Zone = "k8s-zone"
// Create vSphere client
vsi, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vsi.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
// Lookup Datacenter for this test's Workspace
dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
if err != nil {
t.Fatal(err)
}
// Lookup VM's host where we'll attach tags
host, err := dc.GetHostByVMUUID(ctx, vs.vmUUID)
if err != nil {
t.Fatal(err)
}
// Property Collector instance
pc := property.DefaultCollector(vsi.conn.Client)
// Tag manager instance
m := tags.NewManager(rest.NewClient(vsi.conn.Client))
signer, err := vsi.conn.Signer(ctx, vsi.conn.Client)
if err != nil {
t.Fatal(err)
}
if err = m.LoginByToken(m.WithSigner(ctx, signer)); err != nil {
t.Fatal(err)
}
// Create a region category
regionID, err := m.CreateCategory(ctx, &tags.Category{Name: vs.cfg.Labels.Region})
if err != nil {
t.Fatal(err)
}
// Create a region tag
regionID, err = m.CreateTag(ctx, &tags.Tag{CategoryID: regionID, Name: "k8s-region-US"})
if err != nil {
t.Fatal(err)
}
// Create a zone category
zoneID, err := m.CreateCategory(ctx, &tags.Category{Name: vs.cfg.Labels.Zone})
if err != nil {
t.Fatal(err)
}
// Create a zone tag
zoneID, err = m.CreateTag(ctx, &tags.Tag{CategoryID: zoneID, Name: "k8s-zone-US-CA1"})
if err != nil {
t.Fatal(err)
}
// Create a random category
randomID, err := m.CreateCategory(ctx, &tags.Category{Name: "random-cat"})
if err != nil {
t.Fatal(err)
}
// Create a random tag
randomID, err = m.CreateTag(ctx, &tags.Tag{CategoryID: randomID, Name: "random-tag"})
if err != nil {
t.Fatal(err)
}
// Attach a random tag to VM's host
if err = m.AttachTag(ctx, randomID, host); err != nil {
t.Fatal(err)
}
// Expecting Zones() to return true, indicating VCP supports the Zones interface
zones, ok := vs.Zones()
if !ok {
t.Fatalf("zones=%t", ok)
}
// GetZone() tests, covering error and success paths
tests := []struct {
name string // name of the test for logging
fail bool // expect GetZone() to return error if true
prep func() // prepare vCenter state for the test
}{
{"no tags", true, func() {
// no prep
}},
{"no zone tag", true, func() {
if err = m.AttachTag(ctx, regionID, host); err != nil {
t.Fatal(err)
}
}},
{"host tags set", false, func() {
if err = m.AttachTag(ctx, zoneID, host); err != nil {
t.Fatal(err)
}
}},
{"host tags removed", true, func() {
if err = m.DetachTag(ctx, zoneID, host); err != nil {
t.Fatal(err)
}
if err = m.DetachTag(ctx, regionID, host); err != nil {
t.Fatal(err)
}
}},
{"dc region, cluster zone", false, func() {
var h mo.HostSystem
if err = pc.RetrieveOne(ctx, host.Reference(), []string{"parent"}, &h); err != nil {
t.Fatal(err)
}
// Attach region tag to Datacenter
if err = m.AttachTag(ctx, regionID, dc); err != nil {
t.Fatal(err)
}
// Attach zone tag to Cluster
if err = m.AttachTag(ctx, zoneID, h.Parent); err != nil {
t.Fatal(err)
}
}},
}
for _, test := range tests {
test.prep()
zone, err := zones.GetZone(ctx)
if test.fail {
if err == nil {
t.Errorf("%s: expected error", test.name)
} else {
t.Logf("%s: expected error=%s", test.name, err)
}
} else {
if err != nil {
t.Errorf("%s: %s", test.name, err)
}
t.Logf("zone=%#v", zone)
}
}
}
func TestGetZoneToHosts(t *testing.T) {
// Common setup for all testcases in this test
ctx := context.TODO()
// Create a vcsim instance
cfg, cleanup := configFromSim()
defer cleanup()
// Create vSphere configuration object
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
// Configure region and zone categories
vs.cfg.Labels.Region = "k8s-region"
vs.cfg.Labels.Zone = "k8s-zone"
// Create vSphere client
vsi, ok := vs.vsphereInstanceMap[cfg.Global.VCenterIP]
if !ok {
t.Fatalf("Couldn't get vSphere instance: %s", cfg.Global.VCenterIP)
}
err = vsi.conn.Connect(ctx)
if err != nil {
t.Errorf("Failed to connect to vSphere: %s", err)
}
// Lookup Datacenter for this test's Workspace
dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
if err != nil {
t.Fatal(err)
}
// Property Collector instance
pc := property.DefaultCollector(vsi.conn.Client)
// find all hosts in VC
finder := find.NewFinder(vsi.conn.Client, true)
finder.SetDatacenter(dc.Datacenter)
allVcHostsList, err := finder.HostSystemList(ctx, "*")
if err != nil {
t.Fatal(err)
}
var allVcHosts []vmwaretypes.ManagedObjectReference
for _, h := range allVcHostsList {
allVcHosts = append(allVcHosts, h.Reference())
}
// choose a cluster to apply zone/region tags
cluster := simulator.Map.Any("ClusterComputeResource")
var c mo.ClusterComputeResource
if err := pc.RetrieveOne(ctx, cluster.Reference(), []string{"host"}, &c); err != nil {
t.Fatal(err)
}
// choose one of the host inside this cluster to apply zone/region tags
if c.Host == nil || len(c.Host) == 0 {
t.Fatalf("This test needs a host inside a cluster.")
}
clusterHosts := c.Host
sortHosts(clusterHosts)
// pick the first host in the cluster to apply tags
host := clusterHosts[0]
remainingHostsInCluster := clusterHosts[1:]
// Tag manager instance
m := tags.NewManager(rest.NewClient(vsi.conn.Client))
user := url.UserPassword(vsi.conn.Username, vsi.conn.Password)
if err = m.Login(ctx, user); err != nil {
t.Fatal(err)
}
// Create a region category
regionCat, err := m.CreateCategory(ctx, &tags.Category{Name: vs.cfg.Labels.Region})
if err != nil {
t.Fatal(err)
}
// Create a region tag
regionName := "k8s-region-US"
regionTag, err := m.CreateTag(ctx, &tags.Tag{CategoryID: regionCat, Name: regionName})
if err != nil {
t.Fatal(err)
}
// Create a zone category
zoneCat, err := m.CreateCategory(ctx, &tags.Category{Name: vs.cfg.Labels.Zone})
if err != nil {
t.Fatal(err)
}
// Create a zone tag
zone1Name := "k8s-zone-US-CA1"
zone1Tag, err := m.CreateTag(ctx, &tags.Tag{CategoryID: zoneCat, Name: zone1Name})
if err != nil {
t.Fatal(err)
}
zone1 := cloudprovider.Zone{FailureDomain: zone1Name, Region: regionName}
// Create a second zone tag
zone2Name := "k8s-zone-US-CA2"
zone2Tag, err := m.CreateTag(ctx, &tags.Tag{CategoryID: zoneCat, Name: zone2Name})
if err != nil {
t.Fatal(err)
}
zone2 := cloudprovider.Zone{FailureDomain: zone2Name, Region: regionName}
testcases := []struct {
name string
tags map[string][]mo.Reference
zoneToHosts map[cloudprovider.Zone][]vmwaretypes.ManagedObjectReference
}{
{
name: "Zone and Region tags on host",
tags: map[string][]mo.Reference{zone1Tag: {host}, regionTag: {host}},
zoneToHosts: map[cloudprovider.Zone][]vmwaretypes.ManagedObjectReference{zone1: {host}},
},
{
name: "Zone on host Region on datacenter",
tags: map[string][]mo.Reference{zone1Tag: {host}, regionTag: {dc}},
zoneToHosts: map[cloudprovider.Zone][]vmwaretypes.ManagedObjectReference{zone1: {host}},
},
{
name: "Zone on cluster Region on datacenter",
tags: map[string][]mo.Reference{zone1Tag: {cluster}, regionTag: {dc}},
zoneToHosts: map[cloudprovider.Zone][]vmwaretypes.ManagedObjectReference{zone1: clusterHosts},
},
{
name: "Zone on cluster and override on host",
tags: map[string][]mo.Reference{zone2Tag: {cluster}, zone1Tag: {host}, regionTag: {dc}},
zoneToHosts: map[cloudprovider.Zone][]vmwaretypes.ManagedObjectReference{zone1: {host}, zone2: remainingHostsInCluster},
},
{
name: "Zone and Region on datacenter",
tags: map[string][]mo.Reference{zone1Tag: {dc}, regionTag: {dc}},
zoneToHosts: map[cloudprovider.Zone][]vmwaretypes.ManagedObjectReference{zone1: allVcHosts},
},
}
for _, testcase := range testcases {
t.Run(testcase.name, func(t *testing.T) {
// apply tags to datacenter/cluster/host as per this testcase
for tagId, objects := range testcase.tags {
for _, object := range objects {
if err := m.AttachTag(ctx, tagId, object); err != nil {
t.Fatal(err)
}
}
}
// run the test
zoneToHosts, err := vs.GetZoneToHosts(ctx, vsi)
if err != nil {
t.Errorf("unexpected error when calling GetZoneToHosts: %q", err)
}
// do not depend on the sort order of hosts in result
sortHostsMap(zoneToHosts)
if !reflect.DeepEqual(zoneToHosts, testcase.zoneToHosts) {
t.Logf("expected result: %+v", testcase.zoneToHosts)
t.Logf("actual result: %+v", zoneToHosts)
t.Error("unexpected result from GetZoneToHosts")
}
// clean up tags applied on datacenter/cluster/host for this testcase
for tagId, objects := range testcase.tags {
for _, object := range objects {
if err = m.DetachTag(ctx, tagId, object); err != nil {
t.Fatal(err)
}
}
}
})
}
}
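// sortHostsMap sorts every zone's host list in place so that test comparisons
// do not depend on the order in which hosts were discovered.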
func sortHostsMap(zoneToHosts map[cloudprovider.Zone][]vmwaretypes.ManagedObjectReference) {
for _, hosts := range zoneToHosts {
sortHosts(hosts)
}
}
func sortHosts(hosts []vmwaretypes.ManagedObjectReference) {
sort.Slice(hosts, func(i, j int) bool {
return hosts[i].Value < hosts[j].Value
})
}
func TestInstances(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
}
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
i, ok := vs.Instances()
if !ok {
t.Fatalf("Instances() returned false")
}
nodeName, err := vs.CurrentNodeName(context.TODO(), "")
if err != nil {
t.Fatalf("CurrentNodeName() failed: %s", err)
}
nonExistingVM := types.NodeName(rand.String(15))
instanceID, err := i.InstanceID(context.TODO(), nodeName)
if err != nil {
t.Fatalf("Instances.InstanceID(%s) failed: %s", nodeName, err)
}
t.Logf("Found InstanceID(%s) = %s\n", nodeName, instanceID)
_, err = i.InstanceID(context.TODO(), nonExistingVM)
if err == cloudprovider.InstanceNotFound {
t.Logf("VM %s was not found as expected\n", nonExistingVM)
} else if err == nil {
t.Fatalf("Instances.InstanceID did not fail as expected, VM %s was found", nonExistingVM)
} else {
t.Fatalf("Instances.InstanceID did not fail as expected, err: %v", err)
}
addrs, err := i.NodeAddresses(context.TODO(), nodeName)
if err != nil {
t.Fatalf("Instances.NodeAddresses(%s) failed: %s", nodeName, err)
}
found := false
for _, addr := range addrs {
if addr.Type == v1.NodeHostName {
found = true
}
}
if found == false {
t.Fatalf("NodeAddresses does not report hostname, %s %s", nodeName, addrs)
}
t.Logf("Found NodeAddresses(%s) = %s\n", nodeName, addrs)
}
func TestVolumes(t *testing.T) {
cfg, ok := configFromEnv()
if !ok {
t.Skipf("No config found in environment")
}
vs, err := newControllerNode(cfg)
if err != nil {
t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
}
nodeName, err := vs.CurrentNodeName(context.TODO(), "")
if err != nil {
t.Fatalf("CurrentNodeName() failed: %s", err)
}
volumeOptions := &vclib.VolumeOptions{
CapacityKB: 1 * 1024 * 1024,
Tags: nil,
Name: "kubernetes-test-volume-" + rand.String(10),
DiskFormat: "thin"}
volPath, err := vs.CreateVolume(volumeOptions)
if err != nil {
t.Fatalf("Cannot create a new VMDK volume: %v", err)
}
_, err = vs.AttachDisk(volPath, "", "")
if err != nil {
t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, nodeName, err)
}
err = vs.DetachDisk(volPath, "")
if err != nil {
t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", volPath, nodeName, err)
}
// todo: Deleting a volume after detach currently not working through API or UI (vSphere)
// err = vs.DeleteVolume(volPath)
// if err != nil {
// t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err)
// }
}
func TestSecretVSphereConfig(t *testing.T) {
var vs *VSphere
var (
username = "user"
password = "password" // Fake value for testing.
)
var testcases = []struct {
testName string
conf string
expectedIsSecretProvided bool
expectedSecretNotManaged bool
expectedUsername string
expectedPassword string
expectedError error
expectedThumbprints map[string]string
}{
{
testName: "Username and password with old configuration",
conf: `[Global]
server = 0.0.0.0
user = user
password = password
datacenter = us-west
working-dir = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace in old configuration",
conf: `[Global]
server = 0.0.0.0
datacenter = us-west
secret-name = "vccreds"
secret-namespace = "kube-system"
working-dir = kubernetes
`,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace with Username and Password in old configuration",
conf: `[Global]
server = 0.0.0.0
user = user
password = password
datacenter = us-west
secret-name = "vccreds"
secret-namespace = "kube-system"
working-dir = kubernetes
`,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace with Username missing in old configuration",
conf: `[Global]
server = 0.0.0.0
password = password
datacenter = us-west
secret-name = "vccreds"
secret-namespace = "kube-system"
working-dir = kubernetes
`,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "SecretNamespace missing with Username and Password in old configuration",
conf: `[Global]
server = 0.0.0.0
user = user
password = password
datacenter = us-west
secret-name = "vccreds"
working-dir = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
{
testName: "SecretNamespace and Username missing in old configuration",
conf: `[Global]
server = 0.0.0.0
password = password
datacenter = us-west
secret-name = "vccreds"
working-dir = kubernetes
`,
expectedError: ErrUsernameMissing,
},
{
testName: "SecretNamespace and Password missing in old configuration",
conf: `[Global]
server = 0.0.0.0
user = user
datacenter = us-west
secret-name = "vccreds"
working-dir = kubernetes
`,
expectedError: ErrPasswordMissing,
},
{
testName: "SecretNamespace, Username and Password missing in old configuration",
conf: `[Global]
server = 0.0.0.0
datacenter = us-west
secret-name = "vccreds"
working-dir = kubernetes
`,
expectedError: ErrUsernameMissing,
},
{
testName: "Username and password with new configuration but username and password in global section",
conf: `[Global]
user = user
password = password
datacenter = us-west
[VirtualCenter "0.0.0.0"]
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
{
testName: "Username and password with new configuration, username and password in virtualcenter section",
conf: `[Global]
server = 0.0.0.0
port = 443
insecure-flag = true
datacenter = us-west
[VirtualCenter "0.0.0.0"]
user = user
password = password
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace with new configuration",
conf: `[Global]
server = 0.0.0.0
secret-name = "vccreds"
secret-namespace = "kube-system"
datacenter = us-west
[VirtualCenter "0.0.0.0"]
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace with new configuration, but non-managed",
conf: `[Global]
server = 0.0.0.0
secret-name = "vccreds"
secret-namespace = "kube-system"
secret-not-managed = true
datacenter = us-west
[VirtualCenter "0.0.0.0"]
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedSecretNotManaged: true,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "SecretName and SecretNamespace with Username missing in new configuration",
conf: `[Global]
server = 0.0.0.0
port = 443
insecure-flag = true
datacenter = us-west
secret-name = "vccreds"
secret-namespace = "kube-system"
[VirtualCenter "0.0.0.0"]
password = password
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedIsSecretProvided: true,
expectedError: nil,
},
{
testName: "virtual centers with a thumbprint",
conf: `[Global]
server = global
user = user
password = password
datacenter = us-west
thumbprint = "thumbprint:global"
working-dir = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
expectedThumbprints: map[string]string{
"global": "thumbprint:global",
},
},
{
testName: "Multiple virtual centers with different thumbprints",
conf: `[Global]
user = user
password = password
datacenter = us-west
[VirtualCenter "0.0.0.0"]
thumbprint = thumbprint:0
[VirtualCenter "no_thumbprint"]
[VirtualCenter "1.1.1.1"]
thumbprint = thumbprint:1
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
expectedThumbprints: map[string]string{
"0.0.0.0": "thumbprint:0",
"1.1.1.1": "thumbprint:1",
},
},
{
testName: "Multiple virtual centers use the global CA cert",
conf: `[Global]
user = user
password = password
datacenter = us-west
ca-file = /some/path/to/my/trusted/ca.pem
[VirtualCenter "0.0.0.0"]
user = user
password = password
[VirtualCenter "1.1.1.1"]
user = user
password = password
[Workspace]
server = 0.0.0.0
datacenter = us-west
folder = kubernetes
`,
expectedUsername: username,
expectedPassword: password,
expectedError: nil,
},
}
for _, testcase := range testcases {
t.Logf("Executing Testcase: %s", testcase.testName)
cfg, err := readConfig(strings.NewReader(testcase.conf))
if err != nil {
t.Fatalf("Should succeed when a valid config is provided: %s", err)
}
vs, err = buildVSphereFromConfig(cfg)
if err != testcase.expectedError {
t.Fatalf("Should succeed when a valid config is provided: %s", err)
}
if err != nil {
continue
}
if vs.isSecretInfoProvided != testcase.expectedIsSecretProvided {
t.Fatalf("SecretName and SecretNamespace was expected in config %s. error: %s",
testcase.conf, err)
}
if !testcase.expectedIsSecretProvided {
for _, vsInstance := range vs.vsphereInstanceMap {
if vsInstance.conn.Username != testcase.expectedUsername {
t.Fatalf("Expected username %s doesn't match actual username %s in config %s. error: %s",
testcase.expectedUsername, vsInstance.conn.Username, testcase.conf, err)
}
if vsInstance.conn.Password != testcase.expectedPassword {
t.Fatalf("Expected password %s doesn't match actual password %s in config %s. error: %s",
testcase.expectedPassword, vsInstance.conn.Password, testcase.conf, err)
}
}
}
if testcase.expectedSecretNotManaged && vs.isSecretManaged {
t.Fatalf("Expected secret being non-managed but vs.isSecretManaged: %v", vs.isSecretManaged)
} else if !testcase.expectedSecretNotManaged && !vs.isSecretManaged {
t.Fatalf("Expected secret being managed but vs.isSecretManaged: %v", vs.isSecretManaged)
}
// Check, if all the expected thumbprints are configured
for instanceName, expectedThumbprint := range testcase.expectedThumbprints {
instanceConfig, ok := vs.vsphereInstanceMap[instanceName]
if !ok {
t.Fatalf("Could not find configuration for instance %s", instanceName)
}
if actualThumbprint := instanceConfig.conn.Thumbprint; actualThumbprint != expectedThumbprint {
t.Fatalf(
"Expected thumbprint for instance '%s' to be '%s', got '%s'",
instanceName, expectedThumbprint, actualThumbprint,
)
}
}
// Check, if all connections are configured with the global CA certificate
if expectedCaPath := cfg.Global.CAFile; expectedCaPath != "" {
for name, instance := range vs.vsphereInstanceMap {
if actualCaPath := instance.conn.CACert; actualCaPath != expectedCaPath {
t.Fatalf(
"Expected CA certificate path for instance '%s' to be the globally configured one ('%s'), got '%s'",
name, expectedCaPath, actualCaPath,
)
}
}
}
}
}
func fakeSecret(name, namespace, datacenter, user, password string) *v1.Secret {
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Data: map[string][]byte{
"vcenter." + datacenter + ".password": []byte(user),
"vcenter." + datacenter + ".username": []byte(password),
},
}
}
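// buffer is a goroutine-safe bytes.Buffer used to capture klog output in these tests.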
type buffer struct {
b bytes.Buffer
rw sync.RWMutex
}
func (b *buffer) String() string {
b.rw.RLock()
defer b.rw.RUnlock()
return b.b.String()
}
func (b *buffer) Write(p []byte) (n int, err error) {
b.rw.Lock()
defer b.rw.Unlock()
return b.b.Write(p)
}
func TestSecretUpdated(t *testing.T) {
datacenter := "0.0.0.0"
secretName := "vccreds"
secretNamespace := "kube-system"
username := "test-username"
password := "test-password"
basicSecret := fakeSecret(secretName, secretNamespace, datacenter, username, password)
cfg, cleanup := configFromSim()
defer cleanup()
cfg.Global.User = username
cfg.Global.Password = password
cfg.Global.Datacenter = datacenter
cfg.Global.SecretName = secretName
cfg.Global.SecretNamespace = secretNamespace
vsphere, err := buildVSphereFromConfig(cfg)
if err != nil {
t.Fatalf("Should succeed when a valid config is provided: %s", err)
}
klog.Flush()
klog.InitFlags(nil)
flag.Set("logtostderr", "false")
flag.Set("alsologtostderr", "false")
flag.Set("v", "9")
flag.Parse()
testcases := []struct {
name string
oldSecret *v1.Secret
secret *v1.Secret
expectOutput bool
expectErrOutput bool
}{
{
name: "Secrets are equal",
oldSecret: basicSecret.DeepCopy(),
secret: basicSecret.DeepCopy(),
expectOutput: false,
},
{
name: "Secret with a different name",
oldSecret: basicSecret.DeepCopy(),
secret: fakeSecret("different", secretNamespace, datacenter, username, password),
expectOutput: false,
},
{
name: "Secret with a different data",
oldSecret: basicSecret.DeepCopy(),
secret: fakeSecret(secretName, secretNamespace, datacenter, "...", "..."),
expectOutput: true,
},
{
name: "Secret being nil",
oldSecret: basicSecret.DeepCopy(),
secret: nil,
expectOutput: true,
expectErrOutput: true,
},
}
for _, testcase := range testcases {
t.Run(testcase.name, func(t *testing.T) {
buf := new(buffer)
errBuf := new(buffer)
klog.SetOutputBySeverity("INFO", buf)
klog.SetOutputBySeverity("WARNING", errBuf)
vsphere.SecretUpdated(testcase.oldSecret, testcase.secret)
klog.Flush()
if testcase.expectOutput && len(buf.String()) == 0 {
t.Fatalf("Expected log secret update for secrets:\nOld:\n\t%+v\nNew\n\t%+v", testcase.oldSecret, testcase.secret)
} else if !testcase.expectOutput && len(buf.String()) > 0 {
t.Fatalf("Unexpected log messages for secrets:\nOld:\n\t%+v\n\nNew:\n\t%+v\nMessage:%s", testcase.oldSecret, testcase.secret, buf.String())
}
if testcase.expectErrOutput && len(errBuf.String()) == 0 {
t.Fatalf("Expected log error output on secret update for secrets:\nOld:\n\t%+v\nNew\n\t%+v", testcase.oldSecret, testcase.secret)
} else if !testcase.expectErrOutput && len(errBuf.String()) > 0 {
t.Fatalf("Unexpected log error messages for secrets:\nOld:\n\t%+v\n\nNew:\n\t%+v\nMessage:%s", testcase.oldSecret, testcase.secret, errBuf.String())
}
})
}
}
| [
"\"VSPHERE_VCENTER\"",
"\"VSPHERE_VCENTER_PORT\"",
"\"VSPHERE_USER\"",
"\"VSPHERE_PASSWORD\"",
"\"VSPHERE_DATACENTER\"",
"\"VSPHERE_PUBLIC_NETWORK\"",
"\"VSPHERE_DATASTORE\"",
"\"VSPHERE_SCSICONTROLLER_TYPE\"",
"\"VSPHERE_WORKING_DIR\"",
"\"VSPHERE_VM_NAME\"",
"\"VSPHERE_INSECURE\"",
"\"VSPHERE_INSECURE\"",
"\"VSPHERE_SCSICONTROLLER_TYPE\"",
"\"VSPHERE_WORKING_DIR\"",
"\"VSPHERE_VM_NAME\""
]
| []
| [
"VSPHERE_VCENTER",
"VSPHERE_WORKING_DIR",
"VSPHERE_INSECURE",
"VSPHERE_DATACENTER",
"VSPHERE_SCSICONTROLLER_TYPE",
"VSPHERE_USER",
"VSPHERE_PASSWORD",
"VSPHERE_PUBLIC_NETWORK",
"VSPHERE_DATASTORE",
"VSPHERE_VM_NAME",
"VSPHERE_VCENTER_PORT"
]
| [] | ["VSPHERE_VCENTER", "VSPHERE_WORKING_DIR", "VSPHERE_INSECURE", "VSPHERE_DATACENTER", "VSPHERE_SCSICONTROLLER_TYPE", "VSPHERE_USER", "VSPHERE_PASSWORD", "VSPHERE_PUBLIC_NETWORK", "VSPHERE_DATASTORE", "VSPHERE_VM_NAME", "VSPHERE_VCENTER_PORT"] | go | 11 | 0 | |
utils/utils.go | package utils
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"index/suffixarray"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
// Go is a basic promise implementation: it calls a function in a goroutine
// and returns a channel which will later yield the function's return value.
func Go(f func() error) chan error {
ch := make(chan error)
go func() {
ch <- f()
}()
return ch
}
// Request a given URL and return an io.Reader
func Download(url string, stderr io.Writer) (*http.Response, error) {
var resp *http.Response
var err error
if resp, err = http.Get(url); err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
}
return resp, nil
}
// Debugf displays a debug message, with the caller's file and line number,
// when the DEBUG environment variable is set; it does nothing otherwise.
func Debugf(format string, a ...interface{}) {
if os.Getenv("DEBUG") != "" {
// Retrieve the stack infos
_, file, line, ok := runtime.Caller(1)
if !ok {
file = "<unknown>"
line = -1
} else {
file = file[strings.LastIndex(file, "/")+1:]
}
fmt.Fprintf(os.Stderr, fmt.Sprintf("[debug] %s:%d %s\n", file, line, format), a...)
}
}
// Reader with progress bar
type progressReader struct {
reader io.ReadCloser // Stream to read from
output io.Writer // Where to send progress bar to
readTotal int // Expected stream length (bytes)
readProgress int // How much has been read so far (bytes)
lastUpdate int // How many bytes read at last update
template string // Template to print. Default "%v/%v (%v)"
sf *StreamFormatter
}
func (r *progressReader) Read(p []byte) (n int, err error) {
read, err := io.ReadCloser(r.reader).Read(p)
r.readProgress += read
updateEvery := 1024 * 512 //512kB
if r.readTotal > 0 {
// Update progress for every 1% read if 1% < 512kB
if increment := int(0.01 * float64(r.readTotal)); increment < updateEvery {
updateEvery = increment
}
}
if r.readProgress-r.lastUpdate > updateEvery || err != nil {
if r.readTotal > 0 {
fmt.Fprintf(r.output, r.template, HumanSize(int64(r.readProgress)), HumanSize(int64(r.readTotal)), fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
} else {
fmt.Fprintf(r.output, r.template, r.readProgress, "?", "n/a")
}
r.lastUpdate = r.readProgress
}
// Send newline when complete
if err != nil {
r.output.Write(r.sf.FormatStatus(""))
}
return read, err
}
func (r *progressReader) Close() error {
return io.ReadCloser(r.reader).Close()
}
func ProgressReader(r io.ReadCloser, size int, output io.Writer, template []byte, sf *StreamFormatter) *progressReader {
tpl := string(template)
if tpl == "" {
tpl = string(sf.FormatProgress("", "%8v/%v (%v)"))
}
return &progressReader{r, NewWriteFlusher(output), size, 0, 0, tpl, sf}
}
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.)
func HumanDuration(d time.Duration) string {
if seconds := int(d.Seconds()); seconds < 1 {
return "Less than a second"
} else if seconds < 60 {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
} else if minutes < 60 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours()); hours == 1 {
return "About an hour"
} else if hours < 48 {
return fmt.Sprintf("%d hours", hours)
} else if hours < 24*7*2 {
return fmt.Sprintf("%d days", hours/24)
} else if hours < 24*30*3 {
return fmt.Sprintf("%d weeks", hours/24/7)
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%f years", d.Hours()/24/365)
}
// HumanSize returns a human-readable approximation of a size
// using SI standard (eg. "44kB", "17MB")
func HumanSize(size int64) string {
i := 0
var sizef float64
sizef = float64(size)
units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
for sizef >= 1000.0 {
sizef = sizef / 1000.0
i++
}
return fmt.Sprintf("%.4g %s", sizef, units[i])
}
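// Trunc shortens s to at most maxlen characters.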
func Trunc(s string, maxlen int) string {
if len(s) <= maxlen {
return s
}
return s[:maxlen]
}
// Figure out the absolute path of our own binary
func SelfPath() string {
path, err := exec.LookPath(os.Args[0])
if err != nil {
panic(err)
}
path, err = filepath.Abs(path)
if err != nil {
panic(err)
}
return path
}
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
return len(buf), nil
}
type nopWriteCloser struct {
io.Writer
}
func (w *nopWriteCloser) Close() error { return nil }
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
type bufReader struct {
sync.Mutex
buf *bytes.Buffer
reader io.Reader
err error
wait sync.Cond
}
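// NewBufReader wraps r in a bufReader that drains it from a background
// goroutine, so the underlying reader is never blocked waiting for a consumer.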
func NewBufReader(r io.Reader) *bufReader {
reader := &bufReader{
buf: &bytes.Buffer{},
reader: r,
}
reader.wait.L = &reader.Mutex
go reader.drain()
return reader
}
func (r *bufReader) drain() {
buf := make([]byte, 1024)
for {
n, err := r.reader.Read(buf)
r.Lock()
if err != nil {
r.err = err
} else {
r.buf.Write(buf[0:n])
}
r.wait.Signal()
r.Unlock()
if err != nil {
break
}
}
}
func (r *bufReader) Read(p []byte) (n int, err error) {
r.Lock()
defer r.Unlock()
for {
n, err = r.buf.Read(p)
if n > 0 {
return n, err
}
if r.err != nil {
return 0, r.err
}
r.wait.Wait()
}
}
func (r *bufReader) Close() error {
closer, ok := r.reader.(io.ReadCloser)
if !ok {
return nil
}
return closer.Close()
}
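// WriteBroadcaster duplicates everything written to it to a set of registered
// writers; when a writer is registered with a stream name, each line is wrapped
// in a JSONLog entry before being forwarded.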
type WriteBroadcaster struct {
sync.Mutex
buf *bytes.Buffer
writers map[StreamWriter]bool
}
type StreamWriter struct {
wc io.WriteCloser
stream string
}
func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) {
w.Lock()
sw := StreamWriter{wc: writer, stream: stream}
w.writers[sw] = true
w.Unlock()
}
type JSONLog struct {
Log string `json:"log,omitempty"`
Stream string `json:"stream,omitempty"`
Created time.Time `json:"time"`
}
func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
w.Lock()
defer w.Unlock()
w.buf.Write(p)
for sw := range w.writers {
lp := p
if sw.stream != "" {
lp = nil
for {
line, err := w.buf.ReadString('\n')
if err != nil {
w.buf.Write([]byte(line))
break
}
b, err := json.Marshal(&JSONLog{Log: line, Stream: sw.stream, Created: time.Now()})
if err != nil {
// On error, evict the writer
delete(w.writers, sw)
continue
}
lp = append(lp, b...)
}
}
if n, err := sw.wc.Write(lp); err != nil || n != len(lp) {
// On error, evict the writer
delete(w.writers, sw)
}
}
return len(p), nil
}
func (w *WriteBroadcaster) CloseWriters() error {
w.Lock()
defer w.Unlock()
for sw := range w.writers {
sw.wc.Close()
}
w.writers = make(map[StreamWriter]bool)
return nil
}
func NewWriteBroadcaster() *WriteBroadcaster {
return &WriteBroadcaster{writers: make(map[StreamWriter]bool), buf: bytes.NewBuffer(nil)}
}
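// GetTotalUsedFds returns the number of file descriptors currently opened by
// this process, or -1 if /proc is not available.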
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
Debugf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}
// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
type TruncIndex struct {
index *suffixarray.Index
ids map[string]bool
bytes []byte
}
func NewTruncIndex() *TruncIndex {
return &TruncIndex{
index: suffixarray.New([]byte{' '}),
ids: make(map[string]bool),
bytes: []byte{' '},
}
}
func (idx *TruncIndex) Add(id string) error {
if strings.Contains(id, " ") {
return fmt.Errorf("Illegal character: ' '")
}
if _, exists := idx.ids[id]; exists {
return fmt.Errorf("Id already exists: %s", id)
}
idx.ids[id] = true
idx.bytes = append(idx.bytes, []byte(id+" ")...)
idx.index = suffixarray.New(idx.bytes)
return nil
}
func (idx *TruncIndex) Delete(id string) error {
if _, exists := idx.ids[id]; !exists {
return fmt.Errorf("No such id: %s", id)
}
before, after, err := idx.lookup(id)
if err != nil {
return err
}
delete(idx.ids, id)
idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...)
idx.index = suffixarray.New(idx.bytes)
return nil
}
func (idx *TruncIndex) lookup(s string) (int, int, error) {
offsets := idx.index.Lookup([]byte(" "+s), -1)
//log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes())
if offsets == nil || len(offsets) == 0 || len(offsets) > 1 {
return -1, -1, fmt.Errorf("No such id: %s", s)
}
offsetBefore := offsets[0] + 1
offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ")
return offsetBefore, offsetAfter, nil
}
func (idx *TruncIndex) Get(s string) (string, error) {
before, after, err := idx.lookup(s)
//log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after)
if err != nil {
return "", err
}
return string(idx.bytes[before:after]), err
}
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
shortLen := 12
if len(id) < shortLen {
shortLen = len(id)
}
return id[:shortLen]
}
// Code copied from io.Copy(), modified to handle the C-p C-q escape sequence
func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
buf := make([]byte, 32*1024)
for {
nr, er := src.Read(buf)
if nr > 0 {
// ---- Docker addition
// char 16 is C-p
if nr == 1 && buf[0] == 16 {
nr, er = src.Read(buf)
// char 17 is C-q
if nr == 1 && buf[0] == 17 {
if err := src.Close(); err != nil {
return 0, err
}
return 0, io.EOF
}
}
// ---- End of docker
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er == io.EOF {
break
}
if er != nil {
err = er
break
}
}
return written, err
}
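// HashData returns the sha256 digest of the reader's content, formatted as
// "sha256:<hex>".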
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
type KernelVersionInfo struct {
Kernel int
Major int
Minor int
Flavor string
}
func (k *KernelVersionInfo) String() string {
flavor := ""
if len(k.Flavor) > 0 {
flavor = fmt.Sprintf("-%s", k.Flavor)
}
return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, flavor)
}
// Compare two KernelVersionInfo struct.
// Returns -1 if a < b, = if a == b, 1 it a > b
func CompareKernelVersion(a, b *KernelVersionInfo) int {
if a.Kernel < b.Kernel {
return -1
} else if a.Kernel > b.Kernel {
return 1
}
if a.Major < b.Major {
return -1
} else if a.Major > b.Major {
return 1
}
if a.Minor < b.Minor {
return -1
} else if a.Minor > b.Minor {
return 1
}
return 0
}
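// FindCgroupMountpoint scans /proc/mounts for the mountpoint of the given
// cgroup subsystem (e.g. "devices" or "memory").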
func FindCgroupMountpoint(cgroupType string) (string, error) {
output, err := ioutil.ReadFile("/proc/mounts")
if err != nil {
return "", err
}
// /proc/mounts has 6 fields per line, one mount per line, e.g.
// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, " ")
if len(parts) == 6 && parts[2] == "cgroup" {
for _, opt := range strings.Split(parts[3], ",") {
if opt == cgroupType {
return parts[1], nil
}
}
}
}
return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}
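// GetKernelVersion parses the running kernel's release string (from uname)
// into a KernelVersionInfo, e.g. "3.8.0-19-generic" -> {3, 8, 0, "19-generic"}.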
func GetKernelVersion() (*KernelVersionInfo, error) {
var (
flavor string
kernel, major, minor int
err error
)
uts, err := uname()
if err != nil {
return nil, err
}
release := make([]byte, len(uts.Release))
i := 0
for _, c := range uts.Release {
release[i] = byte(c)
i++
}
// Remove the \x00 from the release for Atoi to parse correctly
release = release[:bytes.IndexByte(release, 0)]
tmp := strings.SplitN(string(release), "-", 2)
tmp2 := strings.SplitN(tmp[0], ".", 3)
if len(tmp2) > 0 {
kernel, err = strconv.Atoi(tmp2[0])
if err != nil {
return nil, err
}
}
if len(tmp2) > 1 {
major, err = strconv.Atoi(tmp2[1])
if err != nil {
return nil, err
}
}
if len(tmp2) > 2 {
// Removes "+" because git kernels might set it
minorUnparsed := strings.Trim(tmp2[2], "+")
minor, err = strconv.Atoi(minorUnparsed)
if err != nil {
return nil, err
}
}
if len(tmp) == 2 {
flavor = tmp[1]
} else {
flavor = ""
}
return &KernelVersionInfo{
Kernel: kernel,
Major: major,
Minor: minor,
Flavor: flavor,
}, nil
}
// FIXME: this is deprecated by CopyWithTar in archive.go
func CopyDirectory(source, dest string) error {
if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil {
return fmt.Errorf("Error copy: %s (%s)", err, output)
}
return nil
}
type NopFlusher struct{}
func (f *NopFlusher) Flush() {}
type WriteFlusher struct {
w io.Writer
flusher http.Flusher
}
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
n, err = wf.w.Write(b)
wf.flusher.Flush()
return n, err
}
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var flusher http.Flusher
if f, ok := w.(http.Flusher); ok {
flusher = f
} else {
flusher = &NopFlusher{}
}
return &WriteFlusher{w: w, flusher: flusher}
}
type JSONMessage struct {
Status string `json:"status,omitempty"`
Progress string `json:"progress,omitempty"`
Error string `json:"error,omitempty"`
ID string `json:"id,omitempty"`
Time int64 `json:"time,omitempty"`
}
func (jm *JSONMessage) Display(out io.Writer) error {
if jm.Time != 0 {
fmt.Fprintf(out, "[%s] ", time.Unix(jm.Time, 0))
}
if jm.Progress != "" {
fmt.Fprintf(out, "%s %s\r", jm.Status, jm.Progress)
} else if jm.Error != "" {
return fmt.Errorf(jm.Error)
} else if jm.ID != "" {
fmt.Fprintf(out, "%s: %s\n", jm.ID, jm.Status)
} else {
fmt.Fprintf(out, "%s\n", jm.Status)
}
return nil
}
type StreamFormatter struct {
json bool
used bool
}
func NewStreamFormatter(json bool) *StreamFormatter {
return &StreamFormatter{json, false}
}
func (sf *StreamFormatter) FormatStatus(format string, a ...interface{}) []byte {
sf.used = true
str := fmt.Sprintf(format, a...)
if sf.json {
b, err := json.Marshal(&JSONMessage{Status: str})
if err != nil {
return sf.FormatError(err)
}
return b
}
return []byte(str + "\r\n")
}
func (sf *StreamFormatter) FormatError(err error) []byte {
sf.used = true
if sf.json {
if b, err := json.Marshal(&JSONMessage{Error: err.Error()}); err == nil {
return b
}
return []byte("{\"error\":\"format error\"}")
}
return []byte("Error: " + err.Error() + "\r\n")
}
func (sf *StreamFormatter) FormatProgress(action, str string) []byte {
sf.used = true
if sf.json {
b, err := json.Marshal(&JSONMessage{Status: action, Progress: str})
if err != nil {
return nil
}
return b
}
return []byte(action + " " + str + "\r")
}
func (sf *StreamFormatter) Used() bool {
return sf.used
}
func IsURL(str string) bool {
return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://")
}
func IsGIT(str string) bool {
return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/")
}
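// CheckLocalDns reports whether /etc/resolv.conf points at a local resolver
// (127.0.0.1 or 127.0.1.1), in which case callers may need to fall back to
// other DNS servers.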
func CheckLocalDns() bool {
resolv, err := ioutil.ReadFile("/etc/resolv.conf")
if err != nil {
Debugf("Error openning resolv.conf: %s", err)
return false
}
for _, ip := range []string{
"127.0.0.1",
"127.0.1.1",
} {
if strings.Contains(string(resolv), ip) {
return true
}
}
return false
}
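// ParseHost normalizes a daemon address into the canonical "tcp://host:port"
// form, using the supplied host and port as defaults; unix:// addresses are
// returned unchanged. For example, ParseHost("127.0.0.1", 4243, ":8080")
// yields "tcp://127.0.0.1:8080".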
func ParseHost(host string, port int, addr string) string {
if strings.HasPrefix(addr, "unix://") {
return addr
}
if strings.HasPrefix(addr, "tcp://") {
addr = strings.TrimPrefix(addr, "tcp://")
}
if strings.Contains(addr, ":") {
hostParts := strings.Split(addr, ":")
if len(hostParts) != 2 {
log.Fatal("Invalid bind address format.")
os.Exit(-1)
}
if hostParts[0] != "" {
host = hostParts[0]
}
if p, err := strconv.Atoi(hostParts[1]); err == nil {
port = p
}
} else {
host = addr
}
return fmt.Sprintf("tcp://%s:%d", host, port)
}
// ParseRepositoryTag splits a repository reference into its name and tag.
// The tag can be ambiguous because of a port in a repository name.
// Ex: localhost.localdomain:5000/samalba/hipache:latest
func ParseRepositoryTag(repos string) (string, string) {
n := strings.LastIndex(repos, ":")
if n < 0 {
return repos, ""
}
if tag := repos[n+1:]; !strings.Contains(tag, "/") {
return repos[:n], tag
}
return repos, ""
}
// UserLookup checks whether the given username or uid is present in /etc/passwd
// and returns the user struct.
// If the username is not found, an error is returned.
func UserLookup(uid string) (*user.User, error) {
file, err := ioutil.ReadFile("/etc/passwd")
if err != nil {
return nil, err
}
for _, line := range strings.Split(string(file), "\n") {
data := strings.Split(line, ":")
if len(data) > 5 && (data[0] == uid || data[2] == uid) {
return &user.User{
Uid: data[2],
Gid: data[3],
Username: data[0],
Name: data[4],
HomeDir: data[5],
}, nil
}
}
return nil, fmt.Errorf("User not found in /etc/passwd")
}
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
vendor/fyne.io/fyne/v2/internal/cache/base.go | package cache
import (
"os"
"sync"
"time"
"fyne.io/fyne/v2"
)
var (
cacheDuration = 1 * time.Minute
cleanTaskInterval = cacheDuration / 2
expiredObjects = make([]fyne.CanvasObject, 0, 50)
lastClean time.Time
skippedCleanWithCanvasRefresh = false
// testing purpose only
timeNow func() time.Time = time.Now
)
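// init allows the cache expiry window to be overridden through the FYNE_CACHE
// environment variable, which is parsed as a Go duration string (e.g. "30s").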
func init() {
if t, err := time.ParseDuration(os.Getenv("FYNE_CACHE")); err == nil {
cacheDuration = t
cleanTaskInterval = cacheDuration / 2
}
}
// Clean runs the cache clean task; it should be called on paint events.
func Clean(canvasRefreshed bool) {
now := timeNow()
// do not run clean task too fast
if now.Sub(lastClean) < 10*time.Second {
if canvasRefreshed {
skippedCleanWithCanvasRefresh = true
}
return
}
if skippedCleanWithCanvasRefresh {
skippedCleanWithCanvasRefresh = false
canvasRefreshed = true
}
if !canvasRefreshed && now.Sub(lastClean) < cleanTaskInterval {
return
}
destroyExpiredSvgs(now)
if canvasRefreshed {
// Destroy renderers on canvas refresh to avoid flickering screen.
destroyExpiredRenderers(now)
// canvases cache should be invalidated only on canvas refresh, otherwise there wouldn't
// be a way to recover them later
destroyExpiredCanvases(now)
}
lastClean = timeNow()
}
// CleanCanvas performs a complete removal of all the objects that belong to the specified
// canvas. It is usually used to free all objects from a closing window.
func CleanCanvas(canvas fyne.Canvas) {
deletingObjs := make([]fyne.CanvasObject, 0, 50)
canvasesLock.RLock()
for obj, cinfo := range canvases {
if cinfo.canvas == canvas {
deletingObjs = append(deletingObjs, obj)
}
}
canvasesLock.RUnlock()
if len(deletingObjs) == 0 {
return
}
canvasesLock.Lock()
for _, dobj := range deletingObjs {
delete(canvases, dobj)
}
canvasesLock.Unlock()
renderersLock.Lock()
for _, dobj := range deletingObjs {
wid, ok := dobj.(fyne.Widget)
if !ok {
continue
}
winfo, ok := renderers[wid]
if !ok {
continue
}
winfo.renderer.Destroy()
delete(renderers, wid)
}
renderersLock.Unlock()
}
// destroyExpiredCanvases deletes objects from the canvases cache.
func destroyExpiredCanvases(now time.Time) {
expiredObjects = expiredObjects[:0]
canvasesLock.RLock()
for obj, cinfo := range canvases {
if cinfo.isExpired(now) {
expiredObjects = append(expiredObjects, obj)
}
}
canvasesLock.RUnlock()
if len(expiredObjects) > 0 {
canvasesLock.Lock()
for i, exp := range expiredObjects {
delete(canvases, exp)
expiredObjects[i] = nil
}
canvasesLock.Unlock()
}
}
// destroyExpiredRenderers deletes the renderer from the cache and calls
// renderer.Destroy()
func destroyExpiredRenderers(now time.Time) {
expiredObjects = expiredObjects[:0]
renderersLock.RLock()
for wid, rinfo := range renderers {
if rinfo.isExpired(now) {
rinfo.renderer.Destroy()
expiredObjects = append(expiredObjects, wid)
}
}
renderersLock.RUnlock()
if len(expiredObjects) > 0 {
renderersLock.Lock()
for i, exp := range expiredObjects {
delete(renderers, exp.(fyne.Widget))
expiredObjects[i] = nil
}
renderersLock.Unlock()
}
}
// destroyExpiredSvgs destroys expired svgs cache data.
func destroyExpiredSvgs(now time.Time) {
expiredSvgs := make([]string, 0, 20)
svgLock.RLock()
for s, sinfo := range svgs {
if sinfo.isExpired(now) {
expiredSvgs = append(expiredSvgs, s)
}
}
svgLock.RUnlock()
if len(expiredSvgs) > 0 {
svgLock.Lock()
for _, exp := range expiredSvgs {
delete(svgs, exp)
}
svgLock.Unlock()
}
}
type expiringCache struct {
expireLock sync.RWMutex
expires time.Time
}
// isExpired checks whether the cache data has expired.
func (c *expiringCache) isExpired(now time.Time) bool {
c.expireLock.RLock()
defer c.expireLock.RUnlock()
return c.expires.Before(now)
}
// setAlive updates expiration time.
func (c *expiringCache) setAlive() {
t := timeNow().Add(cacheDuration)
c.expireLock.Lock()
c.expires = t
c.expireLock.Unlock()
}
type expiringCacheNoLock struct {
expires time.Time
}
// isExpired checks whether the cache data has expired.
func (c *expiringCacheNoLock) isExpired(now time.Time) bool {
return c.expires.Before(now)
}
// setAlive updates expiration time.
func (c *expiringCacheNoLock) setAlive() {
t := timeNow().Add(cacheDuration)
c.expires = t
}
| [
"\"FYNE_CACHE\""
]
| []
| [
"FYNE_CACHE"
]
| [] | ["FYNE_CACHE"] | go | 1 | 0 | |
io/docker.go | package io
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/fsouza/go-dockerclient"
)
//hide the docker client side types
type imageInspect struct {
wrapped *docker.Image
}
//hide the docker client side types
type contInspect struct {
wrapped *docker.Container
}
type apiContainers []docker.APIContainers
type apiImages []docker.APIImages
type Port string
type PortBinding struct {
HostIp string
HostPort string
}
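//RunConfig describes how a single container should be run: the image to use, whether to
//attach to its output, the volume/port/device/link mappings, and whether it runs
//privileged and/or waits for the command to finish.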
type RunConfig struct {
Image string
Attach bool
Volumes map[string]string
Ports map[Port][]PortBinding
Devices map[string]string
Links map[string]string
Privileged bool
WaitOutput bool
}
type TagInfo struct {
Repository string
Tag string
}
type BuildConfig struct {
NoCache bool
RemoveTemporaryContainer bool
}
type CopyArtifact struct {
SourcePath, DestinationDir string
}
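//DockerCli is the subset of docker operations used by pickett; it keeps the
//go-dockerclient types hidden from callers.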
type DockerCli interface {
CmdRun(*RunConfig, ...string) (*bytes.Buffer, string, error)
CmdTag(string, bool, *TagInfo) error
CmdCommit(string, *TagInfo) (string, error)
CmdBuild(*BuildConfig, string, string) error
//Copy actually does two different things: copies artifacts from the source tree into a tarball
//or copies artifacts from a container (given here as an image) into a tarball. In both cases
//the resulting tarball is sent to the docker server for a build.
CmdCopy(map[string]string, string, string, []*CopyArtifact, string) error
CmdLastModTime(map[string]string, string, []*CopyArtifact) (time.Time, error)
CmdStop(string) error
CmdRmContainer(string) error
CmdRmImage(string) error
InspectImage(string) (InspectedImage, error)
InspectContainer(string) (InspectedContainer, error)
ListContainers() (apiContainers, error)
ListImages() (apiImages, error)
}
type InspectedImage interface {
CreatedTime() time.Time
ID() string
ContainerID() string
}
type InspectedContainer interface {
Running() bool
CreatedTime() time.Time
ContainerName() string
ContainerID() string
ExitStatus() int
Ip() string
Ports() []string
}
//NewDockerCli returns a connection to the docker server. Pickett assumes that
//the DockerCli is "passed in from the outside".
func NewDockerCli() (DockerCli, error) {
if err := validateDockerHost(); err != nil {
return nil, err
}
return newDockerCli()
}
type dockerCli struct {
client *docker.Client
}
// newDockerCli builds a new docker interface and returns it. It
// assumes that the DOCKER_HOST env var has already been
// validated.
func newDockerCli() (DockerCli, error) {
result := &dockerCli{}
var err error
result.client, err = docker.NewClient(os.Getenv("DOCKER_HOST"))
if err != nil {
return nil, err
}
flog.Debugf("[docker cmd] export DOCKER_HOST='%s'", os.Getenv("DOCKER_HOST"))
return result, nil
}
func (d *dockerCli) createNamedContainer(config *docker.Config) (*docker.Container, error) {
tries := 0
ok := false
var cont *docker.Container
var err error
var opts docker.CreateContainerOptions
for tries < 3 {
opts.Config = config
opts.Name = newPhrase()
flog.Debugf("[docker cmd] Attempting to create container %s (%d) from image: %s", opts.Name, tries, opts.Config.Image)
cont, err = d.client.CreateContainer(opts)
if err != nil {
detail, ok := err.(*docker.Error)
if ok && detail.Status == 409 {
tries++
continue
} else {
return nil, err
}
}
ok = true
break
}
if !ok {
opts.Name = "" //fallback
opts.Name = newPhrase()
flog.Debugf("[docker cmd] Creating container named: %s", opts.Name)
cont, err = d.client.CreateContainer(opts)
if err != nil {
return nil, err
}
}
return cont, nil
}
var EMPTY struct{}
func (d *dockerCli) CmdRun(runconf *RunConfig, s ...string) (*bytes.Buffer, string, error) {
config := &docker.Config{}
config.Cmd = s
config.Image = runconf.Image
fordebug := new(bytes.Buffer)
cont, err := d.createNamedContainer(config)
if err != nil {
return nil, "", err
}
fordebug.WriteString(fmt.Sprintf("docker run %v ", cont.Name))
host := &docker.HostConfig{}
//flatten links for consumption by go-dockerclient
flatLinks := []string{}
for k, v := range runconf.Links {
flatLinks = append(flatLinks, fmt.Sprintf("%s:%s", k, v))
fordebug.WriteString(fmt.Sprintf("--link %s:%s ", k, v))
}
host.Links = flatLinks
host.Binds = []string{}
for k, v := range runconf.Volumes {
host.Binds = append(host.Binds, fmt.Sprintf("%s:%s", k, v))
fordebug.WriteString(fmt.Sprintf("-v %s:%s ", k, v))
}
for k, v := range runconf.Devices {
if len(s) >= 2 {
instance, _ := strconv.Atoi(s[2])
letter := 'b' + instance
k = strings.Replace(k, "?", string(letter), -1)
}
host.Binds = append(host.Binds, fmt.Sprintf("%s:%s", k, v))
}
//convert the types of the elements of this map so that *our* clients don't
//see the inner types
convertedMap := make(map[docker.Port][]docker.PortBinding)
for k, v := range runconf.Ports {
key := docker.Port(k)
convertedMap[key] = []docker.PortBinding{}
for _, m := range v {
convertedMap[key] = append(convertedMap[key],
docker.PortBinding{HostIp: m.HostIp, HostPort: m.HostPort})
fordebug.WriteString(fmt.Sprintf("-p %s:%s:%s ", m.HostIp, m.HostPort, m.HostPort))
}
}
host.PortBindings = convertedMap
host.Privileged = runconf.Privileged
flog.Debugf("[docker cmd] %s%s", fordebug.Bytes(), strings.Join(config.Cmd, " "))
err = d.client.StartContainer(cont.ID, host)
if err != nil {
return nil, "", err
}
if runconf.Attach {
//These are the right settings if you want to "watch" the output of the command and wait for
//it to terminate
err = d.client.AttachToContainer(docker.AttachToContainerOptions{
Container: cont.ID,
InputStream: os.Stdin,
OutputStream: os.Stdout,
ErrorStream: os.Stderr,
Logs: true,
Stdin: true,
Stdout: true,
Stderr: true,
Stream: true,
})
if err != nil {
return nil, "", err
}
// There's a docker bug where Attach prematurely exits.
// To prevent that we DO allow an attach to also wait for
// the container to exit.
if runconf.WaitOutput {
status, err := d.client.WaitContainer(cont.ID)
if err != nil {
return nil, "", err
} else if status != 0 {
return nil, "", fmt.Errorf("Non-zero exitcode %v from %v", status, cont.Name)
}
}
return nil, cont.ID, nil
} else if runconf.WaitOutput {
// wait for result and return a buffer with the output
_, err = d.client.WaitContainer(cont.ID)
if err != nil {
return nil, "", err
}
out := new(bytes.Buffer)
err = d.client.AttachToContainer(docker.AttachToContainerOptions{
Container: cont.ID,
OutputStream: out,
ErrorStream: out,
Logs: true,
Stdout: true,
Stderr: true,
})
if err != nil {
return nil, "", err
}
return out, cont.ID, nil
}
//just start it and return with the id
return nil, cont.ID, nil
}
func (d *dockerCli) CmdStop(contID string) error {
flog.Debugf("Stopping container %s\n", contID)
return d.client.StopContainer(contID, 2)
}
func (d *dockerCli) CmdRmImage(imgID string) error {
flog.Debugf("Removing image %s\n", imgID)
return d.client.RemoveImage(imgID)
}
func (d *dockerCli) CmdRmContainer(contID string) error {
flog.Debugf("removing container %s\n", contID)
opts := docker.RemoveContainerOptions{
ID: contID,
}
return d.client.RemoveContainer(opts)
}
func (d *dockerCli) CmdTag(image string, force bool, info *TagInfo) error {
flog.Debugf("[docker cmd] Tagging image %s as %s:%s\n", image, info.Repository, info.Tag)
return d.client.TagImage(image, docker.TagImageOptions{
Force: force,
Tag: info.Tag,
Repo: info.Repository,
})
}
func (d *dockerCli) CmdCommit(containerId string, info *TagInfo) (string, error) {
opts := docker.CommitContainerOptions{
Container: containerId,
}
if info != nil {
opts.Tag = info.Tag
opts.Repository = info.Repository
}
flog.Debugf("[docker cmd] Commit of container. Options: Container: %s, Tag: %s, Repo: %s", opts.Container, opts.Tag, opts.Repository)
image, err := d.client.CommitContainer(opts)
if err != nil {
return "", err
}
return image.ID, nil
}
func (d *dockerCli) tarball(pathToDir string, localName string, tw *tar.Writer) error {
flog.Debugf("tarball construction in '%s' (as '%s')", pathToDir, localName)
dir, err := os.Open(pathToDir)
if err != nil {
return err
}
info, err := dir.Stat()
if err != nil {
return err
}
if !info.IsDir() {
return fmt.Errorf("expected %s to be a directory!", dir)
}
names, err := dir.Readdirnames(0)
if err != nil {
return err
}
for _, name := range names {
path := filepath.Join(pathToDir, name)
lname := filepath.Join(localName, name)
isFile, err := d.writeFullFile(tw, path, lname)
if err != nil {
return err
}
if !isFile {
err := d.tarball(path, filepath.Join(localName, name), tw)
if err != nil {
return err
}
continue
}
}
return nil
}
func (d *dockerCli) writeFullFile(tw *tar.Writer, path string, localName string) (bool, error) {
info, err := os.Stat(path)
if err != nil {
return false, err
}
if info.IsDir() {
return false, nil
}
//now we are sure it's a file
hdr := &tar.Header{
Name: localName,
Size: info.Size(),
Mode: int64(info.Mode()),
ModTime: info.ModTime(),
}
if err := tw.WriteHeader(hdr); err != nil {
return false, err
}
fp, err := os.Open(path)
if err != nil {
return false, err
}
content, err := ioutil.ReadAll(fp)
if err != nil {
return false, err
}
if _, err := tw.Write(content); err != nil {
return false, err
}
flog.Debugf("added %s as %s to tarball", path, localName)
return true, nil
}
//XXX is it safe to use /bin/true?
func (d *dockerCli) makeDummyContainerToGetAtImage(img string) (string, error) {
cont, err := d.client.CreateContainer(docker.CreateContainerOptions{
Config: &docker.Config{
Image: img,
Entrypoint: []string{"/bin/true"},
},
})
if err != nil {
return "", err
}
return cont.ID, nil
}
func (d *dockerCli) CmdLastModTime(realPathSource map[string]string, img string,
artifacts []*CopyArtifact) (time.Time, error) {
if len(realPathSource) == len(artifacts) {
flog.Debugln("no work to do in the container for last mod time, no artifacts inside it.")
return time.Time{}, nil
}
cont, err := d.makeDummyContainerToGetAtImage(img)
if err != nil {
return time.Time{}, err
}
err = d.client.StartContainer(cont, &docker.HostConfig{})
if err != nil {
return time.Time{}, err
}
//walk each artifact, getting it from the container, skipping sources
best := time.Time{}
for _, a := range artifacts {
_, found := realPathSource[a.SourcePath]
if found {
continue
}
//pull it from container
buf := new(bytes.Buffer)
flog.Debugf("copying from container %s. Resource %s to %s", cont, a.SourcePath, a.DestinationDir)
err = d.client.CopyFromContainer(docker.CopyFromContainerOptions{
OutputStream: buf,
Container: cont,
Resource: a.SourcePath,
})
if err != nil {
return time.Time{}, err
}
//var out bytes.Buffer
r := bytes.NewReader(buf.Bytes())
tr := tar.NewReader(r)
for {
entry, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return time.Time{}, err
}
flog.Debugf("read file from container: %s, %v", entry.Name, entry.ModTime)
if !entry.FileInfo().IsDir() {
if entry.ModTime.After(best) {
best = entry.ModTime
}
}
}
}
return best, nil
}
//
// HACK! Poll the server until the image is built.
//
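// hacky_poll pings the docker server every 300ms until the returned channel is closed,
// presumably to keep the client connection alive while a long build is in progress.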
func hacky_poll(d *dockerCli) chan struct{} {
term := make(chan struct{})
go func() {
for {
d.client.Ping()
select {
case <-time.After(300 * time.Millisecond):
case <-term:
return
}
}
}()
return term
}
func (d *dockerCli) CmdCopy(realPathSource map[string]string, imgSrc string, imgDest string,
artifacts []*CopyArtifact, resultTag string) error {
cont, err := d.makeDummyContainerToGetAtImage(imgSrc)
if err != nil {
return err
}
if len(realPathSource) != len(artifacts) {
flog.Debugln("starting container because we need to retrieve artifacts from it")
//don't bother starting the container unless there is something we need from it
err = d.client.StartContainer(cont, &docker.HostConfig{})
if err != nil {
return err
}
} else {
flog.Debugln("all artifacts found in source tree, not starting container")
}
dockerFile := new(bytes.Buffer)
resulTarball := new(bytes.Buffer)
tw := tar.NewWriter(resulTarball)
dockerFile.WriteString(fmt.Sprintf("FROM %s\n", imgDest))
//walk each artifact, potentially getting it from the container
for _, a := range artifacts {
truePath, found := realPathSource[a.SourcePath]
if found {
isFile, err := d.writeFullFile(tw, truePath, a.SourcePath)
if err != nil {
return err
}
//kinda hacky: we use a.SourcePath as the name *inside* the tarball so we can get the
//directory name right on the final output
flog.Debugf("COPY %s TO %s.", a.SourcePath, a.DestinationDir)
dockerFile.WriteString(fmt.Sprintf("COPY %s %s\n", a.SourcePath, a.DestinationDir))
if !isFile {
if err := d.tarball(truePath, a.SourcePath, tw); err != nil {
return err
}
}
} else {
//pull it from container
buf := new(bytes.Buffer)
err = d.client.CopyFromContainer(docker.CopyFromContainerOptions{
OutputStream: buf,
Container: cont,
Resource: a.SourcePath,
})
if err != nil {
return err
}
//var out bytes.Buffer
r := bytes.NewReader(buf.Bytes())
tr := tar.NewReader(r)
for {
entry, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
flog.Debugf("read file from container: %s", entry.Name)
if !entry.FileInfo().IsDir() {
dockerFile.WriteString(fmt.Sprintf("COPY %s %s\n", entry.Name, a.DestinationDir+"/"+entry.Name))
if err := tw.WriteHeader(entry); err != nil {
return err
}
if _, err := io.Copy(tw, tr); err != nil {
return err
}
}
}
}
}
hdr := &tar.Header{
Name: "Dockerfile",
Size: int64(dockerFile.Len()),
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := io.Copy(tw, dockerFile); err != nil {
return err
}
if err := tw.Close(); err != nil {
return err
}
opts := docker.BuildImageOptions{
Name: resultTag,
InputStream: resulTarball,
OutputStream: os.Stdout,
RmTmpContainer: true,
SuppressOutput: false,
NoCache: true,
}
flog.Debugf("[docker cmd] Building image. Name: %s", opts.Name)
term := hacky_poll(d)
defer close(term)
if err := d.client.BuildImage(opts); err != nil {
return err
}
return nil
}
func (d *dockerCli) CmdBuild(config *BuildConfig, pathToDir string, tag string) error {
//build tarball
out := new(bytes.Buffer)
tw := tar.NewWriter(out)
err := d.tarball(pathToDir, "", tw)
if err != nil {
return err
}
if err := tw.Close(); err != nil {
return err
}
opts := docker.BuildImageOptions{
Name: tag,
InputStream: bytes.NewBuffer(out.Bytes()),
OutputStream: os.Stdout,
RmTmpContainer: config.RemoveTemporaryContainer,
SuppressOutput: false,
NoCache: config.NoCache,
}
term := hacky_poll(d)
defer close(term)
flog.Debugf("[docker cmd] Building image. Name: %s", opts.Name)
if err := d.client.BuildImage(opts); err != nil {
return err
}
return nil
}
func (c *dockerCli) InspectImage(n string) (InspectedImage, error) {
i, err := c.client.InspectImage(n)
if err != nil {
return nil, err
}
return &imageInspect{
wrapped: i,
}, nil
}
func (c *dockerCli) InspectContainer(n string) (InspectedContainer, error) {
i, err := c.client.InspectContainer(n)
if err != nil {
return nil, err
}
return &contInspect{
wrapped: i,
}, nil
}
func (d *dockerCli) ListContainers() (apiContainers, error) {
containers, err := d.client.ListContainers(docker.ListContainersOptions{All: true})
return containers, err
}
func (d *dockerCli) ListImages() (apiImages, error) {
return d.client.ListImages(true)
}
//Wrappers for getting inspections
func (i *imageInspect) CreatedTime() time.Time {
return i.wrapped.Created
}
func (i *imageInspect) ID() string {
return i.wrapped.ID
}
func (i *imageInspect) ContainerID() string {
return i.wrapped.Container
}
func (c *contInspect) Ip() string {
return c.wrapped.NetworkSettings.IPAddress
}
func (c *contInspect) Ports() []string {
ports := []string{}
for k := range c.wrapped.NetworkSettings.Ports {
ports = append(ports, k.Port())
}
return ports
}
func (c *contInspect) CreatedTime() time.Time {
return c.wrapped.Created
}
func (c *contInspect) Running() bool {
return c.wrapped.State.Running
}
func (c *contInspect) ContainerName() string {
return strings.TrimLeft(c.wrapped.Name, "/")
}
func (c *contInspect) ContainerID() string {
return c.wrapped.ID
}
func (c *contInspect) ExitStatus() int {
return c.wrapped.State.ExitCode
}
| [
"\"DOCKER_HOST\"",
"\"DOCKER_HOST\""
]
| []
| [
"DOCKER_HOST"
]
| [] | ["DOCKER_HOST"] | go | 1 | 0 | |
cmd/bincastle/bincastle.go | package main
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/signal"
"os/user"
"path/filepath"
"runtime"
"strings"
"syscall"
"time"
"github.com/containerd/containerd/namespaces"
"github.com/hashicorp/go-multierror"
"github.com/moby/buildkit/client/llb"
"github.com/opencontainers/runc/libcontainer"
"github.com/sipsma/bincastle/buildkit"
"github.com/sipsma/bincastle/ctr"
"github.com/sipsma/bincastle/graph"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"golang.org/x/sys/unix"
_ "github.com/opencontainers/runc/libcontainer/nsenter"
)
const (
ctrName = "system"
runArg = "run"
internalRunArg = "internalRun"
)
var (
sshAgentSock = os.Getenv("SSH_AUTH_SOCK")
bincastleSock = os.Getenv("BINCASTLE_SOCK")
exportImportFlags = []cli.Flag{
&cli.StringFlag{
Name: "export-cache",
Usage: "registry ref to export cached results to",
},
&cli.StringFlag{
Name: "import-cache",
Usage: "registry ref to import cached results from",
},
}
// TODO imageExportFlags are only here for now because they are only meant
// for internal use. In the future, once image export is more intuitive in
// that it results in a whole graph getting exported rather than just the
// single exec layer, it will be made public
imageExportFlags = []cli.Flag{
&cli.StringFlag{
Name: "export-image",
Usage: "hidden: export the result of the exec as an image",
Hidden: true,
},
}
verboseFlags = []cli.Flag{&cli.BoolFlag{
Name: "verbose",
Aliases: []string{"v"},
Usage: "show full output from every build",
}}
)
func joinflags(flagss ...[]cli.Flag) []cli.Flag {
var joined []cli.Flag
for _, flags := range flagss {
joined = append(joined, flags...)
}
return joined
}
func init() {
logrus.SetOutput(ioutil.Discard)
if len(os.Args) > 1 && os.Args[1] == ctr.RuncInitArg {
runtime.GOMAXPROCS(1)
runtime.LockOSThread()
factory, _ := libcontainer.New("", libcontainer.RootlessCgroupfs)
err := factory.StartInitialization()
panic(err)
}
}
func main() {
selfBin, err := os.Readlink("/proc/self/exe")
if err != nil {
panic(err)
}
app := &cli.App{
Commands: []*cli.Command{
{
Name: runArg,
Usage: "start the system in a rootless container",
Flags: joinflags(exportImportFlags, imageExportFlags, verboseFlags),
Action: func(c *cli.Context) (err error) {
you, err := user.Current()
if err != nil {
return fmt.Errorf("failed to get current user: %w", err)
}
homeDir := you.HomeDir
if homeDir == "" {
return fmt.Errorf("cannot find user's home dir (is the $HOME env var set?)")
}
varDir := filepath.Join(homeDir, ".bincastle", "var")
err = os.MkdirAll(varDir, 0700)
if err != nil {
return err
}
ctrsDir := filepath.Join(homeDir, ".bincastle", "ctrs")
err = os.MkdirAll(ctrsDir, 0700)
if err != nil {
return err
}
ctrStateDir, err := filepath.EvalSymlinks(ctrsDir)
if err != nil {
return fmt.Errorf(
"failed to evaluate symlinks in container state root dir: %w", err)
}
ctrState := ctr.ContainerStateRoot(ctrStateDir).ContainerState(ctrName)
mounts := ctr.DefaultMounts().With(
ctr.BindMount{
Dest: "/etc/resolv.conf",
Source: "/etc/resolv.conf",
},
ctr.BindMount{
Dest: "/etc/hosts",
Source: "/etc/hosts",
},
ctr.BindMount{
Dest: "/dev/fuse",
Source: "/dev/fuse",
},
ctr.BindMount{
Dest: "/bincastle",
Source: selfBin,
// NOTE: not setting this readonly because doing so can fail with
// EPERM when selfBin is not already mounted read-only. Later
// in the inner container it can be set to a read-only bind mount
// due to the workarounds made possible via the other mount backends.
},
ctr.BindMount{
Dest: "/var",
Source: varDir,
},
)
// TODO this should be optional and default to not happening (you are giving
// potentially untrusted code access to your ssh agent sock)
var env []string
if sshAgentSock != "" {
mounts = mounts.With(ctr.BindMount{
Dest: "/run/ssh-agent.sock",
Source: sshAgentSock,
})
env = append(env, "SSH_AUTH_SOCK=/run/ssh-agent.sock")
}
bcArgs := buildkit.BincastleArgs{
ImportCacheRef: c.String("import-cache"),
ExportCacheRef: c.String("export-cache"),
ExportImageRef: c.String("export-image"),
SSHAgentSockPath: sshAgentSock,
BincastleSockPath: bincastleSock,
Verbose: c.Bool("verbose"),
}
if !strings.HasPrefix(c.Args().Get(0), "https://") && !strings.HasPrefix(c.Args().Get(0), "ssh://") {
bcArgs.SourceLocalDir = c.Args().Get(0)
bcArgs.SourceSubdir = c.Args().Get(1)
} else {
bcArgs.SourceGitURL = c.Args().Get(0)
bcArgs.SourceGitRef = c.Args().Get(1)
bcArgs.SourceSubdir = c.Args().Get(2)
}
for _, kv := range os.Environ() {
if strings.HasPrefix(kv, graph.EnvOverridesPrefix) {
bcArgs.LocalOverrides = append(bcArgs.LocalOverrides, kv)
}
}
var needFuseOverlayfs bool
// TODO don't hardcode binary location, also /var is a weird place
if _, err := os.Stat(filepath.Join(homeDir, ".bincastle/var/fuse-overlayfs")); os.IsNotExist(err) {
needFuseOverlayfs = true
} else if err != nil {
return err
}
ctx, cancel := context.WithCancel(
namespaces.WithNamespace(context.Background(), "buildkit"))
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
goCount := 3
errCh := make(chan error, goCount)
if bcArgs.BincastleSockPath == "" {
bcArgs.BincastleSockPath = filepath.Join(homeDir, ".bincastle/var/bincastle.sock")
go func() {
defer cancel()
errCh <- runCtr(ctx, ctrState, ctr.ContainerDef{
ContainerProc: ctr.ContainerProc{
// don't use /proc/self/exe directly because it ends up being a
// memfd created by runc, which wreaks havoc later when inner containers
// need to mount /proc/self/exe to /bincastle
Args: append([]string{"/bincastle", internalRunArg}, os.Args[2:]...),
Env: env,
WorkingDir: "/var",
Uid: uint32(unix.Geteuid()),
Gid: uint32(unix.Getegid()),
Capabilities: &ctr.AllCaps,
},
Hostname: "bincastle",
Mounts: mounts,
MountBackend: ctr.NoOverlayfsBackend{},
ReadOnlyRootfs: true,
})
}()
} else {
goCount--
needFuseOverlayfs = false
}
go func() {
defer cancel()
timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 10*time.Second)
defer timeoutCancel()
// TODO don't hardcode
if err := waitToExist(timeoutCtx, bcArgs.BincastleSockPath); err != nil {
errCh <- err
return
}
if needFuseOverlayfs {
if fuseoverlayDef, err := llb.Image(
// TODO don't hardcode
"eriksipsma/bincastle-fuse-overlayfs",
).Marshal(ctx, llb.LinuxAmd64); err != nil {
errCh <- err
return
} else if err := buildkit.BincastleBuild(ctx, buildkit.BincastleArgs{
LLB: fuseoverlayDef,
ExportLocalDir: filepath.Join(homeDir, ".bincastle/var"), // TODO don't hardcode
ImportCacheRef: bcArgs.ImportCacheRef,
SSHAgentSockPath: bcArgs.SSHAgentSockPath,
// TODO don't hardcode
BincastleSockPath: bcArgs.BincastleSockPath,
Verbose: c.Bool("verbose"),
}); err != nil {
errCh <- err
return
}
}
errCh <- buildkit.BincastleBuild(ctx, bcArgs)
}()
go func() {
defer cancel()
select {
case sig := <-sigchan:
errCh <- fmt.Errorf("received signal %s", sig)
case <-ctx.Done():
errCh <- nil
}
}()
var finalErr error
for i := 0; i < goCount; i++ {
finalErr = multierror.Append(finalErr, <-errCh).ErrorOrNil()
}
return finalErr
},
},
{
Name: internalRunArg,
Hidden: true,
Flags: joinflags(exportImportFlags, imageExportFlags, verboseFlags),
Action: func(c *cli.Context) (err error) {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
// TODO don't hardcode
ctrStateRoot := ctr.ContainerStateRoot("/var/ctrs")
ctrState := ctrStateRoot.ContainerState(ctrName)
if ctrState.ContainerExists() {
return ctr.ContainerExistsError{ctrName}
}
serve, err := buildkit.Buildkitd(ctr.FuseOverlayfsBackend{
FuseOverlayfsBin: "/var/fuse-overlayfs",
})
if err != nil {
return err
}
goCount := 2
errCh := make(chan error, goCount)
ctx, cancel := context.WithCancel(
namespaces.WithNamespace(context.Background(), "buildkit"))
go func() {
defer cancel()
errCh <- serve(ctx)
}()
go func() {
defer cancel()
select {
case sig := <-sigchan:
errCh <- fmt.Errorf("received signal %s", sig)
case <-ctx.Done():
errCh <- nil
}
}()
var finalErr error
for i := 0; i < goCount; i++ {
finalErr = multierror.Append(finalErr, <-errCh).ErrorOrNil()
}
return finalErr
},
},
},
}
if err := app.Run(os.Args); err != nil {
panic(err)
}
}
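// runCtr starts the container described by def, attaches the current console to it and
// blocks until the container exits or ctx is cancelled, destroying the container on the way out.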
func runCtr(ctx context.Context, ctrState ctr.ContainerState, def ctr.ContainerDef) error {
container, err := ctrState.Start(def)
if err != nil {
return fmt.Errorf("failed to run container: %w", err)
}
ctx, cancel := context.WithCancel(ctx)
ioctx, iocancel := context.WithCancel(context.Background())
goCount := 2
errCh := make(chan error, goCount)
go func() {
defer cancel()
defer iocancel()
waitErr := container.Wait(ctx).Err
if waitErr == context.Canceled {
waitErr = nil
}
destroyErr := container.Destroy(15 * time.Second) // TODO don't hardcode
errCh <- multierror.Append(waitErr, destroyErr).ErrorOrNil()
}()
go func() {
defer cancel()
attachErr := ctr.AttachSelfConsole(ioctx, container)
if attachErr == context.Canceled {
attachErr = nil
}
if attachErr != nil {
attachErr = fmt.Errorf("error during console attach: %w", attachErr)
}
errCh <- attachErr
}()
var finalErr error
for i := 0; i < goCount; i++ {
finalErr = multierror.Append(finalErr, <-errCh).ErrorOrNil()
}
return finalErr
}
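// waitToExist polls every 50ms until path exists, or returns early when ctx is cancelled.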
func waitToExist(ctx context.Context, path string) error {
for {
if _, err := os.Stat(path); err == nil {
return nil
} else if !os.IsNotExist(err) {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(50 * time.Millisecond):
}
}
}
| [
"\"SSH_AUTH_SOCK\"",
"\"BINCASTLE_SOCK\""
]
| []
| [
"BINCASTLE_SOCK",
"SSH_AUTH_SOCK"
]
| [] | ["BINCASTLE_SOCK", "SSH_AUTH_SOCK"] | go | 2 | 0 | |
base/base_information.py | # coding = utf-8
import os
import datetime
import sys
Date = datetime.datetime.now()
environ = os.environ
UserName = environ['USER']
UserHome = environ['HOME']
ExcutableName = os.path.split(sys.argv[0])[-1].split('.')[0] | []
| []
| []
| [] | [] | python | 0 | 0 | |
raspagem2.py | import base64
import gspread
import json
import pandas as pd
import os
from contagem_palavras import conta_palavras
from raspador_sites import coleta_folha, coleta_oglobo, coleta_estadao
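# Google Sheets access is configured via environment variables: GOOGLE_SHEET_ID identifies
# the spreadsheet and GOOGLE_SHEETS_CREDENTIALS holds base64-encoded service-account JSON.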
spreadsheet_id = os.environ['GOOGLE_SHEET_ID']
conteudo_codificado = os.environ['GOOGLE_SHEETS_CREDENTIALS']
conteudo = base64.b64decode(conteudo_codificado)
credentials = json.loads(conteudo)
service_account = gspread.service_account_from_dict(credentials)
spreadsheet = service_account.open_by_key(spreadsheet_id)
folha_sheet = spreadsheet.worksheet('folha')
oglobo_sheet = spreadsheet.worksheet('oglobo')
estadao_sheet = spreadsheet.worksheet('estadao')
# Data scraping - run each scraper independently so one failure does not stop the others
try:
    coleta_folha(folha_sheet)
except Exception:
    pass
try:
    coleta_estadao(estadao_sheet)
except Exception:
    pass
try:
    coleta_oglobo(oglobo_sheet)
except Exception:
    pass
| []
| []
| [
"GOOGLE_SHEETS_CREDENTIALS",
"GOOGLE_SHEET_ID"
]
| [] | ["GOOGLE_SHEETS_CREDENTIALS", "GOOGLE_SHEET_ID"] | python | 2 | 0 | |
ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.exec.mr;
import static org.apache.hadoop.hive.ql.exec.mr.MapRedTask.HADOOP_CLIENT_OPTS;
import static org.apache.hadoop.hive.ql.exec.mr.MapRedTask.HADOOP_MEM_KEY;
import static org.apache.hadoop.hive.ql.exec.mr.MapRedTask.HADOOP_OPTS_KEY;
import static org.apache.hadoop.hive.ql.exec.mr.MapRedTask.HIVE_SYS_PROP;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.Serializable;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.LogUtils;
import org.apache.hadoop.hive.common.io.CachingPrintStream;
import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.exec.BucketMatcher;
import org.apache.hadoop.hive.ql.exec.FetchOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.SecureCmdDoAs;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionException;
import org.apache.hadoop.hive.ql.io.HiveInputFormat;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.plan.BucketMapJoinContext;
import org.apache.hadoop.hive.ql.plan.FetchWork;
import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
import org.apache.hadoop.hive.ql.plan.api.StageType;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hive.common.util.StreamPrinter;
/**
* MapredLocalTask represents any local work (i.e.: client side work) that hive needs to
* execute. E.g.: This is used for generating Hashtables for Mapjoins on the client
* before the Join is executed on the cluster.
*
* MapRedLocalTask does not actually execute the work in process, but rather generates
* a command using ExecDriver. ExecDriver is what will finally drive processing the records.
*/
public class MapredLocalTask extends Task<MapredLocalWork> implements Serializable {
private static final long serialVersionUID = 1L;
private final Map<String, FetchOperator> fetchOperators = new HashMap<String, FetchOperator>();
protected HadoopJobExecHelper jobExecHelper;
private JobConf job;
public static transient final Log l4j = LogFactory.getLog(MapredLocalTask.class);
static final String HIVE_LOCAL_TASK_CHILD_OPTS_KEY = "HIVE_LOCAL_TASK_CHILD_OPTS";
public static MemoryMXBean memoryMXBean;
private static final Log LOG = LogFactory.getLog(MapredLocalTask.class);
// not sure we need this exec context; but all the operators in the work
// will pass this context through
private ExecMapperContext execContext = null;
private Process executor;
private SecureCmdDoAs secureDoAs;
public MapredLocalTask() {
super();
}
public MapredLocalTask(MapredLocalWork plan, JobConf job, boolean isSilent) throws HiveException {
setWork(plan);
this.job = job;
console = new LogHelper(LOG, isSilent);
}
public void setExecContext(ExecMapperContext execContext) {
this.execContext = execContext;
}
public void updateTaskMetrics(Metrics metrics) {
metrics.incrementCounter(MetricsConstant.HIVE_MR_TASKS);
}
@Override
public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext) {
super.initialize(conf, queryPlan, driverContext);
job = new JobConf(conf, ExecDriver.class);
execContext = new ExecMapperContext(job);
//we don't use the HadoopJobExecHooks for local tasks
this.jobExecHelper = new HadoopJobExecHelper(job, console, this, null);
}
public static String now() {
Calendar cal = Calendar.getInstance();
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
return sdf.format(cal.getTime());
}
@Override
public boolean requireLock() {
return true;
}
@Override
public int execute(DriverContext driverContext) {
if (conf.getBoolVar(HiveConf.ConfVars.SUBMITLOCALTASKVIACHILD)) {
// send task off to another jvm
return executeInChildVM(driverContext);
} else {
// execute in process
return executeInProcess(driverContext);
}
}
public int executeInChildVM(DriverContext driverContext) {
// execute in child jvm
try {
// generate the cmd line to run in the child jvm
Context ctx = driverContext.getCtx();
String hiveJar = conf.getJar();
String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOPBIN);
conf.setVar(ConfVars.HIVEADDEDJARS, Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR));
// write out the plan to a local file
Path planPath = new Path(ctx.getLocalTmpPath(), "plan.xml");
MapredLocalWork plan = getWork();
LOG.info("Generating plan file " + planPath.toString());
OutputStream out = null;
try {
out = FileSystem.getLocal(conf).create(planPath);
Utilities.serializePlan(plan, out, conf);
out.close();
out = null;
} finally {
IOUtils.closeQuietly(out);
}
String isSilent = "true".equalsIgnoreCase(System.getProperty("test.silent")) ? "-nolog" : "";
String jarCmd;
jarCmd = hiveJar + " " + ExecDriver.class.getName();
String hiveConfArgs = ExecDriver.generateCmdLine(conf, ctx);
String cmdLine = hadoopExec + " jar " + jarCmd + " -localtask -plan " + planPath.toString()
+ " " + isSilent + " " + hiveConfArgs;
String workDir = (new File(".")).getCanonicalPath();
String files = Utilities.getResourceFiles(conf, SessionState.ResourceType.FILE);
if (!files.isEmpty()) {
cmdLine = cmdLine + " -files " + files;
workDir = ctx.getLocalTmpPath().toUri().getPath();
if (!(new File(workDir)).mkdir()) {
throw new IOException("Cannot create tmp working dir: " + workDir);
}
for (String f : StringUtils.split(files, ',')) {
Path p = new Path(f);
String target = p.toUri().getPath();
String link = workDir + Path.SEPARATOR + p.getName();
if (FileUtil.symLink(target, link) != 0) {
throw new IOException("Cannot link to added file: " + target + " from: " + link);
}
}
}
// Inherit Java system variables
String hadoopOpts;
StringBuilder sb = new StringBuilder();
Properties p = System.getProperties();
for (String element : HIVE_SYS_PROP) {
if (p.containsKey(element)) {
sb.append(" -D" + element + "=" + p.getProperty(element));
}
}
hadoopOpts = sb.toString();
// Inherit the environment variables
String[] env;
Map<String, String> variables = new HashMap<String, String>(System.getenv());
// The user can specify the hadoop memory
// if ("local".equals(conf.getVar(HiveConf.ConfVars.HADOOPJT))) {
// if we are running in local mode - then the amount of memory used
// by the child jvm can no longer default to the memory used by the
// parent jvm
// int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM);
int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM);
if (hadoopMem == 0) {
// remove env var that would default child jvm to use parent's memory
// as default. child jvm would use default memory for a hadoop client
variables.remove(HADOOP_MEM_KEY);
} else {
// user specified the memory for local mode hadoop run
console.printInfo(" set heap size\t" + hadoopMem + "MB");
variables.put(HADOOP_MEM_KEY, String.valueOf(hadoopMem));
}
// } else {
// nothing to do - we are not running in local mode - only submitting
// the job via a child process. in this case it's appropriate that the
// child jvm use the same memory as the parent jvm
// }
//Set HADOOP_USER_NAME env variable for child process, so that
// it also runs with hadoop permissions for the user the job is running as
// This will be used by hadoop only in insecure (non-kerberos) mode
String endUserName = Utils.getUGI().getShortUserName();
LOG.debug("setting HADOOP_USER_NAME\t" + endUserName);
variables.put("HADOOP_USER_NAME", endUserName);
if (variables.containsKey(HADOOP_OPTS_KEY)) {
variables.put(HADOOP_OPTS_KEY, variables.get(HADOOP_OPTS_KEY) + hadoopOpts);
} else {
variables.put(HADOOP_OPTS_KEY, hadoopOpts);
}
//For Windows OS, we need to pass HIVE_HADOOP_CLASSPATH Java parameter while starting
//Hiveserver2 using "-hiveconf hive.hadoop.classpath=%HIVE_LIB%". This is to combine path(s).
if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_HADOOP_CLASSPATH)!= null)
{
if (variables.containsKey("HADOOP_CLASSPATH"))
{
variables.put("HADOOP_CLASSPATH", variables.get("HADOOP_CLASSPATH") + ";" + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_HADOOP_CLASSPATH));
} else {
variables.put("HADOOP_CLASSPATH", HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_HADOOP_CLASSPATH));
}
}
if(variables.containsKey(MapRedTask.HIVE_DEBUG_RECURSIVE)) {
MapRedTask.configureDebugVariablesForChildJVM(variables);
}
if(UserGroupInformation.isSecurityEnabled() &&
UserGroupInformation.isLoginKeytabBased()) {
//If kerberos security is enabled, and HS2 doAs is enabled,
// then additional params need to be set so that the command is run as
// intended user
secureDoAs = new SecureCmdDoAs(conf);
secureDoAs.addEnv(variables);
}
// If HIVE_LOCAL_TASK_CHILD_OPTS is set, child VM environment setting
// HADOOP_CLIENT_OPTS will be replaced with HIVE_LOCAL_TASK_CHILD_OPTS.
// HADOOP_OPTS is updated too since HADOOP_CLIENT_OPTS is appended
// to HADOOP_OPTS in most cases. This way, the local task JVM can
// have different settings from those of HiveServer2.
if (variables.containsKey(HIVE_LOCAL_TASK_CHILD_OPTS_KEY)) {
String childOpts = variables.get(HIVE_LOCAL_TASK_CHILD_OPTS_KEY);
if (childOpts == null) {
childOpts = "";
}
String clientOpts = variables.put(HADOOP_CLIENT_OPTS, childOpts);
String tmp = variables.get(HADOOP_OPTS_KEY);
if (tmp != null && !StringUtils.isBlank(clientOpts)) {
tmp = tmp.replace(clientOpts, childOpts);
variables.put(HADOOP_OPTS_KEY, tmp);
}
}
env = new String[variables.size()];
int pos = 0;
for (Map.Entry<String, String> entry : variables.entrySet()) {
String name = entry.getKey();
String value = entry.getValue();
env[pos++] = name + "=" + value;
LOG.debug("Setting env: " + name + "=" + LogUtils.maskIfPassword(name, value));
}
LOG.info("Executing: " + cmdLine);
// Run ExecDriver in another JVM
executor = Runtime.getRuntime().exec(cmdLine, env, new File(workDir));
CachingPrintStream errPrintStream = new CachingPrintStream(System.err);
StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, errPrintStream);
outPrinter.start();
errPrinter.start();
int exitVal = jobExecHelper.progressLocal(executor, getId());
// wait for stream threads to finish
outPrinter.join();
errPrinter.join();
if (exitVal != 0) {
LOG.error("Execution failed with exit status: " + exitVal);
if (SessionState.get() != null) {
SessionState.get().addLocalMapRedErrors(getId(), errPrintStream.getOutput());
}
} else {
LOG.info("Execution completed successfully");
}
return exitVal;
} catch (Exception e) {
LOG.error("Exception: " + e, e);
return (1);
} finally {
if (secureDoAs != null) {
secureDoAs.close();
}
}
}
public int executeInProcess(DriverContext driverContext) {
// check the local work
if (work == null) {
return -1;
}
if (execContext == null) {
execContext = new ExecMapperContext(job);
}
memoryMXBean = ManagementFactory.getMemoryMXBean();
long startTime = System.currentTimeMillis();
console.printInfo(Utilities.now()
+ "\tStarting to launch local task to process map join;\tmaximum memory = "
+ memoryMXBean.getHeapMemoryUsage().getMax());
execContext.setJc(job);
// set the local work, so all the operator can get this context
execContext.setLocalWork(work);
try {
startForward(null);
long currentTime = System.currentTimeMillis();
long elapsed = currentTime - startTime;
console.printInfo(Utilities.now() + "\tEnd of local task; Time Taken: "
+ Utilities.showTime(elapsed) + " sec.");
} catch (Throwable throwable) {
if (throwable instanceof OutOfMemoryError
|| (throwable instanceof MapJoinMemoryExhaustionException)) {
l4j.error("Hive Runtime Error: Map local work exhausted memory", throwable);
return 3;
} else {
l4j.error("Hive Runtime Error: Map local work failed", throwable);
return 2;
}
}
return 0;
}
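/**
 * Drives the fetch operators through the local operator tree. For bucketed map joins this
 * is repeated for every bucket file of the big table, or only for the given bucket when
 * bigTableBucket is non-null.
 */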
public void startForward(String bigTableBucket) throws Exception {
boolean inputFileChangeSenstive = work.getInputFileChangeSensitive();
initializeOperators(new HashMap<FetchOperator, JobConf>());
// for each big table's bucket, call the start forward
if (inputFileChangeSenstive) {
for (Map<String, List<String>> bigTableBucketFiles : work
.getBucketMapjoinContext().getAliasBucketFileNameMapping().values()) {
if (bigTableBucket == null) {
for (String bigTableBucketFile : bigTableBucketFiles.keySet()) {
startForward(inputFileChangeSenstive, bigTableBucketFile);
}
} else if (bigTableBucketFiles.keySet().contains(bigTableBucket)) {
startForward(inputFileChangeSenstive, bigTableBucket);
}
}
} else {
startForward(inputFileChangeSenstive, null);
}
}
private void startForward(boolean inputFileChangeSenstive, String bigTableBucket)
throws Exception {
for (Operator<?> source : work.getAliasToWork().values()) {
source.reset();
}
if (inputFileChangeSenstive) {
execContext.setCurrentBigBucketFile(bigTableBucket);
}
for (Map.Entry<String, FetchOperator> entry : fetchOperators.entrySet()) {
String alias = entry.getKey();
FetchOperator fetchOp = entry.getValue();
if (inputFileChangeSenstive) {
fetchOp.clearFetchContext();
setUpFetchOpContext(fetchOp, alias, bigTableBucket);
}
// get the root operator
Operator<? extends OperatorDesc> forwardOp = work.getAliasToWork().get(alias);
// walk through the operator tree
while (!forwardOp.getDone()) {
InspectableObject row = fetchOp.getNextRow();
if (row == null) {
break;
}
forwardOp.processOp(row.o, 0);
}
forwardOp.flush();
}
for (Operator<?> source : work.getAliasToWork().values()) {
source.close(false);
}
}
private void initializeOperators(Map<FetchOperator, JobConf> fetchOpJobConfMap)
throws HiveException {
for (Map.Entry<String, Operator<? extends OperatorDesc>> entry : work.getAliasToWork().entrySet()) {
LOG.debug("initializeOperators: " + entry.getKey() + ", children = " + entry.getValue().getChildOperators());
}
// this mapper operator is used to initialize all the operators
for (Map.Entry<String, FetchWork> entry : work.getAliasToFetchWork().entrySet()) {
if (entry.getValue() == null) {
continue;
}
JobConf jobClone = new JobConf(job);
TableScanOperator ts = (TableScanOperator)work.getAliasToWork().get(entry.getKey());
// push down projections
ColumnProjectionUtils.appendReadColumns(
jobClone, ts.getNeededColumnIDs(), ts.getNeededColumns());
// push down filters
HiveInputFormat.pushFilters(jobClone, ts);
// create a fetch operator
FetchOperator fetchOp = new FetchOperator(entry.getValue(), jobClone);
fetchOpJobConfMap.put(fetchOp, jobClone);
fetchOperators.put(entry.getKey(), fetchOp);
l4j.info("fetchoperator for " + entry.getKey() + " created");
}
// initialize all forward operator
for (Map.Entry<String, FetchOperator> entry : fetchOperators.entrySet()) {
// get the forward op
String alias = entry.getKey();
Operator<? extends OperatorDesc> forwardOp = work.getAliasToWork().get(alias);
// put the exe context into all the operators
forwardOp.setExecContext(execContext);
// All the operators need to be initialized before process
FetchOperator fetchOp = entry.getValue();
JobConf jobConf = fetchOpJobConfMap.get(fetchOp);
if (jobConf == null) {
jobConf = job;
}
// initialize the forward operator
ObjectInspector objectInspector = fetchOp.getOutputObjectInspector();
forwardOp.initialize(jobConf, new ObjectInspector[] {objectInspector});
l4j.info("fetchoperator for " + entry.getKey() + " initialized");
}
}
private void setUpFetchOpContext(FetchOperator fetchOp, String alias, String currentInputFile)
throws Exception {
BucketMapJoinContext bucketMatcherCxt = this.work.getBucketMapjoinContext();
Class<? extends BucketMatcher> bucketMatcherCls = bucketMatcherCxt.getBucketMatcherClass();
BucketMatcher bucketMatcher = ReflectionUtils.newInstance(bucketMatcherCls,
null);
bucketMatcher.setAliasBucketFileNameMapping(bucketMatcherCxt.getAliasBucketFileNameMapping());
List<Path> aliasFiles = bucketMatcher.getAliasBucketFiles(currentInputFile, bucketMatcherCxt
.getMapJoinBigTableAlias(), alias);
fetchOp.setupContext(aliasFiles);
}
@Override
public boolean isMapRedLocalTask() {
return true;
}
@Override
public Collection<Operator<? extends OperatorDesc>> getTopOperators() {
return getWork().getAliasToWork().values();
}
@Override
public String getName() {
return "MAPREDLOCAL";
}
@Override
public StageType getType() {
//assert false;
return StageType.MAPREDLOCAL;
}
@Override
public void shutdown() {
super.shutdown();
if (executor != null) {
executor.destroy();
executor = null;
}
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
venv/Lib/site-packages/pygame/examples/vgrade.py | #!/usr/bin/env python
"""This example demonstrates creating an image with numpy
python, and displaying that through SDL. You can look at the
method of importing numpy and pygame.surfarray. This method
will fail 'gracefully' if it is not available.
I've tried mixing in a lot of comments where the code might
not be self explanatory, nonetheless it may still seem a bit
strange. Learning to use numpy for images like this takes a
bit of learning, but the payoff is extremely fast image
manipulation in python.
For Pygame 1.9.2 and up, this example also showcases a new feature
of surfarray.blit_surface: array broadcasting. If a source array
has either a width or height of 1, the array is repeatedly blitted
to the surface along that dimension to fill the surface. In fact,
a (1, 1) or (1, 1, 3) array results in a simple surface color fill.
Just so you know how this breaks down: for each sampling of
time, roughly 30% goes to creating the gradient, 30% to blitting the
array, and the final 40% to flipping/updating the display surface.
If using an SDL version at least 1.1.8 the window will have
no border decorations.
The code also demonstrates use of the timer events."""
import os, pygame
from pygame.locals import *
try:
from numpy import *
from numpy.random import *
except ImportError:
raise SystemExit('This example requires numpy and the pygame surfarray module')
pygame.surfarray.use_arraytype('numpy')
timer = 0
def stopwatch(message = None):
"simple routine to time python code"
global timer
if not message:
timer = pygame.time.get_ticks()
return
now = pygame.time.get_ticks()
runtime = (now - timer)/1000.0 + .001
print ("%s %s %s" %
(message, runtime, ('seconds\t(%.2ffps)'%(1.0/runtime))))
timer = now
def VertGradientColumn(surf, topcolor, bottomcolor):
"creates a new 3d vertical gradient array"
topcolor = array(topcolor, copy=0)
bottomcolor = array(bottomcolor, copy=0)
diff = bottomcolor - topcolor
width, height = surf.get_size()
# create array from 0.0 to 1.0 triplets
column = arange(height, dtype='float')/height
column = repeat(column[:, newaxis], [3], 1)
# create a single column of gradient
column = topcolor + (diff * column).astype('int')
# make the column a 3d image column by adding X
column = column.astype('uint8')[newaxis,:,:]
#3d array into 2d array
return pygame.surfarray.map_array(surf, column)
def DisplayGradient(surf):
"choose random colors and show them"
stopwatch()
colors = randint(0, 255, (2, 3))
column = VertGradientColumn(surf, colors[0], colors[1])
pygame.surfarray.blit_array(surf, column)
pygame.display.flip()
stopwatch('Gradient:')
def main():
pygame.init()
pygame.mixer.quit() # remove ALSA underflow messages for Debian squeeze
size = 600, 400
os.environ['SDL_VIDEO_CENTERED'] = '1'
screen = pygame.display.set_mode(size, NOFRAME, 0)
pygame.event.set_blocked(MOUSEMOTION) #keep our queue cleaner
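# fire a USEREVENT every 500ms so a new random gradient is drawn twice a second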
pygame.time.set_timer(USEREVENT, 500)
while 1:
event = pygame.event.wait()
if event.type in (QUIT, KEYDOWN, MOUSEBUTTONDOWN):
break
elif event.type == USEREVENT:
DisplayGradient(screen)
if __name__ == '__main__': main()
| []
| []
| [
"SDL_VIDEO_CENTERED"
]
| [] | ["SDL_VIDEO_CENTERED"] | python | 1 | 0 | |
bootstrap/standalone/src/main/java/org/geysermc/platform/standalone/LoopbackUtil.java | package org.geysermc.platform.standalone;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.OpenOption;
import java.nio.file.Paths;
import org.geysermc.common.ChatColor;
public class LoopbackUtil {
private static final String checkExemption = "powershell -Command \"CheckNetIsolation LoopbackExempt -s\""; // Java's Exec feature runs as CMD, NetIsolation is only accessible from PowerShell.
private static final String loopbackCommand = "powershell -Command \"CheckNetIsolation LoopbackExempt -a -n='Microsoft.MinecraftUWP_8wekyb3d8bbwe'\"";
private static final String startScript = "powershell -Command \"Start-Process 'cmd' -ArgumentList /c,%temp%/loopback_minecraft.bat -Verb runAs\"";
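/**
 * Windows 10 network isolation prevents UWP apps (such as the Minecraft Bedrock client)
 * from connecting to loopback addresses. This checks for an existing exemption via
 * CheckNetIsolation and, if missing, registers one through an elevated PowerShell prompt.
 */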
public static void checkLoopback(GeyserStandaloneLogger geyserLogger) {
if (System.getProperty("os.name").equalsIgnoreCase("Windows 10")) {
try {
Process process = Runtime.getRuntime().exec(checkExemption);
InputStream is = process.getInputStream();
StringBuilder sb = new StringBuilder();
while (process.isAlive()) {
if (is.available() != 0) {
sb.append((char) is.read());
}
}
String result = sb.toString();
if (!result.contains("minecraftuwp")) {
Files.write(Paths.get(System.getenv("temp") + "/loopback_minecraft.bat"), loopbackCommand.getBytes(), new OpenOption[0]);
process = Runtime.getRuntime().exec(startScript);
geyserLogger.info(ChatColor.AQUA + "Added loopback exemption to Windows!");
}
} catch (Exception e) {
e.printStackTrace();
geyserLogger.error("Couldn't auto add loopback exemption to Windows!");
}
}
}
}
| [
"\"temp\""
]
| []
| [
"temp"
]
| [] | ["temp"] | java | 1 | 0 | |
common/backbones/imagenet_training/train_imagenet.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# train backbone network with imagenet dataset
#
import os, sys, argparse
import numpy as np
from multiprocessing import cpu_count
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler, TerminateOnNaN
from tensorflow.keras.utils import multi_gpu_model
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from shufflenet import ShuffleNet
from shufflenet_v2 import ShuffleNetV2
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..'))
from yolo3.models.yolo3_nano import NanoNet
from yolo3.models.yolo3_darknet import DarkNet53
from yolo4.models.yolo4_darknet import CSPDarkNet53
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#import tensorflow as tf
#config = tf.ConfigProto()
#config.gpu_options.allow_growth=True #dynamic alloc GPU resource
#config.gpu_options.per_process_gpu_memory_fraction = 0.9 #GPU memory threshold 0.3
#session = tf.Session(config=config)
## set session
#K.set_session(session)
def preprocess(x):
x = np.expand_dims(x, axis=0)
"""
"mode" option description in preprocess_input
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
"""
#x = preprocess_input(x, mode='tf')
x /= 255.0
x -= 0.5
x *= 2.0
return x
def get_model(model_type, include_top=True):
if model_type == 'shufflenet':
input_shape = (224, 224, 3)
model = ShuffleNet(groups=3, weights=None, include_top=include_top)
elif model_type == 'shufflenet_v2':
input_shape = (224, 224, 3)
model = ShuffleNetV2(bottleneck_ratio=1, weights=None, include_top=include_top)
elif model_type == 'nanonet':
input_shape = (224, 224, 3)
model = NanoNet(weights=None, include_top=include_top)
elif model_type == 'darknet53':
input_shape = (224, 224, 3)
model = DarkNet53(weights=None, include_top=include_top)
elif model_type == 'cspdarknet53':
input_shape = (224, 224, 3)
model = CSPDarkNet53(weights=None, include_top=include_top)
else:
raise ValueError('Unsupported model type')
return model, input_shape[:2]
def get_optimizer(optim_type, learning_rate):
if optim_type == 'sgd':
optimizer = SGD(lr=learning_rate, decay=5e-4, momentum=0.9)
elif optim_type == 'rmsprop':
optimizer = RMSprop(lr=learning_rate)
elif optim_type == 'adam':
optimizer = Adam(lr=learning_rate, decay=5e-4)
else:
raise ValueError('Unsupported optimizer type')
return optimizer
def train(args, model, input_shape):
log_dir = 'logs'
# callbacks for training process
checkpoint = ModelCheckpoint(os.path.join(log_dir, 'ep{epoch:03d}-val_loss{val_loss:.3f}-val_acc{val_acc:.3f}-val_top_k_categorical_accuracy{val_top_k_categorical_accuracy:.3f}.h5'),
monitor='val_acc',
mode='max',
verbose=1,
save_weights_only=False,
save_best_only=True,
period=1)
logging = TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=False, write_grads=False, write_images=False, update_freq='batch')
terminate_on_nan = TerminateOnNaN()
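# step-decay schedule: drop to the next learning rate every 30 epochs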
learn_rates = [0.05, 0.01, 0.005, 0.001, 0.0005]
lr_scheduler = LearningRateScheduler(lambda epoch: learn_rates[epoch // 30])
# data generator
train_datagen = ImageDataGenerator(preprocessing_function=preprocess,
zoom_range=0.25,
width_shift_range=0.05,
height_shift_range=0.05,
brightness_range=[0.5,1.5],
rotation_range=30,
shear_range=0.2,
channel_shift_range=0.1,
#rescale=1./255,
vertical_flip=True,
horizontal_flip=True)
test_datagen = ImageDataGenerator(preprocessing_function=preprocess)
train_generator = train_datagen.flow_from_directory(
args.train_data_path,
target_size=input_shape,
batch_size=args.batch_size)
test_generator = test_datagen.flow_from_directory(
args.val_data_path,
target_size=input_shape,
batch_size=args.batch_size)
# get optimizer
optimizer = get_optimizer(args.optim_type, args.learning_rate)
# start training
model.compile(
optimizer=optimizer,
metrics=['accuracy', 'top_k_categorical_accuracy'],
loss='categorical_crossentropy')
print('Train on {} samples, val on {} samples, with batch size {}.'.format(train_generator.samples, test_generator.samples, args.batch_size))
model.fit_generator(
train_generator,
steps_per_epoch=train_generator.samples // args.batch_size,
epochs=args.total_epoch,
workers=cpu_count()-1, #Try to parallelize feeding image data but leave one CPU core idle
initial_epoch=args.init_epoch,
use_multiprocessing=True,
max_queue_size=10,
validation_data=test_generator,
validation_steps=test_generator.samples // args.batch_size,
callbacks=[logging, checkpoint, lr_scheduler, terminate_on_nan])
# Finally store model
model.save(os.path.join(log_dir, 'trained_final.h5'))
def evaluate_model(args, model, input_shape):
# eval data generator
eval_datagen = ImageDataGenerator(preprocessing_function=preprocess)
eval_generator = eval_datagen.flow_from_directory(
args.val_data_path,
target_size=input_shape,
batch_size=args.batch_size)
# get optimizer
optimizer = get_optimizer(args.optim_type, args.learning_rate)
# start training
model.compile(
optimizer=optimizer,
metrics=['accuracy', 'top_k_categorical_accuracy'],
loss='categorical_crossentropy')
print('Evaluate on {} samples, with batch size {}.'.format(eval_generator.samples, args.batch_size))
scores = model.evaluate_generator(
eval_generator,
steps=eval_generator.samples // args.batch_size,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=1)
print('Evaluate loss:', scores[0])
print('Top-1 accuracy:', scores[1])
print('Top-k accuracy:', scores[2])
def verify_with_image(model, input_shape):
from tensorflow.keras.applications.resnet50 import decode_predictions
from PIL import Image
while True:
img_file = input('Input image filename:')
try:
img = Image.open(img_file)
resized_img = img.resize(input_shape, Image.BICUBIC)
except:
print('Open Error! Try again!')
continue
else:
img_array = np.asarray(resized_img).astype('float32')
x = preprocess(img_array)
# assumption: preprocess returns a single HWC image, so add a batch axis before predict;
# the ndim guard keeps behaviour unchanged if it already returns a batched array
x = np.expand_dims(x, axis=0) if x.ndim == 3 else x
preds = model.predict(x)
print('Predict result:', decode_predictions(preds))
img.show()
def main(args):
include_top = True
if args.dump_headless:
include_top = False
# prepare model
model, input_shape = get_model(args.model_type, include_top=include_top)
if args.weights_path:
model.load_weights(args.weights_path, by_name=True)
# support multi-gpu training
if args.gpu_num >= 2:
model = multi_gpu_model(model, gpus=args.gpu_num)
model.summary()
if args.evaluate:
K.set_learning_phase(0)
evaluate_model(args, model, input_shape)
elif args.verify_with_image:
K.set_learning_phase(0)
verify_with_image(model, input_shape)
elif args.dump_headless:
K.set_learning_phase(0)
model.save(args.output_model_file)
print('export headless model to %s' % str(args.output_model_file))
else:
train(args, model, input_shape)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_type', type=str, required=False, default='shufflenet_v2',
help='backbone model type: shufflenet/shufflenet_v2/nanonet/darknet53/cspdarknet53, default=shufflenet_v2')
parser.add_argument('--train_data_path', type=str,# required=True,
help='path to Imagenet train data')
parser.add_argument('--val_data_path', type=str,# required=True,
help='path to Imagenet validation dataset')
parser.add_argument('--weights_path', type=str,required=False, default=None,
help = "Pretrained model/weights file for fine tune")
parser.add_argument('--batch_size', type=int,required=False, default=128,
help = "batch size for train, default=128")
parser.add_argument('--optim_type', type=str, required=False, default='sgd',
help='optimizer type: sgd/rmsprop/adam, default=sgd')
parser.add_argument('--learning_rate', type=float,required=False, default=.05,
help = "Initial learning rate, default=0.05")
parser.add_argument('--init_epoch', type=int,required=False, default=0,
help = "Initial training epochs for fine tune training, default=0")
parser.add_argument('--total_epoch', type=int,required=False, default=200,
help = "Total training epochs, default=200")
parser.add_argument('--gpu_num', type=int, required=False, default=1,
help='Number of GPU to use, default=1')
parser.add_argument('--evaluate', default=False, action="store_true",
help='Evaluate a trained model with validation dataset')
parser.add_argument('--verify_with_image', default=False, action="store_true",
help='Verify trained model with image')
parser.add_argument('--dump_headless', default=False, action="store_true",
help='Dump out classification model to headless backbone model')
parser.add_argument('--output_model_file', type=str,
help='output headless backbone model file')
args = parser.parse_args()
main(args)
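# Example invocation (hypothetical script name and data paths, shown for illustration only):
#   python train_imagenet.py --model_type=shufflenet_v2 --train_data_path=data/ILSVRC2012/train \
#       --val_data_path=data/ILSVRC2012/val --batch_size=64 --optim_type=sgd --learning_rate=0.05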
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"] | python | 2 | 0 | |
pkg/runtimes/docker/translate.go | /*
Copyright ยฉ 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package docker
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"github.com/containerd/containerd/log"
"github.com/docker/docker/api/types"
docker "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
l "github.com/rancher/k3d/v5/pkg/logger"
runtimeErr "github.com/rancher/k3d/v5/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/types/fixes"
"inet.af/netaddr"
dockercliopts "github.com/docker/cli/opts"
dockerunits "github.com/docker/go-units"
)
// TranslateNodeToContainer translates a k3d node specification to a docker container representation
func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) {
init := true
if disableInit, err := strconv.ParseBool(os.Getenv("K3D_DEBUG_DISABLE_DOCKER_INIT")); err == nil && disableInit {
l.Log().Traceln("docker-init disabled for all containers")
init = false
}
/* initialize everything that we need */
containerConfig := docker.Config{}
hostConfig := docker.HostConfig{
Init: &init,
ExtraHosts: node.ExtraHosts,
}
networkingConfig := network.NetworkingConfig{}
/* Name & Image */
containerConfig.Hostname = node.Name
containerConfig.Image = node.Image
/* Command & Arguments */
// FIXME: FixCgroupV2 - to be removed when fixed upstream
if fixes.FixEnabledAny() {
if node.Role == k3d.AgentRole || node.Role == k3d.ServerRole {
containerConfig.Entrypoint = []string{
"/bin/k3d-entrypoint.sh",
}
}
}
containerConfig.Cmd = []string{}
containerConfig.Cmd = append(containerConfig.Cmd, node.Cmd...) // contains k3s command and role-specific required flags/args
containerConfig.Cmd = append(containerConfig.Cmd, node.Args...) // extra flags/args
/* Environment Variables */
containerConfig.Env = node.Env
/* Labels */
containerConfig.Labels = node.RuntimeLabels // has to include the role
/* Auto-Restart */
if node.Restart {
hostConfig.RestartPolicy = docker.RestartPolicy{
Name: "unless-stopped",
}
}
/* Tmpfs Mounts */
hostConfig.Tmpfs = make(map[string]string)
for _, mnt := range k3d.DefaultTmpfsMounts {
hostConfig.Tmpfs[mnt] = ""
}
if node.GPURequest != "" {
gpuopts := dockercliopts.GpuOpts{}
if err := gpuopts.Set(node.GPURequest); err != nil {
return nil, fmt.Errorf("Failed to set GPU Request: %+v", err)
}
hostConfig.DeviceRequests = gpuopts.Value()
}
// memory limits
// fake meminfo is mounted to hostConfig.Binds
if node.Memory != "" {
memory, err := dockerunits.RAMInBytes(node.Memory)
if err != nil {
return nil, fmt.Errorf("Failed to set memory limit: %+v", err)
}
hostConfig.Memory = memory
}
/* They have to run in privileged mode */
// TODO: can we replace this by a reduced set of capabilities?
hostConfig.Privileged = true
/* Volumes */
hostConfig.Binds = node.Volumes
// containerConfig.Volumes = map[string]struct{}{} // TODO: do we need this? We only used binds before
/* Ports */
exposedPorts := nat.PortSet{}
for ep := range node.Ports {
if _, exists := exposedPorts[ep]; !exists {
exposedPorts[ep] = struct{}{}
}
}
containerConfig.ExposedPorts = exposedPorts
hostConfig.PortBindings = node.Ports
/* Network */
endpointsConfig := map[string]*network.EndpointSettings{}
for _, net := range node.Networks {
epSettings := &network.EndpointSettings{}
endpointsConfig[net] = epSettings
}
networkingConfig.EndpointsConfig = endpointsConfig
/* Static IP */
if !node.IP.IP.IsZero() && node.IP.Static {
epconf := networkingConfig.EndpointsConfig[node.Networks[0]]
if epconf.IPAMConfig == nil {
epconf.IPAMConfig = &network.EndpointIPAMConfig{}
}
epconf.IPAMConfig.IPv4Address = node.IP.IP.String()
}
if len(node.Networks) > 0 {
netInfo, err := GetNetwork(context.Background(), node.Networks[0]) // FIXME: only considering first network here, as that's the one k3d creates for a cluster
if err != nil {
l.Log().Warnf("Failed to get network information: %v", err)
} else if netInfo.Driver == "host" {
hostConfig.NetworkMode = "host"
}
}
return &NodeInDocker{
ContainerConfig: containerConfig,
HostConfig: hostConfig,
NetworkingConfig: networkingConfig,
}, nil
}
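// Usage sketch (comment only, not part of the original file; assumes a populated *k3d.Node named "node"):
//   nodeInDocker, err := TranslateNodeToContainer(node)
//   if err != nil { /* handle the error */ }
//   // nodeInDocker.ContainerConfig, .HostConfig and .NetworkingConfig can then be passed on
//   // to the Docker client when creating the container.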
// TranslateContainerToNode translates a docker container object into a k3d node representation
func TranslateContainerToNode(cont *types.Container) (*k3d.Node, error) {
node := &k3d.Node{
Name: strings.TrimPrefix(cont.Names[0], "/"), // container name with leading '/' cut off
Image: cont.Image,
RuntimeLabels: cont.Labels,
Role: k3d.NodeRoles[cont.Labels[k3d.LabelRole]],
// TODO: all the rest
}
return node, nil
}
// TranslateContainerDetailsToNode translates a docker containerJSON object into a k3d node representation
func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d.Node, error) {
// first, make sure that it's actually a k3d-managed container by checking if it has all the default labels
for k, v := range k3d.DefaultRuntimeLabels {
l.Log().Tracef("TranslateContainerDetailsToNode: Checking for default object label %s=%s on container %s", k, v, containerDetails.Name)
found := false
for lk, lv := range containerDetails.Config.Labels {
if lk == k && lv == v {
found = true
break
}
}
if !found {
l.Log().Debugf("Container %s is missing default label %s=%s in label set %+v", containerDetails.Name, k, v, containerDetails.Config.Labels)
return nil, runtimeErr.ErrRuntimeContainerUnknown
}
}
// restart -> we only set 'unless-stopped' upon cluster creation
restart := false
if containerDetails.HostConfig.RestartPolicy.IsAlways() || containerDetails.HostConfig.RestartPolicy.IsUnlessStopped() {
restart = true
}
// get networks and ensure that the cluster network is first in list
orderedNetworks := []string{}
otherNetworks := []string{}
for networkName := range containerDetails.NetworkSettings.Networks {
if strings.HasPrefix(networkName, fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, containerDetails.Config.Labels[k3d.LabelClusterName])) { // FIXME: catch error if label 'k3d.cluster' does not exist, but this should also never be the case
orderedNetworks = append(orderedNetworks, networkName)
continue
}
otherNetworks = append(otherNetworks, networkName)
}
orderedNetworks = append(orderedNetworks, otherNetworks...)
/**
* ServerOpts
*/
// IsInit
serverOpts := k3d.ServerOpts{IsInit: false}
clusterInitFlagSet := false
for _, arg := range containerDetails.Args {
if strings.Contains(arg, "--cluster-init") {
clusterInitFlagSet = true
break
}
}
if serverIsInitLabel, ok := containerDetails.Config.Labels[k3d.LabelServerIsInit]; ok {
if serverIsInitLabel == "true" {
if !clusterInitFlagSet {
l.Log().Errorf("Container %s has label %s=true, but the args do not contain the --cluster-init flag", containerDetails.Name, k3d.LabelServerIsInit)
} else {
serverOpts.IsInit = true
}
}
}
// Kube API
serverOpts.KubeAPI = &k3d.ExposureOpts{}
for k, v := range containerDetails.Config.Labels {
if k == k3d.LabelServerAPIHostIP {
serverOpts.KubeAPI.Binding.HostIP = v
} else if k == k3d.LabelServerAPIHost {
serverOpts.KubeAPI.Host = v
} else if k == k3d.LabelServerAPIPort {
serverOpts.KubeAPI.Binding.HostPort = v
}
}
// labels: only copy k3d.* labels
labels := map[string]string{}
for k, v := range containerDetails.Config.Labels {
if strings.HasPrefix(k, "k3d") {
labels[k] = v
}
}
// status
nodeState := k3d.NodeState{
Running: containerDetails.ContainerJSONBase.State.Running,
Status: containerDetails.ContainerJSONBase.State.Status,
}
// memory limit
memoryStr := dockerunits.HumanSize(float64(containerDetails.HostConfig.Memory))
// no-limit is returned as 0B, filter this out
if memoryStr == "0B" {
memoryStr = ""
}
// IP
var nodeIP k3d.NodeIP
var clusterNet *network.EndpointSettings
if netLabel, ok := labels[k3d.LabelNetwork]; ok {
for netName, net := range containerDetails.NetworkSettings.Networks {
if netName == netLabel {
clusterNet = net
}
}
} else {
l.Log().Debugf("no netlabel present on container %s", containerDetails.Name)
}
if clusterNet != nil {
parsedIP, err := netaddr.ParseIP(clusterNet.IPAddress)
if err != nil {
if nodeState.Running && nodeState.Status != "restarting" { // if the container is not running or currently restarting, it won't have an IP, so we don't error in that case
return nil, fmt.Errorf("failed to parse IP '%s' for container '%s': %s\nStatus: %v\n%+v", clusterNet.IPAddress, containerDetails.Name, err, nodeState.Status, containerDetails.NetworkSettings)
} else {
log.L.Tracef("failed to parse IP '%s' for container '%s', likely because it's not running (or restarting): %v", clusterNet.IPAddress, containerDetails.Name, err)
}
}
isStaticIP := false
if staticIPLabel, ok := labels[k3d.LabelNodeStaticIP]; ok && staticIPLabel != "" {
isStaticIP = true
}
if !parsedIP.IsZero() {
nodeIP = k3d.NodeIP{
IP: parsedIP,
Static: isStaticIP,
}
}
} else {
l.Log().Debugf("failed to get IP for container %s as we couldn't find the cluster network", containerDetails.Name)
}
node := &k3d.Node{
Name: strings.TrimPrefix(containerDetails.Name, "/"), // container name with leading '/' cut off
Role: k3d.NodeRoles[containerDetails.Config.Labels[k3d.LabelRole]],
Image: containerDetails.Image,
Volumes: containerDetails.HostConfig.Binds,
Env: containerDetails.Config.Env,
Cmd: containerDetails.Config.Cmd,
Args: []string{}, // empty, since Cmd already contains flags
Ports: containerDetails.HostConfig.PortBindings,
Restart: restart,
Created: containerDetails.Created,
RuntimeLabels: labels,
Networks: orderedNetworks,
ServerOpts: serverOpts,
AgentOpts: k3d.AgentOpts{},
State: nodeState,
Memory: memoryStr,
IP: nodeIP, // only valid for the cluster network
}
return node, nil
}
| [
"\"K3D_DEBUG_DISABLE_DOCKER_INIT\""
]
| []
| [
"K3D_DEBUG_DISABLE_DOCKER_INIT"
]
| [] | ["K3D_DEBUG_DISABLE_DOCKER_INIT"] | go | 1 | 0 | |
common/src/java/org/apache/hadoop/hive/conf/HiveConf.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.conf;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.common.type.TimestampTZUtil;
import org.apache.hadoop.hive.conf.Validator.PatternSet;
import org.apache.hadoop.hive.conf.Validator.RangeValidator;
import org.apache.hadoop.hive.conf.Validator.RatioValidator;
import org.apache.hadoop.hive.conf.Validator.SizeValidator;
import org.apache.hadoop.hive.conf.Validator.StringSet;
import org.apache.hadoop.hive.conf.Validator.TimeValidator;
import org.apache.hadoop.hive.conf.Validator.WritableDirectoryValidator;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hive.common.HiveCompat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.login.LoginException;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URL;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Hive Configuration.
*/
public class HiveConf extends Configuration {
protected String hiveJar;
protected Properties origProp;
protected String auxJars;
private static final Logger LOG = LoggerFactory.getLogger(HiveConf.class);
private static boolean loadMetastoreConfig = false;
private static boolean loadHiveServer2Config = false;
private static URL hiveDefaultURL = null;
private static URL hiveSiteURL = null;
private static URL hivemetastoreSiteUrl = null;
private static URL hiveServer2SiteUrl = null;
private static byte[] confVarByteArray = null;
private static final Map<String, ConfVars> vars = new HashMap<String, ConfVars>();
private static final Map<String, ConfVars> metaConfs = new HashMap<String, ConfVars>();
private final List<String> restrictList = new ArrayList<String>();
private final Set<String> hiddenSet = new HashSet<String>();
private final List<String> rscList = new ArrayList<>();
private Pattern modWhiteListPattern = null;
private volatile boolean isSparkConfigUpdated = false;
private static final int LOG_PREFIX_LENGTH = 64;
public boolean getSparkConfigUpdated() {
return isSparkConfigUpdated;
}
public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
this.isSparkConfigUpdated = isSparkConfigUpdated;
}
public interface EncoderDecoder<K, V> {
V encode(K key);
K decode(V value);
}
public static class URLEncoderDecoder implements EncoderDecoder<String, String> {
private static final String UTF_8 = "UTF-8";
@Override
public String encode(String key) {
try {
return URLEncoder.encode(key, UTF_8);
} catch (UnsupportedEncodingException e) {
return key;
}
}
@Override
public String decode(String value) {
try {
return URLDecoder.decode(value, UTF_8);
} catch (UnsupportedEncodingException e) {
return value;
}
}
}
public static class EncoderDecoderFactory {
public static final URLEncoderDecoder URL_ENCODER_DECODER = new URLEncoderDecoder();
}
static {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
if (classLoader == null) {
classLoader = HiveConf.class.getClassLoader();
}
hiveDefaultURL = classLoader.getResource("hive-default.xml");
// Look for hive-site.xml on the CLASSPATH and log its location if found.
hiveSiteURL = findConfigFile(classLoader, "hive-site.xml", true);
hivemetastoreSiteUrl = findConfigFile(classLoader, "hivemetastore-site.xml", false);
hiveServer2SiteUrl = findConfigFile(classLoader, "hiveserver2-site.xml", false);
for (ConfVars confVar : ConfVars.values()) {
vars.put(confVar.varname, confVar);
}
Set<String> llapDaemonConfVarsSetLocal = new LinkedHashSet<>();
populateLlapDaemonVarsSet(llapDaemonConfVarsSetLocal);
llapDaemonVarsSet = Collections.unmodifiableSet(llapDaemonConfVarsSetLocal);
}
private static URL findConfigFile(ClassLoader classLoader, String name, boolean doLog) {
URL result = classLoader.getResource(name);
if (result == null) {
String confPath = System.getenv("HIVE_CONF_DIR");
result = checkConfigFile(new File(confPath, name));
if (result == null) {
String homePath = System.getenv("HIVE_HOME");
String nameInConf = "conf" + File.separator + name;
result = checkConfigFile(new File(homePath, nameInConf));
if (result == null) {
URI jarUri = null;
try {
// Handle both file:// and jar:<url>!{entry} in the case of shaded hive libs
URL sourceUrl = HiveConf.class.getProtectionDomain().getCodeSource().getLocation();
jarUri = sourceUrl.getProtocol().equalsIgnoreCase("jar") ? new URI(sourceUrl.getPath()) : sourceUrl.toURI();
} catch (Throwable e) {
LOG.info("Cannot get jar URI", e);
System.err.println("Cannot get jar URI: " + e.getMessage());
}
// From the jar file, the parent is /lib folder
File parent = new File(jarUri).getParentFile();
if (parent != null) {
result = checkConfigFile(new File(parent.getParentFile(), nameInConf));
}
}
}
}
if (doLog) {
LOG.info("Found configuration file {}", result);
}
return result;
}
private static URL checkConfigFile(File f) {
try {
return (f.exists() && f.isFile()) ? f.toURI().toURL() : null;
} catch (Throwable e) {
LOG.info("Error looking for config {}", f, e);
System.err.println("Error looking for config " + f + ": " + e.getMessage());
return null;
}
}
@InterfaceAudience.Private
public static final String PREFIX_LLAP = "llap.";
@InterfaceAudience.Private
public static final String PREFIX_HIVE_LLAP = "hive.llap.";
/**
* Metastore related options that the db is initialized against. When a conf
* var in this is list is changed, the metastore instance for the CLI will
* be recreated so that the change will take effect.
*/
public static final HiveConf.ConfVars[] metaVars = {
HiveConf.ConfVars.METASTOREWAREHOUSE,
HiveConf.ConfVars.REPLDIR,
HiveConf.ConfVars.METASTOREURIS,
HiveConf.ConfVars.METASTORESELECTION,
HiveConf.ConfVars.METASTORE_SERVER_PORT,
HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES,
HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME,
HiveConf.ConfVars.METASTOREPWD,
HiveConf.ConfVars.METASTORECONNECTURLHOOK,
HiveConf.ConfVars.METASTORECONNECTURLKEY,
HiveConf.ConfVars.METASTORESERVERMINTHREADS,
HiveConf.ConfVars.METASTORESERVERMAXTHREADS,
HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE,
HiveConf.ConfVars.METASTORE_INT_ORIGINAL,
HiveConf.ConfVars.METASTORE_INT_ARCHIVED,
HiveConf.ConfVars.METASTORE_INT_EXTRACTED,
HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE,
HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL,
HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL,
HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE,
HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES,
HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE,
HiveConf.ConfVars.METASTORE_VALIDATE_TABLES,
HiveConf.ConfVars.METASTORE_DATANUCLEUS_INIT_COL_INFO,
HiveConf.ConfVars.METASTORE_VALIDATE_COLUMNS,
HiveConf.ConfVars.METASTORE_VALIDATE_CONSTRAINTS,
HiveConf.ConfVars.METASTORE_STORE_MANAGER_TYPE,
HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL,
HiveConf.ConfVars.METASTORE_TRANSACTION_ISOLATION,
HiveConf.ConfVars.METASTORE_CACHE_LEVEL2,
HiveConf.ConfVars.METASTORE_CACHE_LEVEL2_TYPE,
HiveConf.ConfVars.METASTORE_IDENTIFIER_FACTORY,
HiveConf.ConfVars.METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK,
HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS,
HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX,
HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ,
HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION,
HiveConf.ConfVars.METASTORE_EVENT_MESSAGE_FACTORY,
HiveConf.ConfVars.METASTORE_FILTER_HOOK,
HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS,
HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS,
HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX,
HiveConf.ConfVars.METASTORE_INIT_HOOKS,
HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
HiveConf.ConfVars.HMSHANDLERATTEMPTS,
HiveConf.ConfVars.HMSHANDLERINTERVAL,
HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF,
HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
HiveConf.ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS,
HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
HiveConf.ConfVars.USERS_IN_ADMIN_ROLE,
HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
HiveConf.ConfVars.HIVE_TXN_MANAGER,
HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES,
HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE,
HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX,
HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER,
HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_FPP,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
HiveConf.ConfVars.METASTORE_FASTPATH,
HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS,
HiveConf.ConfVars.METASTORE_WM_DEFAULT_POOL_SIZE
};
/**
* User configurable Metastore vars
*/
public static final HiveConf.ConfVars[] metaConfVars = {
HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL,
HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
HiveConf.ConfVars.METASTORE_CAPABILITY_CHECK,
HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES
};
static {
for (ConfVars confVar : metaConfVars) {
metaConfs.put(confVar.varname, confVar);
}
}
public static final String HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME = "hive.llap.daemon.service.principal";
public static final String HIVE_SERVER2_AUTHENTICATION_LDAP_USERMEMBERSHIPKEY_NAME =
"hive.server2.authentication.ldap.userMembershipKey";
/**
* dbVars are the parameters can be set per database. If these
* parameters are set as a database property, when switching to that
* database, the HiveConf variable will be changed. The change of these
* parameters will effectively change the DFS and MapReduce clusters
* for different databases.
*/
public static final HiveConf.ConfVars[] dbVars = {
HiveConf.ConfVars.HADOOPBIN,
HiveConf.ConfVars.METASTOREWAREHOUSE,
HiveConf.ConfVars.SCRATCHDIR
};
/**
* encoded parameter values are ;-) encoded. Use decoder to get ;-) decoded string
*/
public static final HiveConf.ConfVars[] ENCODED_CONF = {
ConfVars.HIVEQUERYSTRING
};
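// Illustrative note (comment only, not in the original source): values of the variables listed in
// ENCODED_CONF are stored URL-encoded; EncoderDecoderFactory.URL_ENCODER_DECODER (defined above)
// is the encode/decode pair intended for writing and reading such values back.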
/**
* Variables used by LLAP daemons.
* TODO: Eventually auto-populate this based on prefixes. The conf variables
* will need to be renamed for this.
*/
private static final Set<String> llapDaemonVarsSet;
private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal) {
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_ENABLED.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_MEMORY_MODE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_MAX_ALLOC.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_ARENA_COUNT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_DIRECT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_USE_LRFU.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_LRFU_LAMBDA.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_USE_FILEID_PATH.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ORC_ENABLE_TIME_COUNTERS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_PRINCIPAL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_KEYTAB_FILE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ZKSM_ZK_CONNECTION_STRING.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_SECURITY_ACL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_ACL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_SECURITY_ACL_DENY.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_ACL_DENY.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DELEGATION_TOKEN_LIFETIME.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_RPC_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_WEB_AUTO_AUTH.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_RPC_NUM_HANDLERS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WORK_DIRS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_YARN_SHUFFLE_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_RPC_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_XMX_HEADROOM.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_VCPUS_PER_INSTANCE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_NUM_FILE_CLEANER_THREADS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_FILE_CLEANUP_DELAY_SECONDS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SERVICE_REFRESH_INTERVAL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_SSL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_VALIDATE_ACLS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_LOGGER.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_USE_FQDN.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_OUTPUT_FORMAT_ARROW.varname);
}
/**
* Get a set containing configuration parameter names used by LLAP Server instances
* @return an unmodifiable set containing llap ConfVars
*/
public static final Set<String> getLlapDaemonConfVars() {
return llapDaemonVarsSet;
}
/**
* ConfVars.
*
* These are the default configuration properties for Hive. Each HiveConf
* object is initialized as follows:
*
* 1) Hadoop configuration properties are applied.
* 2) ConfVar properties with non-null values are overlayed.
* 3) hive-site.xml properties are overlayed.
*
* WARNING: think twice before adding any Hadoop configuration properties
* with non-null values to this list as they will override any values defined
* in the underlying Hadoop configuration.
*/
public static enum ConfVars {
// QL execution stuff
SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""),
PLAN("hive.exec.plan", "", ""),
STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
"Directory name that will be created inside table locations in order to support HDFS encryption. " +
"This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " +
"In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."),
SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
"HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
"For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
"with ${hive.scratch.dir.permission}."),
REPLDIR("hive.repl.rootdir","/user/${system:user.name}/repl/",
"HDFS root dir for all replication dumps."),
REPLCMENABLED("hive.repl.cm.enabled", false,
"Turn on ChangeManager, so delete files will go to cmrootdir."),
REPLCMDIR("hive.repl.cmrootdir","/user/${system:user.name}/cmroot/",
"Root dir for ChangeManager, used for deleted files."),
REPLCMRETIAN("hive.repl.cm.retain","24h",
new TimeValidator(TimeUnit.HOURS),
"Time to retain removed files in cmrootdir."),
REPLCMINTERVAL("hive.repl.cm.interval","3600s",
new TimeValidator(TimeUnit.SECONDS),
"Inteval for cmroot cleanup thread."),
REPL_FUNCTIONS_ROOT_DIR("hive.repl.replica.functions.root.dir","/user/${system:user.name}/repl/functions/",
"Root directory on the replica warehouse where the repl sub-system will store jars from the primary warehouse"),
REPL_APPROX_MAX_LOAD_TASKS("hive.repl.approx.max.load.tasks", 10000,
"Provide an approximation of the maximum number of tasks that should be executed before \n"
+ "dynamically generating the next set of tasks. The number is approximate as Hive \n"
+ "will stop at a slightly higher number, the reason being some events might lead to a \n"
+ "task increment that would cross the specified limit."),
REPL_PARTITIONS_DUMP_PARALLELISM("hive.repl.partitions.dump.parallelism",100,
"Number of threads that will be used to dump partition data information during repl dump."),
REPL_DUMPDIR_CLEAN_FREQ("hive.repl.dumpdir.clean.freq", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Frequency at which timer task runs to purge expired dump dirs."),
REPL_DUMPDIR_TTL("hive.repl.dumpdir.ttl", "7d",
new TimeValidator(TimeUnit.DAYS),
"TTL of dump dirs before cleanup."),
REPL_DUMP_METADATA_ONLY("hive.repl.dump.metadata.only", false,
"Indicates whether replication dump only metadata information or data + metadata. \n"
+ "This config makes hive.repl.include.external.tables config ineffective."),
REPL_DUMP_INCLUDE_ACID_TABLES("hive.repl.dump.include.acid.tables", false,
"Indicates if repl dump should include information about ACID tables. It should be \n"
+ "used in conjunction with 'hive.repl.dump.metadata.only' to enable copying of \n"
+ "metadata for acid tables which do not require the corresponding transaction \n"
+ "semantics to be applied on target. This can be removed when ACID table \n"
+ "replication is supported."),
REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT("hive.repl.bootstrap.dump.open.txn.timeout", "1h",
new TimeValidator(TimeUnit.HOURS),
"Indicates the timeout for all transactions which are opened before triggering bootstrap REPL DUMP. "
+ "If these open transactions are not closed within the timeout value, then REPL DUMP will "
+ "forcefully abort those transactions and continue with bootstrap dump."),
//https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/TransparentEncryption.html#Running_as_the_superuser
REPL_ADD_RAW_RESERVED_NAMESPACE("hive.repl.add.raw.reserved.namespace", false,
"For TDE with same encryption keys on source and target, allow Distcp super user to access \n"
+ "the raw bytes from filesystem without decrypting on source and then encrypting on target."),
REPL_INCLUDE_EXTERNAL_TABLES("hive.repl.include.external.tables", false,
"Indicates if repl dump should include information about external tables. It should be \n"
+ "used in conjunction with 'hive.repl.dump.metadata.only' set to false. if 'hive.repl.dump.metadata.only' \n"
+ " is set to true then this config parameter has no effect as external table meta data is flushed \n"
+ " always by default."),
REPL_ENABLE_MOVE_OPTIMIZATION("hive.repl.enable.move.optimization", false,
"If its set to true, REPL LOAD copies data files directly to the target table/partition location \n"
+ "instead of copying to staging directory first and then move to target location. This optimizes \n"
+ " the REPL LOAD on object data stores such as S3 or WASB where creating a directory and move \n"
+ " files are costly operations. In file system like HDFS where move operation is atomic, this \n"
+ " optimization should not be enabled as it may lead to inconsistent data read for non acid tables."),
REPL_MOVE_OPTIMIZED_FILE_SCHEMES("hive.repl.move.optimized.scheme", "s3a, wasb",
"Comma separated list of schemes for which move optimization will be enabled during repl load. \n"
+ "This configuration overrides the value set using REPL_ENABLE_MOVE_OPTIMIZATION for the given schemes. \n"
+ " Schemes of the file system which does not support atomic move (rename) can be specified here to \n "
+ " speed up the repl load operation. In file system like HDFS where move operation is atomic, this \n"
+ " optimization should not be enabled as it may lead to inconsistent data read for non acid tables."),
LOCALSCRATCHDIR("hive.exec.local.scratchdir",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
"Local scratch space for Hive jobs"),
DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir",
"${system:java.io.tmpdir}" + File.separator + "${hive.session.id}_resources",
"Temporary local directory for added resources in the remote file system."),
SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700",
"The permission for the user specific scratch directories that get created."),
SUBMITVIACHILD("hive.exec.submitviachild", false, ""),
SUBMITLOCALTASKVIACHILD("hive.exec.submit.local.task.via.child", true,
"Determines whether local tasks (typically mapjoin hashtable generation phase) runs in \n" +
"separate JVM (true recommended) or not. \n" +
"Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues."),
SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000,
"Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \n" +
"This prevents runaway scripts from filling logs partitions to capacity"),
ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false,
"When enabled, this option allows a user script to exit successfully without consuming \n" +
"all the data from the standard input."),
STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:",
"Streaming jobs that log to standard error with this prefix can log counter or status information."),
STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true,
"Enable consumption of status and counter messages for streaming jobs."),
COMPRESSRESULT("hive.exec.compress.output", false,
"This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed. \n" +
"The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false,
"This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. \n" +
"The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", "", ""),
COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", "", ""),
BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000),
"size per reducer.The default is 256Mb, i.e if the input size is 1G, it will use 4 reducers."),
MAXREDUCERS("hive.exec.reducers.max", 1009,
"max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is\n" +
"negative, Hive will use this one as the max number of reducers when automatically determine number of reducers."),
PREEXECHOOKS("hive.exec.pre.hooks", "",
"Comma-separated list of pre-execution hooks to be invoked for each statement. \n" +
"A pre-execution hook is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
POSTEXECHOOKS("hive.exec.post.hooks", "",
"Comma-separated list of post-execution hooks to be invoked for each statement. \n" +
"A post-execution hook is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
ONFAILUREHOOKS("hive.exec.failure.hooks", "",
"Comma-separated list of on-failure hooks to be invoked for each statement. \n" +
"An on-failure hook is specified as the name of Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "",
"Comma-separated list of hooks to be invoked for each query which can \n" +
"tranform the query before it's placed in the job.xml file. Must be a Java class which \n" +
"extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."),
CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
"Comma-separated list of statistics publishers to be invoked on counters on each job. \n" +
"A client stats publisher is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface."),
ATSHOOKQUEUECAPACITY("hive.ats.hook.queue.capacity", 64,
"Queue size for the ATS Hook executor. If the number of outstanding submissions \n" +
"to the ATS executor exceed this amount, the Hive ATS Hook will not try to log queries to ATS."),
EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"),
EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8,
"How many jobs at most can be executed in parallel"),
HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", true,
"Whether speculative execution for reducers should be turned on. "),
HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L,
"The interval with which to poll the JobTracker for the counters the running job. \n" +
"The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be."),
DYNAMICPARTITIONING("hive.exec.dynamic.partition", true,
"Whether or not to allow dynamic partitions in DML/DDL."),
DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "strict",
"In strict mode, the user must specify at least one static partition\n" +
"in case the user accidentally overwrites all partitions.\n" +
"In nonstrict mode all partitions are allowed to be dynamic."),
DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000,
"Maximum number of dynamic partitions allowed to be created in total."),
DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100,
"Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."),
MAXCREATEDFILES("hive.exec.max.created.files", 100000L,
"Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."),
DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__",
"The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
"This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
"The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."),
DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__", ""),
// Whether to show a link to the most failed task + debugging tips
SHOW_JOB_FAIL_DEBUG_INFO("hive.exec.show.job.failure.debug.info", true,
"If a job fails, whether to provide a link in the CLI to the task with the\n" +
"most failures, along with debugging hints if applicable."),
JOB_DEBUG_CAPTURE_STACKTRACES("hive.exec.job.debug.capture.stacktraces", true,
"Whether or not stack traces parsed from the task logs of a sampled failed task \n" +
"for each failed job should be stored in the SessionState"),
JOB_DEBUG_TIMEOUT("hive.exec.job.debug.timeout", 30000, ""),
TASKLOG_DEBUG_TIMEOUT("hive.exec.tasklog.debug.timeout", 20000, ""),
OUTPUT_FILE_EXTENSION("hive.output.file.extension", null,
"String used as a file extension for output files. \n" +
"If not set, defaults to the codec extension for text files (e.g. \".gz\"), or no extension otherwise."),
HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
HIVE_IN_TEST_SSL("hive.in.ssl.test", false, "internal usage only, true in SSL test mode", true),
// TODO: this needs to be removed; see TestReplicationScenarios* comments.
HIVE_IN_TEST_REPL("hive.in.repl.test", false, "internal usage only, true in replication test mode", true),
HIVE_IN_TEST_IDE("hive.in.ide.test", false, "internal usage only, true if test running in ide",
true),
HIVE_TESTING_SHORT_LOGS("hive.testing.short.logs", false,
"internal usage only, used only in test mode. If set true, when requesting the " +
"operation logs the short version (generated by LogDivertAppenderForTest) will be " +
"returned"),
HIVE_TESTING_REMOVE_LOGS("hive.testing.remove.logs", true,
"internal usage only, used only in test mode. If set false, the operation logs, and the " +
"operation log directory will not be removed, so they can be found after the test runs."),
HIVE_IN_TEZ_TEST("hive.in.tez.test", false, "internal use only, true when in testing tez",
true),
HIVE_MAPJOIN_TESTING_NO_HASH_TABLE_LOAD("hive.mapjoin.testing.no.hash.table.load", false, "internal use only, true when in testing map join",
true),
HIVE_IN_REPL_TEST_FILES_SORTED("hive.in.repl.test.files.sorted", false,
"internal usage only, set to true if the file listing is required in sorted order during bootstrap load", true),
LOCALMODEAUTO("hive.exec.mode.local.auto", false,
"Let Hive determine whether to run in local mode automatically"),
LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
"When hive.exec.mode.local.auto is true, input bytes should less than this for local mode."),
LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4,
"When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."),
DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
"Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/function"),
HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"),
HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100,
"maximum number of lines for footer user can define for a table file"),
HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES("hive.resultset.use.unique.column.names", true,
"Make column names unique in the result set by qualifying column names with table alias if needed.\n" +
"Table alias will be added to column names for queries of type \"select *\" or \n" +
"if query explicitly uses table alias \"select r1.x..\"."),
HIVE_PROTO_EVENTS_BASE_PATH("hive.hook.proto.base-directory", "",
"Base directory into which the proto event messages are written by HiveProtoLoggingHook."),
HIVE_PROTO_EVENTS_ROLLOVER_CHECK_INTERVAL("hive.hook.proto.rollover-interval", "600s",
new TimeValidator(TimeUnit.SECONDS, 0L, true, 3600 * 24L, true),
"Frequency at which the file rollover check is triggered."),
HIVE_PROTO_EVENTS_CLEAN_FREQ("hive.hook.proto.events.clean.freq", "1d",
new TimeValidator(TimeUnit.DAYS),
"Frequency at which timer task runs to purge expired proto event files."),
HIVE_PROTO_EVENTS_TTL("hive.hook.proto.events.ttl", "7d",
new TimeValidator(TimeUnit.DAYS),
"Time-To-Live (TTL) of proto event files before cleanup."),
HIVE_PROTO_FILE_PER_EVENT("hive.hook.proto.file.per.event", false,
"Whether each proto event has to be written to separate file. " +
"(Use this for FS that does not hflush immediately like S3A)"),
// Hadoop Configuration Properties
// Properties with null values are ignored and exist only for the purpose of giving us
// a symbolic name to reference in the Hive source code. Properties with non-null
// values will override any values set in the underlying Hadoop configuration.
HADOOPBIN("hadoop.bin.path", findHadoopBinary(), "", true),
YARNBIN("yarn.bin.path", findYarnBinary(), "", true),
MAPREDBIN("mapred.bin.path", findMapRedBinary(), "", true),
HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem",
"The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"),
MAPREDMAXSPLITSIZE(FileInputFormat.SPLIT_MAXSIZE, 256000000L, "", true),
MAPREDMINSPLITSIZE(FileInputFormat.SPLIT_MINSIZE, 1L, "", true),
MAPREDMINSPLITSIZEPERNODE(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE, 1L, "", true),
MAPREDMINSPLITSIZEPERRACK(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK, 1L, "", true),
// The number of reduce tasks per job. Hadoop sets this value to 1 by default
// By setting this property to -1, Hive will automatically determine the correct
// number of reducers.
HADOOPNUMREDUCERS("mapreduce.job.reduces", -1, "", true),
// Metastore stuff. Be sure to update HiveConf.metaVars when you add something here!
METASTOREDBTYPE("hive.metastore.db.type", "DERBY", new StringSet("DERBY", "ORACLE", "MYSQL", "MSSQL", "POSTGRES"),
"Type of database used by the metastore. Information schema & JDBCStorageHandler depend on it."),
/**
* @deprecated Use MetastoreConf.WAREHOUSE
*/
@Deprecated
METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse",
"location of default database for the warehouse"),
HIVE_METASTORE_WAREHOUSE_EXTERNAL("hive.metastore.warehouse.external.dir", null,
"Default location for external tables created in the warehouse. " +
"If not set or null, then the normal warehouse location will be used as the default location."),
/**
* @deprecated Use MetastoreConf.THRIFT_URIS
*/
@Deprecated
METASTOREURIS("hive.metastore.uris", "",
"Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
/**
* @deprecated Use MetastoreConf.THRIFT_URI_SELECTION
*/
@Deprecated
METASTORESELECTION("hive.metastore.uri.selection", "RANDOM",
new StringSet("SEQUENTIAL", "RANDOM"),
"Determines the selection mechanism used by metastore client to connect to remote " +
"metastore. SEQUENTIAL implies that the first valid metastore from the URIs specified " +
"as part of hive.metastore.uris will be picked. RANDOM implies that the metastore " +
"will be picked randomly"),
/**
* @deprecated Use MetastoreConf.CAPABILITY_CHECK
*/
@Deprecated
METASTORE_CAPABILITY_CHECK("hive.metastore.client.capability.check", true,
"Whether to check client capabilities for potentially breaking API usage."),
METASTORE_CLIENT_CACHE_ENABLED("hive.metastore.client.cache.enabled", false,
"Whether to enable metastore client cache"),
METASTORE_CLIENT_CACHE_EXPIRY_TIME("hive.metastore.client.cache.expiry.time", "120s",
new TimeValidator(TimeUnit.SECONDS), "Expiry time for metastore client cache"),
METASTORE_CLIENT_CACHE_INITIAL_CAPACITY("hive.metastore.client.cache.initial.capacity", 50,
"Initial capacity for metastore client cache"),
METASTORE_CLIENT_CACHE_MAX_CAPACITY("hive.metastore.client.cache.max.capacity", 50,
"Max capacity for metastore client cache"),
METASTORE_CLIENT_CACHE_STATS_ENABLED("hive.metastore.client.cache.stats.enabled", false,
"Whether to enable metastore client cache stats"),
METASTORE_FASTPATH("hive.metastore.fastpath", false,
"Used to avoid all of the proxies and object copies in the metastore. Note, if this is " +
"set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise " +
"undefined and most likely undesired behavior will result"),
/**
* @deprecated Use MetastoreConf.FS_HANDLER_THREADS_COUNT
*/
@Deprecated
METASTORE_FS_HANDLER_THREADS_COUNT("hive.metastore.fshandler.threads", 15,
"Number of threads to be allocated for metastore handler for fs operations."),
/**
* @deprecated Use MetastoreConf.FILE_METADATA_THREADS
*/
@Deprecated
METASTORE_HBASE_FILE_METADATA_THREADS("hive.metastore.hbase.file.metadata.threads", 1,
"Number of threads to use to read file metadata in background to cache it."),
/**
* @deprecated Use MetastoreConf.URI_RESOLVER
*/
@Deprecated
METASTORE_URI_RESOLVER("hive.metastore.uri.resolver", "",
"If set, fully qualified class name of resolver for hive metastore uri's"),
/**
* @deprecated Use MetastoreConf.THRIFT_CONNECTION_RETRIES
*/
@Deprecated
METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3,
"Number of retries while opening a connection to metastore"),
/**
* @deprecated Use MetastoreConf.THRIFT_FAILURE_RETRIES
*/
@Deprecated
METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
"Number of retries upon failure of Thrift metastore calls"),
/**
* @deprecated Use MetastoreConf.SERVER_PORT
*/
@Deprecated
METASTORE_SERVER_PORT("hive.metastore.port", 9083, "Hive metastore listener port"),
/**
* @deprecated Use MetastoreConf.CLIENT_CONNECT_RETRY_DELAY
*/
@Deprecated
METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s",
new TimeValidator(TimeUnit.SECONDS),
"Number of seconds for the client to wait between consecutive connection attempts"),
/**
* @deprecated Use MetastoreConf.CLIENT_SOCKET_TIMEOUT
*/
@Deprecated
METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", "600s",
new TimeValidator(TimeUnit.SECONDS),
"MetaStore Client socket timeout in seconds"),
/**
* @deprecated Use MetastoreConf.CLIENT_SOCKET_LIFETIME
*/
@Deprecated
METASTORE_CLIENT_SOCKET_LIFETIME("hive.metastore.client.socket.lifetime", "0s",
new TimeValidator(TimeUnit.SECONDS),
"MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
"reconnects on the next MetaStore operation. A value of 0s means the connection\n" +
"has an infinite lifetime."),
/**
* @deprecated Use MetastoreConf.PWD
*/
@Deprecated
METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine",
"password to use against metastore database"),
/**
* @deprecated Use MetastoreConf.CONNECT_URL_HOOK
*/
@Deprecated
METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "",
"Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"),
/**
* @deprecated Use MetastoreConf.MULTITHREADED
*/
@Deprecated
METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true,
"Set this to true if multiple threads access metastore through JDO concurrently."),
/**
* @deprecated Use MetastoreConf.CONNECT_URL_KEY
*/
@Deprecated
METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL",
"jdbc:derby:;databaseName=metastore_db;create=true",
"JDBC connect string for a JDBC metastore.\n" +
"To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.\n" +
"For example, jdbc:postgresql://myhost/db?ssl=true for postgres database."),
/**
* @deprecated Use MetastoreConf.DBACCESS_SSL_PROPS
*/
@Deprecated
METASTORE_DBACCESS_SSL_PROPS("hive.metastore.dbaccess.ssl.properties", "",
"Comma-separated SSL properties for metastore to access database when JDO connection URL\n" +
"enables SSL access. e.g. javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd."),
/**
* @deprecated Use MetastoreConf.HMS_HANDLER_ATTEMPTS
*/
@Deprecated
HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 10,
"The number of times to retry a HMSHandler call if there were a connection error."),
/**
* @deprecated Use MetastoreConf.HMS_HANDLER_INTERVAL
*/
@Deprecated
HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."),
/**
* @deprecated Use MetastoreConf.HMS_HANDLER_FORCE_RELOAD_CONF
*/
@Deprecated
HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
"Whether to force reloading of the HMSHandler configuration (including\n" +
"the connection URL, before the next metastore query that accesses the\n" +
"datastore. Once reloaded, this value is reset to false. Used for\n" +
"testing only."),
/**
* @deprecated Use MetastoreConf.SERVER_MAX_MESSAGE_SIZE
*/
@Deprecated
METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024L,
"Maximum message size in bytes a HMS will accept."),
/**
* @deprecated Use MetastoreConf.SERVER_MIN_THREADS
*/
@Deprecated
METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200,
"Minimum number of worker threads in the Thrift server's pool."),
/**
* @deprecated Use MetastoreConf.SERVER_MAX_THREADS
*/
@Deprecated
METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000,
"Maximum number of worker threads in the Thrift server's pool."),
/**
* @deprecated Use MetastoreConf.TCP_KEEP_ALIVE
*/
@Deprecated
METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true,
"Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
/**
* @deprecated Use MetastoreConf.WM_DEFAULT_POOL_SIZE
*/
@Deprecated
METASTORE_WM_DEFAULT_POOL_SIZE("hive.metastore.wm.default.pool.size", 4,
"The size of a default pool to create when creating an empty resource plan;\n" +
"If not positive, no default pool will be created."),
METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
"_INTERMEDIATE_ORIGINAL",
"Intermediate dir suffixes used for archiving. Not important what they\n" +
"are, as long as collisions are avoided"),
METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived",
"_INTERMEDIATE_ARCHIVED", ""),
METASTORE_INT_EXTRACTED("hive.metastore.archive.intermediate.extracted",
"_INTERMEDIATE_EXTRACTED", ""),
/**
* @deprecated Use MetastoreConf.KERBEROS_KEYTAB_FILE
*/
@Deprecated
METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", "",
"The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."),
/**
* @deprecated Use MetastoreConf.KERBEROS_PRINCIPAL
*/
@Deprecated
METASTORE_KERBEROS_PRINCIPAL("hive.metastore.kerberos.principal",
"hive-metastore/[email protected]",
"The service principal for the metastore Thrift server. \n" +
"The special string _HOST will be replaced automatically with the correct host name."),
/**
* @deprecated Use MetastoreConf.CLIENT_KERBEROS_PRINCIPAL
*/
@Deprecated
METASTORE_CLIENT_KERBEROS_PRINCIPAL("hive.metastore.client.kerberos.principal",
"", // E.g. "hive-metastore/[email protected]".
"The Kerberos principal associated with the HA cluster of hcat_servers."),
/**
* @deprecated Use MetastoreConf.USE_THRIFT_SASL
*/
@Deprecated
METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false,
"If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
/**
* @deprecated Use MetastoreConf.USE_THRIFT_FRAMED_TRANSPORT
*/
@Deprecated
METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false,
"If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
/**
* @deprecated Use MetastoreConf.USE_THRIFT_COMPACT_PROTOCOL
*/
@Deprecated
METASTORE_USE_THRIFT_COMPACT_PROTOCOL("hive.metastore.thrift.compact.protocol.enabled", false,
"If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" +
"Setting it to true will break compatibility with older clients running TBinaryProtocol."),
/**
* @deprecated Use MetastoreConf.TOKEN_SIGNATURE
*/
@Deprecated
METASTORE_TOKEN_SIGNATURE("hive.metastore.token.signature", "",
"The delegation token service name to match when selecting a token from the current user's tokens."),
/**
* @deprecated Use MetastoreConf.DELEGATION_TOKEN_STORE_CLS
*/
@Deprecated
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS("hive.cluster.delegation.token.store.class",
"org.apache.hadoop.hive.thrift.MemoryTokenStore",
"The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR(
"hive.cluster.delegation.token.store.zookeeper.connectString", "",
"The ZooKeeper token store connect string. You can re-use the configuration value\n" +
"set in hive.zookeeper.quorum, by leaving this parameter unset."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE(
"hive.cluster.delegation.token.store.zookeeper.znode", "/hivedelegation",
"The root path for token store data. Note that this is used by both HiveServer2 and\n" +
"MetaStore to store delegation Token. One directory gets created for each of them.\n" +
"The final directory names would have the servername appended to it (HIVESERVER2,\n" +
"METASTORE)."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL(
"hive.cluster.delegation.token.store.zookeeper.acl", "",
"ACL for token store entries. Comma separated list of ACL entries. For example:\n" +
"sasl:hive/[email protected]:cdrwa,sasl:hive/[email protected]:cdrwa\n" +
"Defaults to all permissions for the hiveserver2/metastore process user."),
/**
* @deprecated Use MetastoreConf.CACHE_PINOBJTYPES
*/
@Deprecated
METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
"List of comma separated metastore object types that should be pinned in the cache"),
/**
* @deprecated Use MetastoreConf.CONNECTION_POOLING_TYPE
*/
@Deprecated
METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "HikariCP", new StringSet("BONECP", "DBCP",
"HikariCP", "NONE"),
"Specify connection pool library for datanucleus"),
/**
* @deprecated Use MetastoreConf.CONNECTION_POOLING_MAX_CONNECTIONS
*/
@Deprecated
METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS("datanucleus.connectionPool.maxPoolSize", 10,
"Specify the maximum number of connections in the connection pool. Note: The configured size will be used by\n" +
"2 connection pools (TxnHandler and ObjectStore). When configuring the max connection pool size, it is\n" +
"recommended to take into account the number of metastore instances and the number of HiveServer2 instances\n" +
"configured with embedded metastore. To get optimal performance, set config to meet the following condition\n"+
"(2 * pool_size * metastore_instances + 2 * pool_size * HS2_instances_with_embedded_metastore) = \n" +
"(2 * physical_core_count + hard_disk_count)."),
// Workaround for DN bug on Postgres:
// http://www.datanucleus.org/servlet/forum/viewthread_thread,7985_offset
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_INIT_COL_INFO
*/
@Deprecated
METASTORE_DATANUCLEUS_INIT_COL_INFO("datanucleus.rdbms.initializeColumnInfo", "NONE",
"initializeColumnInfo setting for DataNucleus; set to NONE at least on Postgres."),
/**
* @deprecated Use MetastoreConf.VALIDATE_TABLES
*/
@Deprecated
METASTORE_VALIDATE_TABLES("datanucleus.schema.validateTables", false,
"validates existing schema against code. turn this on if you want to verify existing schema"),
/**
* @deprecated Use MetastoreConf.VALIDATE_COLUMNS
*/
@Deprecated
METASTORE_VALIDATE_COLUMNS("datanucleus.schema.validateColumns", false,
"validates existing schema against code. turn this on if you want to verify existing schema"),
/**
* @deprecated Use MetastoreConf.VALIDATE_CONSTRAINTS
*/
@Deprecated
METASTORE_VALIDATE_CONSTRAINTS("datanucleus.schema.validateConstraints", false,
"validates existing schema against code. turn this on if you want to verify existing schema"),
/**
* @deprecated Use MetastoreConf.STORE_MANAGER_TYPE
*/
@Deprecated
METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms", "metadata store type"),
/**
* @deprecated Use MetastoreConf.AUTO_CREATE_ALL
*/
@Deprecated
METASTORE_AUTO_CREATE_ALL("datanucleus.schema.autoCreateAll", false,
"Auto creates necessary schema on a startup if one doesn't exist. Set this to false, after creating it once."
+ "To enable auto create also set hive.metastore.schema.verification=false. Auto creation is not "
+ "recommended for production use cases, run schematool command instead." ),
/**
* @deprecated Use MetastoreConf.SCHEMA_VERIFICATION
*/
@Deprecated
METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", true,
"Enforce metastore schema version consistency.\n" +
"True: Verify that version information stored in is compatible with one from Hive jars. Also disable automatic\n" +
" schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" +
" proper metastore schema migration. (Default)\n" +
"False: Warn if the version information stored in metastore doesn't match with one from in Hive jars."),
/**
* @deprecated Use MetastoreConf.SCHEMA_VERIFICATION_RECORD_VERSION
*/
@Deprecated
METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION("hive.metastore.schema.verification.record.version", false,
"When true the current MS version is recorded in the VERSION table. If this is disabled and verification is\n" +
" enabled the MS will be unusable."),
/**
* @deprecated Use MetastoreConf.SCHEMA_INFO_CLASS
*/
@Deprecated
METASTORE_SCHEMA_INFO_CLASS("hive.metastore.schema.info.class",
"org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo",
"Fully qualified class name for the metastore schema information class \n"
+ "which is used by schematool to fetch the schema information.\n"
+ " This class should implement the IMetaStoreSchemaInfo interface"),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_TRANSACTION_ISOLATION
*/
@Deprecated
METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed",
"Default transaction isolation level for identity generation."),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_CACHE_LEVEL2
*/
@Deprecated
METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false,
"Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server"),
METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none", ""),
/**
* @deprecated Use MetastoreConf.IDENTIFIER_FACTORY
*/
@Deprecated
METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1",
"Name of the identifier factory to use when generating table/column names etc. \n" +
"'datanucleus1' is used for backward compatibility with DataNucleus v1"),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_USE_LEGACY_VALUE_STRATEGY
*/
@Deprecated
METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
/**
* @deprecated Use MetastoreConf.DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK
*/
@Deprecated
METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
"Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
/**
* @deprecated Use MetastoreConf.BATCH_RETRIEVE_MAX
*/
@Deprecated
METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300,
"Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" +
"The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" +
"but it may also cause higher memory requirement at the client side."),
/**
* @deprecated Use MetastoreConf.BATCH_RETRIEVE_OBJECTS_MAX
*/
@Deprecated
METASTORE_BATCH_RETRIEVE_OBJECTS_MAX(
"hive.metastore.batch.retrieve.table.partition.max", 1000,
"Maximum number of objects that metastore internally retrieves in one batch."),
/**
* @deprecated Use MetastoreConf.INIT_HOOKS
*/
@Deprecated
METASTORE_INIT_HOOKS("hive.metastore.init.hooks", "",
"A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. \n" +
"An init hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener."),
/**
* @deprecated Use MetastoreConf.PRE_EVENT_LISTENERS
*/
@Deprecated
METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "",
"List of comma separated listeners for metastore events."),
/**
* @deprecated Use MetastoreConf.EVENT_LISTENERS
*/
@Deprecated
METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "",
"A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" +
" interface. The metastore event and corresponding listener method will be invoked in separate JDO transactions. " +
"Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction."),
/**
* @deprecated Use MetastoreConf.TRANSACTIONAL_EVENT_LISTENERS
*/
@Deprecated
METASTORE_TRANSACTIONAL_EVENT_LISTENERS("hive.metastore.transactional.event.listeners", "",
"A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" +
" interface. Both the metastore event and corresponding listener method will be invoked in the same JDO transaction."),
/**
* @deprecated Use MetastoreConf.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES
*/
@Deprecated
NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES("hive.notification.sequence.lock.max.retries", 10,
"Number of retries required to acquire a lock when getting the next notification sequential ID for entries "
+ "in the NOTIFICATION_LOG table."),
/**
* @deprecated Use MetastoreConf.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL
*/
@Deprecated
NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL("hive.notification.sequence.lock.retry.sleep.interval", 10L,
new TimeValidator(TimeUnit.SECONDS),
"Sleep interval between retries to acquire a notification lock as described part of property "
+ NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name()),
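    // Illustrative note (added commentary, not from the original source): time-valued entries such
    // as the one above are normally read through the time-aware accessor rather than as raw
    // strings. A minimal sketch, assuming the usual HiveConf API:
    //
    //   HiveConf conf = new HiveConf();
    //   long sleepMs = conf.getTimeVar(
    //       HiveConf.ConfVars.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL, TimeUnit.MILLISECONDS);
    //
    // getTimeVar converts the stored value into the requested unit.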
/**
* @deprecated Use MetastoreConf.EVENT_DB_LISTENER_TTL
*/
@Deprecated
METASTORE_EVENT_DB_LISTENER_TTL("hive.metastore.event.db.listener.timetolive", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"time after which events will be removed from the database listener queue"),
/**
* @deprecated Use MetastoreConf.EVENT_DB_NOTIFICATION_API_AUTH
*/
@Deprecated
METASTORE_EVENT_DB_NOTIFICATION_API_AUTH("hive.metastore.event.db.notification.api.auth", true,
"Should metastore do authorization against database notification related APIs such as get_next_notification.\n" +
"If set to true, then only the superusers in proxy settings have the permission"),
/**
* @deprecated Use MetastoreConf.AUTHORIZATION_STORAGE_AUTH_CHECKS
*/
@Deprecated
METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false,
"Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
"for operations like drop-partition (disallow the drop-partition if the user in\n" +
"question doesn't have permissions to delete the corresponding directory\n" +
"on the storage)."),
METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK("hive.metastore.authorization.storage.check.externaltable.drop", true,
"Should StorageBasedAuthorization check permission of the storage before dropping external table.\n" +
"StorageBasedAuthorization already does this check for managed table. For external table however,\n" +
"anyone who has read permission of the directory could drop external table, which is surprising.\n" +
"The flag is set to false by default to maintain backward compatibility."),
/**
* @deprecated Use MetastoreConf.EVENT_CLEAN_FREQ
*/
@Deprecated
METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Frequency at which timer task runs to purge expired events in metastore."),
/**
* @deprecated Use MetastoreConf.EVENT_EXPIRY_DURATION
*/
@Deprecated
METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Duration after which events expire from events table"),
/**
* @deprecated Use MetastoreConf.EVENT_MESSAGE_FACTORY
*/
@Deprecated
METASTORE_EVENT_MESSAGE_FACTORY("hive.metastore.event.message.factory",
"org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory",
"Factory class for making encoding and decoding messages in the events generated."),
/**
* @deprecated Use MetastoreConf.EXECUTE_SET_UGI
*/
@Deprecated
METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", true,
"In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
"the client's reported user and group permissions. Note that this property must be set on \n" +
"both the client and server sides. Further note that its best effort. \n" +
"If client sets its to true and server sets it to false, client setting will be ignored."),
/**
* @deprecated Use MetastoreConf.PARTITION_NAME_WHITELIST_PATTERN
*/
@Deprecated
METASTORE_PARTITION_NAME_WHITELIST_PATTERN("hive.metastore.partition.name.whitelist.pattern", "",
"Partition names will be checked against this regex pattern and rejected if not matched."),
/**
* @deprecated Use MetastoreConf.INTEGER_JDO_PUSHDOWN
*/
@Deprecated
METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false,
"Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" +
"improves metastore perf for integral columns, especially if there's a large number of partitions.\n" +
"However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" +
"leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" +
"is also irrelevant."),
/**
* @deprecated Use MetastoreConf.TRY_DIRECT_SQL
*/
@Deprecated
METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true,
"Whether the Hive metastore should try to use direct SQL queries instead of the\n" +
"DataNucleus for certain read paths. This can improve metastore performance when\n" +
"fetching many partitions or column statistics by orders of magnitude; however, it\n" +
"is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\n" +
"the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't\n" +
"work for all queries on your datastore. If all SQL queries fail (for example, your\n" +
"metastore is backed by MongoDB), you might want to disable this to save the\n" +
"try-and-fall-back cost."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_PARTITION_BATCH_SIZE
*/
@Deprecated
METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE("hive.metastore.direct.sql.batch.size", 0,
"Batch size for partition and other object retrieval from the underlying DB in direct\n" +
"SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" +
"that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" +
"may impede performance. -1 means no batching, 0 means automatic batching."),
/**
* @deprecated Use MetastoreConf.TRY_DIRECT_SQL_DDL
*/
@Deprecated
METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true,
"Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" +
"modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" +
"select query has incorrect syntax or something similar inside a transaction, the\n" +
"entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" +
"should disable the usage of direct SQL inside transactions if that happens in your case."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_MAX_QUERY_LENGTH
*/
@Deprecated
METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH("hive.direct.sql.max.query.length", 100, "The maximum\n" +
" size of a query string (in KB)."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE
*/
@Deprecated
METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("hive.direct.sql.max.elements.in.clause", 1000,
"The maximum number of values in a IN clause. Once exceeded, it will be broken into\n" +
" multiple OR separated IN clauses."),
/**
* @deprecated Use MetastoreConf.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE
*/
@Deprecated
METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("hive.direct.sql.max.elements.values.clause",
1000, "The maximum number of values in a VALUES clause for INSERT statement."),
/**
* @deprecated Use MetastoreConf.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS
*/
@Deprecated
METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false,
"Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " +
"either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " +
"as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
"pruning is the correct behaviour"),
/**
* @deprecated Use MetastoreConf.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES
*/
@Deprecated
METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
"hive.metastore.disallow.incompatible.col.type.changes", true,
"If true (default is false), ALTER TABLE operations which change the type of a\n" +
"column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
"RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
"datatypes can be converted from string to any type. The map is also serialized as\n" +
"a string, which can be read as a string as well. However, with any binary\n" +
"serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\n" +
"when subsequently trying to access old partitions.\n" +
"\n" +
"Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\n" +
"not blocked.\n" +
"\n" +
"See HIVE-4409 for more details."),
/**
* @deprecated Use MetastoreConf.LIMIT_PARTITION_REQUEST
*/
@Deprecated
METASTORE_LIMIT_PARTITION_REQUEST("hive.metastore.limit.partition.request", -1,
"This limits the number of partitions that can be requested from the metastore for a given table.\n" +
"The default value \"-1\" means no limit."),
NEWTABLEDEFAULTPARA("hive.table.parameters.default", "",
"Default property values for newly created tables"),
DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", "",
"Table Properties to copy over when executing a Create Table Like."),
/**
* @deprecated Use MetastoreConf.RAW_STORE_IMPL
*/
@Deprecated
METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
"Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. \n" +
"This class is used to store and retrieval of raw metadata objects such as table, database"),
/**
* @deprecated Use MetastoreConf.TXN_STORE_IMPL
*/
@Deprecated
METASTORE_TXN_STORE_IMPL("hive.metastore.txn.store.impl",
"org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler",
"Name of class that implements org.apache.hadoop.hive.metastore.txn.TxnStore. This " +
"class is used to store and retrieve transactions and locks"),
/**
* @deprecated Use MetastoreConf.CONNECTION_DRIVER
*/
@Deprecated
METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver",
"Driver class name for a JDBC metastore"),
/**
* @deprecated Use MetastoreConf.MANAGER_FACTORY_CLASS
*/
@Deprecated
METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
"org.datanucleus.api.jdo.JDOPersistenceManagerFactory",
"class implementing the jdo persistence"),
/**
* @deprecated Use MetastoreConf.EXPRESSION_PROXY_CLASS
*/
@Deprecated
METASTORE_EXPRESSION_PROXY_CLASS("hive.metastore.expression.proxy",
"org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore", ""),
/**
* @deprecated Use MetastoreConf.DETACH_ALL_ON_COMMIT
*/
@Deprecated
METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true,
"Detaches all objects from session so that they can be used after transaction is committed"),
/**
* @deprecated Use MetastoreConf.NON_TRANSACTIONAL_READ
*/
@Deprecated
METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true,
"Reads outside of transactions"),
/**
* @deprecated Use MetastoreConf.CONNECTION_USER_NAME
*/
@Deprecated
METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP",
"Username to use against metastore database"),
/**
* @deprecated Use MetastoreConf.END_FUNCTION_LISTENERS
*/
@Deprecated
METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", "",
"List of comma separated listeners for the end of metastore functions."),
/**
* @deprecated Use MetastoreConf.PART_INHERIT_TBL_PROPS
*/
@Deprecated
METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", "",
"List of comma separated keys occurring in table properties which will get inherited to newly created partitions. \n" +
"* implies all the keys will get inherited."),
/**
* @deprecated Use MetastoreConf.FILTER_HOOK
*/
@Deprecated
METASTORE_FILTER_HOOK("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl",
"Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager"
+ "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
FIRE_EVENTS_FOR_DML("hive.metastore.dml.events", false, "If true, the metastore will be asked" +
" to fire events for DML operations"),
METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS("hive.metastore.client.drop.partitions.using.expressions", true,
"Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, " +
"or drops partitions iteratively"),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_ENABLED
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_ENABLED("hive.metastore.aggregate.stats.cache.enabled", true,
"Whether aggregate stats caching is enabled or not."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_SIZE
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.aggregate.stats.cache.size", 10000,
"Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_PARTITIONS
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.aggregate.stats.cache.max.partitions", 10000,
"Maximum number of partitions that are aggregated per cache node."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_FPP
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_FPP("hive.metastore.aggregate.stats.cache.fpp", (float) 0.01,
"Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_VARIANCE
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.aggregate.stats.cache.max.variance", (float) 0.01,
"Maximum tolerable variance in number of partitions between a cached node and our request (default 1%)."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_TTL
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_TTL("hive.metastore.aggregate.stats.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
"Number of seconds for a cached node to be active in the cache before they become stale."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT("hive.metastore.aggregate.stats.cache.max.writer.wait", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Number of milliseconds a writer will wait to acquire the writelock before giving up."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_READER_WAIT
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT("hive.metastore.aggregate.stats.cache.max.reader.wait", "1000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Number of milliseconds a reader will wait to acquire the readlock before giving up."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_FULL
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL("hive.metastore.aggregate.stats.cache.max.full", (float) 0.9,
"Maximum cache full % after which the cache cleaner thread kicks in."),
/**
* @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_CLEAN_UNTIL
*/
@Deprecated
METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL("hive.metastore.aggregate.stats.cache.clean.until", (float) 0.8,
"The cleaner thread cleans until cache reaches this % full size."),
/**
* @deprecated Use MetastoreConf.METRICS_ENABLED
*/
@Deprecated
METASTORE_METRICS("hive.metastore.metrics.enabled", false, "Enable metrics on the metastore."),
/**
* @deprecated Use MetastoreConf.INIT_METADATA_COUNT_ENABLED
*/
@Deprecated
METASTORE_INIT_METADATA_COUNT_ENABLED("hive.metastore.initial.metadata.count.enabled", true,
"Enable a metadata count at metastore startup for metrics."),
// Metastore SSL settings
/**
* @deprecated Use MetastoreConf.USE_SSL
*/
@Deprecated
HIVE_METASTORE_USE_SSL("hive.metastore.use.SSL", false,
"Set this to true for using SSL encryption in HMS server."),
/**
* @deprecated Use MetastoreConf.SSL_KEYSTORE_PATH
*/
@Deprecated
HIVE_METASTORE_SSL_KEYSTORE_PATH("hive.metastore.keystore.path", "",
"Metastore SSL certificate keystore location."),
/**
* @deprecated Use MetastoreConf.SSL_KEYSTORE_PASSWORD
*/
@Deprecated
HIVE_METASTORE_SSL_KEYSTORE_PASSWORD("hive.metastore.keystore.password", "",
"Metastore SSL certificate keystore password."),
/**
* @deprecated Use MetastoreConf.SSL_TRUSTSTORE_PATH
*/
@Deprecated
HIVE_METASTORE_SSL_TRUSTSTORE_PATH("hive.metastore.truststore.path", "",
"Metastore SSL certificate truststore location."),
/**
* @deprecated Use MetastoreConf.SSL_TRUSTSTORE_PASSWORD
*/
@Deprecated
HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD("hive.metastore.truststore.password", "",
"Metastore SSL certificate truststore password."),
// Parameters for exporting metadata on table drop (requires the use of the)
// org.apache.hadoop.hive.ql.parse.MetaDataExportListener preevent listener
/**
* @deprecated Use MetastoreConf.METADATA_EXPORT_LOCATION
*/
@Deprecated
METADATA_EXPORT_LOCATION("hive.metadata.export.location", "",
"When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
"it is the location to which the metadata will be exported. The default is an empty string, which results in the \n" +
"metadata being exported to the current user's home directory on HDFS."),
/**
* @deprecated Use MetastoreConf.MOVE_EXPORTED_METADATA_TO_TRASH
*/
@Deprecated
MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true,
"When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
"this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" +
"alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."),
// CLI
CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""),
CLIPRINTCURRENTDB("hive.cli.print.current.db", false,
"Whether to include the current database in the Hive prompt."),
CLIPROMPT("hive.cli.prompt", "hive",
"Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" +
"Variable substitution will only be invoked at the Hive CLI startup."),
CLIPRETTYOUTPUTNUMCOLS("hive.cli.pretty.output.num.cols", -1,
"The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.\n" +
"If the value of this property is -1, then Hive will use the auto-detected terminal width."),
/**
* @deprecated Use MetastoreConf.FS_HANDLER_CLS
*/
@Deprecated
HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
// Things we log in the jobconf
// session identifier
HIVESESSIONID("hive.session.id", "", ""),
// whether session is running in silent mode or not
HIVESESSIONSILENT("hive.session.silent", false, ""),
HIVE_LOCAL_TIME_ZONE("hive.local.time.zone", "LOCAL",
"Sets the time-zone for displaying and interpreting time stamps. If this property value is set to\n" +
"LOCAL, it is not specified, or it is not a correct time-zone, the system default time-zone will be\n " +
"used instead. Time-zone IDs can be specified as region-based zone IDs (based on IANA time-zone data),\n" +
"abbreviated zone IDs, or offset IDs."),
HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false,
"Whether to log Hive query, query plan, runtime statistics etc."),
HIVEQUERYSTRING("hive.query.string", "",
"Query being executed (might be multiple per a session)"),
HIVEQUERYID("hive.query.id", "",
"ID for query being executed (might be multiple per a session)"),
HIVEQUERYTAG("hive.query.tag", null,
"tag for the query"),
HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"),
// hive jar
HIVEJAR("hive.jar.path", "",
"The location of hive_cli.jar that is used when submitting jobs in a separate jvm."),
HIVEAUXJARS("hive.aux.jars.path", "",
"The location of the plugin jars that contain implementations of user defined functions and serdes."),
// reloadable jars
HIVERELOADABLEJARS("hive.reloadable.aux.jars.path", "",
"The locations of the plugin jars, which can be a comma-separated folders or jars. Jars can be renewed\n"
+ "by executing reload command. And these jars can be "
+ "used as the auxiliary classes like creating a UDF or SerDe."),
// hive added files and jars
HIVEADDEDFILES("hive.added.files.path", "", "This an internal parameter."),
HIVEADDEDJARS("hive.added.jars.path", "", "This an internal parameter."),
HIVEADDEDARCHIVES("hive.added.archives.path", "", "This an internal parameter."),
HIVEADDFILESUSEHDFSLOCATION("hive.resource.use.hdfs.location", true, "Reference HDFS based files/jars directly instead of "
+ "copy to session based HDFS scratch directory, to make distributed cache more useful."),
HIVE_CURRENT_DATABASE("hive.current.database", "", "Database name used by current session. Internal usage only.", true),
// for hive script operator
HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", "0s",
new TimeValidator(TimeUnit.SECONDS),
"How long to run autoprogressor for the script/UDTF operators.\n" +
"Set to 0 for forever."),
HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false,
"Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \n" +
"to avoid the task getting killed because of inactivity. Hive sends progress information when the script is \n" +
"outputting to stderr. This option removes the need of periodically producing stderr messages, \n" +
"but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker."),
HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID",
"Name of the environment variable that holds the unique script operator ID in the user's \n" +
"transform function (the custom mapper/reducer that the user has specified in the query)"),
HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false,
"Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"),
HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist",
"hive.txn.valid.txns,hive.txn.tables.valid.writeids,hive.txn.valid.writeids,hive.script.operator.env.blacklist",
"Comma separated list of keys from the configuration file not to convert to environment " +
"variables when invoking the script operator"),
HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT("hive.strict.checks.orderby.no.limit", false,
"Enabling strict large query checks disallows the following:\n" +
" Orderby without limit.\n" +
"Note that this check currently does not consider data size, only the query pattern."),
HIVE_STRICT_CHECKS_NO_PARTITION_FILTER("hive.strict.checks.no.partition.filter", false,
"Enabling strict large query checks disallows the following:\n" +
" No partition being picked up for a query against partitioned table.\n" +
"Note that this check currently does not consider data size, only the query pattern."),
HIVE_STRICT_CHECKS_TYPE_SAFETY("hive.strict.checks.type.safety", true,
"Enabling strict type safety checks disallows the following:\n" +
" Comparing bigints and strings.\n" +
" Comparing bigints and doubles."),
HIVE_STRICT_CHECKS_CARTESIAN("hive.strict.checks.cartesian.product", false,
"Enabling strict Cartesian join checks disallows the following:\n" +
" Cartesian product (cross join)."),
HIVE_STRICT_CHECKS_BUCKETING("hive.strict.checks.bucketing", true,
"Enabling strict bucketing checks disallows the following:\n" +
" Load into bucketed tables."),
HIVE_LOAD_DATA_OWNER("hive.load.data.owner", "",
"Set the owner of files loaded using load data in managed tables."),
@Deprecated
HIVEMAPREDMODE("hive.mapred.mode", null,
"Deprecated; use hive.strict.checks.* settings instead."),
HIVEALIAS("hive.alias", "", ""),
HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"),
HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"),
HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000,
"How many rows in the right-most join operand Hive should buffer before emitting the join result."),
HIVEJOINCACHESIZE("hive.join.cache.size", 25000,
"How many rows in the joining tables (except the streaming table) should be cached in memory."),
HIVE_PUSH_RESIDUAL_INNER("hive.join.inner.residual", false,
"Whether to push non-equi filter predicates within inner joins. This can improve efficiency in "
+ "the evaluation of certain joins, since we will not be emitting rows which are thrown away by "
+ "a Filter operator straight away. However, currently vectorization does not support them, thus "
+ "enabling it is only recommended when vectorization is disabled."),
// CBO related
HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Calcite framework."),
HIVE_CBO_CNF_NODES_LIMIT("hive.cbo.cnf.maxnodes", -1, "When converting to conjunctive normal form (CNF), fail if" +
"the expression exceeds this threshold; the threshold is expressed in terms of number of nodes (leaves and" +
"interior nodes). -1 to not set up a threshold."),
HIVE_CBO_RETPATH_HIVEOP("hive.cbo.returnpath.hiveop", false, "Flag to control calcite plan to hive operator conversion"),
HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on"
+ "CPU, IO and cardinality. Otherwise, the cost model is based on cardinality."),
HIVE_CBO_COST_MODEL_CPU("hive.cbo.costmodel.cpu", "0.000001", "Default cost of a comparison"),
HIVE_CBO_COST_MODEL_NET("hive.cbo.costmodel.network", "150.0", "Default cost of a transferring a byte over network;"
+ " expressed as multiple of CPU cost"),
HIVE_CBO_COST_MODEL_LFS_WRITE("hive.cbo.costmodel.local.fs.write", "4.0", "Default cost of writing a byte to local FS;"
+ " expressed as multiple of NETWORK cost"),
HIVE_CBO_COST_MODEL_LFS_READ("hive.cbo.costmodel.local.fs.read", "4.0", "Default cost of reading a byte from local FS;"
+ " expressed as multiple of NETWORK cost"),
HIVE_CBO_COST_MODEL_HDFS_WRITE("hive.cbo.costmodel.hdfs.write", "10.0", "Default cost of writing a byte to HDFS;"
+ " expressed as multiple of Local FS write cost"),
HIVE_CBO_COST_MODEL_HDFS_READ("hive.cbo.costmodel.hdfs.read", "1.5", "Default cost of reading a byte from HDFS;"
+ " expressed as multiple of Local FS read cost"),
HIVE_CBO_SHOW_WARNINGS("hive.cbo.show.warnings", true,
"Toggle display of CBO warnings like missing column stats"),
HIVE_CBO_STATS_CORRELATED_MULTI_KEY_JOINS("hive.cbo.stats.correlated.multi.key.joins", true,
"When CBO estimates output rows for a join involving multiple columns, the default behavior assumes" +
"the columns are independent. Setting this flag to true will cause the estimator to assume" +
"the columns are correlated."),
AGGR_JOIN_TRANSPOSE("hive.transpose.aggr.join", false, "push aggregates through join"),
SEMIJOIN_CONVERSION("hive.optimize.semijoin.conversion", true, "convert group by followed by inner equi join into semijoin"),
HIVE_COLUMN_ALIGNMENT("hive.order.columnalignment", true, "Flag to control whether we want to try to align" +
"columns in operators such as Aggregate or Join so that we try to reduce the number of shuffling stages"),
// materialized views
HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING("hive.materializedview.rewriting", true,
"Whether to try to rewrite queries using the materialized views enabled for rewriting"),
HIVE_MATERIALIZED_VIEW_REWRITING_SELECTION_STRATEGY("hive.materializedview.rewriting.strategy", "heuristic",
new StringSet("heuristic", "costbased"),
"The strategy that should be used to cost and select the materialized view rewriting. \n" +
" heuristic: Always try to select the plan using the materialized view if rewriting produced one," +
"choosing the plan with lower cost among possible plans containing a materialized view\n" +
" costbased: Fully cost-based strategy, always use plan with lower cost, independently on whether " +
"it uses a materialized view or not"),
HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW("hive.materializedview.rewriting.time.window", "0min", new TimeValidator(TimeUnit.MINUTES),
"Time window, specified in seconds, after which outdated materialized views become invalid for automatic query rewriting.\n" +
"For instance, if more time than the value assigned to the property has passed since the materialized view " +
"was created or rebuilt, and one of its source tables has changed since, the materialized view will not be " +
"considered for rewriting. Default value 0 means that the materialized view cannot be " +
"outdated to be used automatically in query rewriting. Value -1 means to skip this check."),
HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL("hive.materializedview.rewriting.incremental", false,
"Whether to try to execute incremental rewritings based on outdated materializations and\n" +
"current content of tables. Default value of true effectively amounts to enabling incremental\n" +
"rebuild for the materializations too."),
HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL("hive.materializedview.rebuild.incremental", true,
"Whether to try to execute incremental rebuild for the materialized views. Incremental rebuild\n" +
"tries to modify the original materialization contents to reflect the latest changes to the\n" +
"materialized view source tables, instead of rebuilding the contents fully. Incremental rebuild\n" +
"is based on the materialized view algebraic incremental rewriting."),
HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL_FACTOR("hive.materializedview.rebuild.incremental.factor", 0.1f,
"The estimated cost of the resulting plan for incremental maintenance of materialization\n" +
"with aggregations will be multiplied by this value. Reducing the value can be useful to\n" +
"favour incremental rebuild over full rebuild."),
HIVE_MATERIALIZED_VIEW_FILE_FORMAT("hive.materializedview.fileformat", "ORC",
new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
"Default file format for CREATE MATERIALIZED VIEW statement"),
HIVE_MATERIALIZED_VIEW_SERDE("hive.materializedview.serde",
"org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Default SerDe used for materialized views"),
HIVE_ENABLE_JDBC_PUSHDOWN("hive.jdbc.pushdown.enable", true,
"Flag to control enabling pushdown of operators into JDBC connection and subsequent SQL generation\n" +
"using Calcite."),
    // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.rows;
    // need to remove by Hive 0.13. Also, do not change default (see SMB operator)
HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""),
HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
"Whether Hive should use memory-optimized hash table for MapJoin.\n" +
"Only works on Tez and Spark, because memory-optimized hashtable cannot be serialized."),
HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent",
(float) 0.5, "Probing space percentage of the optimized hashtable"),
HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", true, "Whether to use hybrid" +
"grace hash join as the join method for mapjoin. Tez only."),
HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
"hybrid grace hash join, how often (how many rows apart) we check if memory is full. " +
"This number should be power of 2."),
HIVEHYBRIDGRACEHASHJOINMINWBSIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace" +
"Hash join, the minimum write buffer size used by optimized hashtable. Default is 512 KB."),
HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For" +
"Hybrid grace hash join, the minimum number of partitions to create."),
HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024,
"Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" +
"store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" +
"joins unnecessary memory will be allocated and then trimmed."),
HIVEHYBRIDGRACEHASHJOINBLOOMFILTER("hive.mapjoin.hybridgrace.bloomfilter", true, "Whether to " +
"use BloomFilter in Hybrid grace hash join to minimize unnecessary spilling."),
HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000,
"How many rows with the same key value should be cached in memory per smb joined table."),
HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000,
"Number of rows after which size of the grouping keys/aggregation classes is performed"),
HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.99,
"Portion of total memory to be used by map-side group aggregation hash table"),
HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3,
"Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"),
HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9,
"The max memory to be used by map-side group aggregation hash table.\n" +
"If the memory usage is higher than this number, force to flush data"),
HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.5,
"Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. \n" +
"Set to 1 to make sure hash aggregation is never turned off."),
HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true,
"Whether to optimize multi group by query to generate single M/R job plan. If the multi group by query has \n" +
"common group by keys, it will be optimized to generate single M/R job."),
HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", true,
"If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
"the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\n" +
"is that it limits the number of mappers to the number of files."),
HIVE_GROUPBY_POSITION_ALIAS("hive.groupby.position.alias", false,
"Whether to enable using Column Position Alias in Group By"),
HIVE_ORDERBY_POSITION_ALIAS("hive.orderby.position.alias", true,
"Whether to enable using Column Position Alias in Order By"),
@Deprecated
HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false,
"Whether to enable using Column Position Alias in Group By or Order By (deprecated).\n" +
"Use " + HIVE_ORDERBY_POSITION_ALIAS.varname + " or " + HIVE_GROUPBY_POSITION_ALIAS.varname + " instead"),
HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30,
"Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.\n" +
"For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;\n" +
"4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).\n" +
"This can lead to explosion across map-reduce boundary if the cardinality of T is very high,\n" +
"and map-side aggregation does not do a very good job. \n" +
"\n" +
"This parameter decides if Hive should add an additional map-reduce job. If the grouping set\n" +
"cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
"assumption that the original group by will reduce the data size."),
HIVE_GROUPBY_LIMIT_EXTRASTEP("hive.groupby.limit.extrastep", true, "This parameter decides if Hive should \n" +
"create new MR job for sorting final output"),
// Max file num and size used to do a single copy (after that, distcp is used)
HIVE_EXEC_COPYFILE_MAXNUMFILES("hive.exec.copyfile.maxnumfiles", 1L,
"Maximum number of files Hive uses to do sequential HDFS copies between directories." +
"Distributed copies (distcp) will be used instead for larger numbers of files so that copies can be done faster."),
HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
"Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories." +
"Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
// for hive udtf operator
HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,
"Whether Hive should automatically send progress information to TaskTracker \n" +
"when using UDTF's to prevent the task getting killed because of inactivity. Users should be cautious \n" +
"because this may prevent TaskTracker from killing tasks with infinite loops."),
HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC", "parquet"),
"Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"),
HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none",
new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC", "parquet"),
"Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" +
"created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" +
"for all tables."),
HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "SequenceFile", new StringSet("TextFile", "SequenceFile", "RCfile", "Llap"),
"Default file format for storing result of the query."),
HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"),
// default serde for rcfile
HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde",
"org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe",
"The default SerDe Hive will use for the RCFile format"),
HIVEDEFAULTSERDE("hive.default.serde",
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
"The default SerDe Hive will use for storage formats that do not specify a SerDe."),
/**
* @deprecated Use MetastoreConf.SERDES_USING_METASTORE_FOR_SCHEMA
*/
@Deprecated
SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema",
"org.apache.hadoop.hive.ql.io.orc.OrcSerde," +
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," +
"org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," +
"org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe," +
"org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe," +
"org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe," +
"org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe," +
"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe",
"SerDes retrieving schema from metastore. This is an internal parameter."),
@Deprecated
HIVE_LEGACY_SCHEMA_FOR_ALL_SERDES("hive.legacy.schema.for.all.serdes",
false,
"A backward compatibility setting for external metastore users that do not handle \n" +
SERDESUSINGMETASTOREFORSCHEMA.varname + " correctly. This may be removed at any time."),
HIVEHISTORYFILELOC("hive.querylog.location",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
"Location of Hive run time structured log file"),
HIVE_LOG_INCREMENTAL_PLAN_PROGRESS("hive.querylog.enable.plan.progress", true,
"Whether to log the plan's progress every time a job's progress is checked.\n" +
"These logs are written to the location specified by hive.querylog.location"),
HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", "60000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"The interval to wait between logging the plan's progress.\n" +
"If there is a whole number percentage change in the progress of the mappers or the reducers,\n" +
"the progress is logged regardless of this value.\n" +
"The actual interval will be the ceiling of (this value divided by the value of\n" +
"hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval\n" +
"I.e. if it is not divide evenly by the value of hive.exec.counters.pull.interval it will be\n" +
"logged less frequently than specified.\n" +
"This only has an effect if hive.querylog.enable.plan.progress is set to true."),
HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
"The default SerDe for transmitting input data to and reading output data from the user scripts. "),
HIVESCRIPTRECORDREADER("hive.script.recordreader",
"org.apache.hadoop.hive.ql.exec.TextRecordReader",
"The default record reader for reading data from the user scripts. "),
HIVESCRIPTRECORDWRITER("hive.script.recordwriter",
"org.apache.hadoop.hive.ql.exec.TextRecordWriter",
"The default record writer for writing data to the user scripts. "),
HIVESCRIPTESCAPE("hive.transform.escape.input", false,
"This adds an option to escape special chars (newlines, carriage returns and\n" +
"tabs) when they are passed to the user script. This is useful if the Hive tables\n" +
"can contain data that contains special characters."),
HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000,
"Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \n" +
"The last record before the end of stream can have less than hive.binary.record.max.length bytes"),
HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"),
//small table file size
HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L,
"The threshold for the input file size of the small tables; if the file size is smaller \n" +
"than this threshold, it will try to convert the common join into map join"),
HIVE_SCHEMA_EVOLUTION("hive.exec.schema.evolution", true,
"Use schema evolution to convert self-describing file format's data to the schema desired by the reader."),
HIVE_ORC_FORCE_POSITIONAL_SCHEMA_EVOLUTION("orc.force.positional.evolution", true,
"Whether to use column position based schema evolution or not (as opposed to column name based evolution)"),
/** Don't use this directly - use AcidUtils! */
HIVE_TRANSACTIONAL_TABLE_SCAN("hive.transactional.table.scan", false,
"internal usage only -- do transaction (ACID or insert-only) table scan.", true),
HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY("hive.transactional.events.mem", 10000000,
"Vectorized ACID readers can often load all the delete events from all the delete deltas\n"
+ "into memory to optimize for performance. To prevent out-of-memory errors, this is a rough heuristic\n"
+ "that limits the total number of delete events that can be loaded into memory at once.\n"
+ "Roughly it has been set to 10 million delete events per bucket (~160 MB).\n"),
HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0,
"A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."),
// test mode in hive mode
HIVETESTMODE("hive.test.mode", false,
"Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.",
false),
HIVEEXIMTESTMODE("hive.exim.test.mode", false,
"The subset of test mode that only enables custom path handling for ExIm.", false),
HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_",
"In test mode, specifies prefixes for the output table", false),
HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32,
"In test mode, specifies sampling frequency for table, which is not bucketed,\n" +
"For example, the following query:\n" +
" INSERT OVERWRITE TABLE dest SELECT col1 from src\n" +
"would be converted to\n" +
" INSERT OVERWRITE TABLE test_dest\n" +
" SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))", false),
HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", "",
"In test mode, specifies comma separated table names which would not apply sampling", false),
HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false),
HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", "", "internal variable for test", false),
HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false),
HIVETESTMODEROLLBACKTXN("hive.test.rollbacktxn", false, "For testing only. Will mark every ACID transaction aborted", false),
HIVETESTMODEFAILCOMPACTION("hive.test.fail.compaction", false, "For testing only. Will cause CompactorMR to fail.", false),
HIVETESTMODEFAILHEARTBEATER("hive.test.fail.heartbeater", false, "For testing only. Will cause Heartbeater to fail.", false),
TESTMODE_BUCKET_CODEC_VERSION("hive.test.bucketcodec.version", 1,
"For testing only. Will make ACID subsystem write RecordIdentifier.bucketId in specified\n" +
"format", false),
HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
"Merge small files at the end of a map-only job"),
HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false,
"Merge small files at the end of a map-reduce job"),
HIVEMERGETEZFILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"),
HIVEMERGESPARKFILES("hive.merge.sparkfiles", false, "Merge small files at the end of a Spark DAG Transformation"),
HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000),
"Size of merged files at the end of the job"),
HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000),
"When the average output file size of a job is less than this number, Hive will start an additional \n" +
"map-reduce job to merge the output files into bigger files. This is only done for map-only jobs \n" +
"if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."),
HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""),
HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true,
"When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled\n" +
"while writing a table with ORC file format, enabling this config will do stripe-level\n" +
"fast merge for small ORC files. Note that enabling this config will not honor the\n" +
"padding tolerance config (hive.exec.orc.block.padding.tolerance)."),
HIVE_ORC_CODEC_POOL("hive.use.orc.codec.pool", true,
"Whether to use codec pool in ORC. Disable if there are bugs with codec reuse."),
HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true,
"If this is set the header for RCFiles will simply be RCF. If this is not\n" +
"set the header will be that borrowed from sequence files, e.g. SEQ- followed\n" +
"by the input and output RCFile formats."),
HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true, ""),
HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE, ""),
HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0, ""),
HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false, ""),
HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304, ""), // 4M
PARQUET_MEMORY_POOL_RATIO("parquet.memory.pool.ratio", 0.5f,
"Maximum fraction of heap that can be used by Parquet file writers in one task.\n" +
"It is for avoiding OutOfMemory error in tasks. Work with Parquet 1.6.0 and above.\n" +
"This config parameter is defined in Parquet, so that it does not start with 'hive.'."),
HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", false,
"Current Hive implementation of parquet stores timestamps to UTC, this flag allows skipping of the conversion" +
"on reading parquet files from other tools"),
HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS("hive.int.timestamp.conversion.in.seconds", false,
"Boolean/tinyint/smallint/int/bigint value is interpreted as milliseconds during the timestamp conversion.\n" +
"Set this flag to true to interpret the value as seconds to be consistent with float/double." ),
HIVE_ORC_BASE_DELTA_RATIO("hive.exec.orc.base.delta.ratio", 8, "The ratio of base writer and\n" +
"delta writer in terms of STRIPE_SIZE and BUFFER_SIZE."),
HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED("hive.exec.orc.delta.streaming.optimizations.enabled", false,
"Whether to enable streaming optimizations for ORC delta files. This will disable ORC's internal indexes,\n" +
"disable compression, enable fast encoding and disable dictionary encoding."),
HIVE_ORC_SPLIT_STRATEGY("hive.exec.orc.split.strategy", "HYBRID", new StringSet("HYBRID", "BI", "ETL"),
"This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation" +
" as opposed to query execution (split generation does not read or cache file footers)." +
" ETL strategy is used when spending little more time in split generation is acceptable" +
" (split generation reads and caches file footers). HYBRID chooses between the above strategies" +
" based on heuristics."),
HIVE_ORC_WRITER_LLAP_MEMORY_MANAGER_ENABLED("hive.exec.orc.writer.llap.memory.manager.enabled", true,
"Whether orc writers should use llap-aware memory manager. LLAP aware memory manager will use memory\n" +
"per executor instead of entire heap memory when concurrent orc writers are involved. This will let\n" +
"task fragments to use memory within its limit (memory per executor) when performing ETL in LLAP."),
// hive streaming ingest settings
HIVE_STREAMING_AUTO_FLUSH_ENABLED("hive.streaming.auto.flush.enabled", true, "Whether to enable memory \n" +
"monitoring and automatic flushing of open record updaters during streaming ingest. This is an expert level \n" +
"setting and disabling this may have severe performance impact under memory pressure."),
HIVE_HEAP_MEMORY_MONITOR_USAGE_THRESHOLD("hive.heap.memory.monitor.usage.threshold", 0.7f,
"Hive streaming does automatic memory management across all open record writers. This threshold will let the \n" +
"memory monitor take an action (flush open files) when heap memory usage exceeded this threshold."),
HIVE_STREAMING_AUTO_FLUSH_CHECK_INTERVAL_SIZE("hive.streaming.auto.flush.check.interval.size", "100Mb",
new SizeValidator(),
"Hive streaming ingest has auto flush mechanism to flush all open record updaters under memory pressure.\n" +
"When memory usage exceed hive.heap.memory.monitor.default.usage.threshold, the auto-flush mechanism will \n" +
"wait until this size (default 100Mb) of records are ingested before triggering flush."),
HIVE_CLASSLOADER_SHADE_PREFIX("hive.classloader.shade.prefix", "", "During reflective instantiation of a class\n" +
"(input, output formats, serde etc.), when classloader throws ClassNotFoundException, as a fallback this\n" +
"shade prefix will be used before class reference and retried."),
HIVE_ORC_MS_FOOTER_CACHE_ENABLED("hive.orc.splits.ms.footer.cache.enabled", false,
"Whether to enable using file metadata cache in metastore for ORC file footers."),
HIVE_ORC_MS_FOOTER_CACHE_PPD("hive.orc.splits.ms.footer.cache.ppd.enabled", true,
"Whether to enable file footer cache PPD (hive.orc.splits.ms.footer.cache.enabled\n" +
"must also be set to true for this to work)."),
HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false,
"If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
"data is read remotely (from the client or HS2 machine) and sent to all the tasks."),
HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS("hive.orc.splits.directory.batch.ms", 0,
"How long, in ms, to wait to batch input directories for processing during ORC split\n" +
"generation. 0 means process directories individually. This can increase the number of\n" +
"metastore calls if metastore metadata cache is used."),
HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true,
"Include file ID in splits on file systems that support it."),
HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS("hive.orc.splits.allow.synthetic.fileid", true,
"Allow synthetic file ID in splits on file systems that don't have a native one."),
HIVE_ORC_CACHE_STRIPE_DETAILS_MEMORY_SIZE("hive.orc.cache.stripe.details.mem.size", "256Mb",
new SizeValidator(), "Maximum size of orc splits cached in the client."),
HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
"How many threads orc should use to create splits in parallel."),
HIVE_ORC_CACHE_USE_SOFT_REFERENCES("hive.orc.cache.use.soft.references", false,
"By default, the cache that ORC input format uses to store orc file footer use hard\n" +
"references for the cached object. Setting this to true can help avoid out of memory\n" +
"issues under memory pressure (in some cases) at the cost of slight unpredictability in\n" +
"overall query performance."),
HIVE_IO_SARG_CACHE_MAX_WEIGHT_MB("hive.io.sarg.cache.max.weight.mb", 10,
"The max weight allowed for the SearchArgument Cache. By default, the cache allows a max-weight of 10MB, " +
"after which entries will be evicted."),
HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL("hive.lazysimple.extended_boolean_literal", false,
"LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',\n" +
"'1', and '0' as extended, legal boolean literal, in addition to 'TRUE' and 'FALSE'.\n" +
"The default is false, which means only 'TRUE' and 'FALSE' are treated as legal\n" +
"boolean literal."),
HIVESKEWJOIN("hive.optimize.skewjoin", false,
"Whether to enable skew join optimization. \n" +
"The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\n" +
"processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\n" +
"job, process those skewed keys. The same key need not be skewed for all the tables, and so,\n" +
"the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\n" +
"map-join."),
HIVEDYNAMICPARTITIONHASHJOIN("hive.optimize.dynamic.partition.hashjoin", false,
"Whether to enable dynamically partitioned hash join optimization. \n" +
"This setting is also dependent on enabling hive.auto.convert.join"),
HIVECONVERTJOIN("hive.auto.convert.join", true,
"Whether Hive enables the optimization about converting common join into mapjoin based on the input file size"),
HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true,
"Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. \n" +
"If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n" +
"specified size, the join is directly converted to a mapjoin (there is no conditional task)."),
HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size",
10000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, \n" +
"the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB"),
HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", false,
"For conditional joins, if input stream from a small alias can be directly applied to join operator without \n" +
"filtering or projection, the alias need not to be pre-staged in distributed cache via mapred local task.\n" +
"Currently, this is not working with vectorization or tez execution engine."),
HIVESKEWJOINKEY("hive.skewjoin.key", 100000,
"Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator,\n" +
"we think the key as a skew join key. "),
HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000,
"Determine the number of map task used in the follow up map join job for a skew join.\n" +
"It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control."),
HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L,
"Determine the number of map task at most used in the follow up map join job for a skew join by specifying \n" +
"the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine grained control."),
HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000,
"Send a heartbeat after this interval - used by mapjoin and filter operators"),
HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L,
"When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least."),
HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10,
"When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."),
HIVELIMITOPTENABLE("hive.limit.optimize.enable", false,
"Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first."),
HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000,
"Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. \n" +
"Insert queries are not restricted by this limit."),
HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(),
"The fraction of available memory to be used for buffering rows in Reducesink operator for limit pushdown optimization."),
HIVECONVERTJOINMAXENTRIESHASHTABLE("hive.auto.convert.join.hashtable.max.entries", 21000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the predicted number of entries in hashtable for a given join \n" +
"input is larger than this number, the join will not be converted to a mapjoin. \n" +
"The value \"-1\" means no limit."),
    XPRODSMALLTABLEROWSTHRESHOLD("hive.xprod.mapjoin.small.table.rows", 1, "Maximum number of rows on build side"
+ " of map join before it switches over to cross product edge"),
HIVECONVERTJOINMAXSHUFFLESIZE("hive.auto.convert.join.shuffle.max.size", 10000000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the predicted size of the larger input for a given join is greater \n" +
"than this number, the join will not be converted to a dynamically partitioned hash join. \n" +
"The value \"-1\" means no limit."),
HIVEHASHTABLEKEYCOUNTADJUSTMENT("hive.hashtable.key.count.adjustment", 0.99f,
"Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate" +
" of the number of keys is divided by this value. If the value is 0, statistics are not used" +
"and hive.hashtable.initialCapacity is used instead."),
HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " +
"mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0"),
HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""),
HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55,
"This number means how much memory the local task can take to hold the key/value into an in-memory hash table \n" +
"when this map join is followed by a group by. If the local task's memory usage is more than this number, \n" +
"the local task will abort by itself. It means the data of the small table is too large to be held in memory."),
HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90,
"This number means how much memory the local task can take to hold the key/value into an in-memory hash table. \n" +
"If the local task's memory usage is more than this number, the local task will abort by itself. \n" +
"It means the data of the small table is too large to be held in memory."),
HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000,
"The number means after how many rows processed it needs to check the memory usage"),
HIVEDEBUGLOCALTASK("hive.debug.localtask",false, ""),
HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat",
"The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat."),
HIVETEZINPUTFORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat",
"The default input format for tez. Tez groups splits in the AM."),
HIVETEZCONTAINERSIZE("hive.tez.container.size", -1,
"By default Tez will spawn containers of the size of a mapper. This can be used to overwrite."),
HIVETEZCPUVCORES("hive.tez.cpu.vcores", -1,
"By default Tez will ask for however many cpus map-reduce is configured to use per container.\n" +
"This can be used to overwrite."),
HIVETEZJAVAOPTS("hive.tez.java.opts", null,
"By default Tez will use the Java options from map tasks. This can be used to overwrite."),
HIVETEZLOGLEVEL("hive.tez.log.level", "INFO",
"The log level to use for tasks executing as part of the DAG.\n" +
"Used only if hive.tez.java.opts is used to configure Java options."),
HIVETEZHS2USERACCESS("hive.tez.hs2.user.access", true,
"Whether to grant access to the hs2/hive user for queries"),
HIVEQUERYNAME ("hive.query.name", null,
"This named is used by Tez to set the dag name. This name in turn will appear on \n" +
"the Tez UI representing the work that was done. Used by Spark to set the query name, will show up in the\n" +
"Spark UI."),
HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true,
"Don't create a reducer for enforcing \n" +
"bucketing/sorting for queries of the form: \n" +
"insert overwrite table T2 select * from T1;\n" +
"where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."),
HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""),
HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false,
"If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?"),
HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false,
"If the user asked for bucketed map-side join, and it cannot be performed, \n" +
"should the query fail or not ? For example, if the buckets in the tables being joined are\n" +
"not a multiple of each other, bucketed map-side join cannot be performed, and the\n" +
"query will fail if hive.enforce.bucketmapjoin is set to true."),
HIVE_ENFORCE_NOT_NULL_CONSTRAINT("hive.constraint.notnull.enforce", true,
"Should \"IS NOT NULL \" constraint be enforced?"),
HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", true,
"Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join."),
HIVE_AUTO_SORTMERGE_JOIN_REDUCE("hive.auto.convert.sortmerge.join.reduce.side", true,
"Whether hive.auto.convert.sortmerge.join (if enabled) should be applied to reduce side."),
HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR(
"hive.auto.convert.sortmerge.join.bigtable.selection.policy",
"org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ",
"The policy to choose the big table for automatic conversion to sort-merge join. \n" +
"By default, the table with the largest partitions is assigned the big table. All policies are:\n" +
". based on position of the table - the leftmost table is selected\n" +
"org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.\n" +
". based on total size (all the partitions selected in the query) of the table \n" +
"org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ.\n" +
". based on average size (all the partitions selected in the query) of the table \n" +
"org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.\n" +
"New policies can be added in future."),
HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN(
"hive.auto.convert.sortmerge.join.to.mapjoin", false,
"If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, \n" +
"this parameter decides whether each table should be tried as a big table, and effectively a map-join should be\n" +
"tried. That would create a conditional task with n+1 children for a n-way join (1 child for each table as the\n" +
"big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a\n" +
"sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted\n" +
"and bucketed table with few files (say 10 files) are being joined with a very small sorter and bucketed table\n" +
"with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster\n" +
"if the complete small table can fit in memory, and a map-join can be performed."),
HIVESCRIPTOPERATORTRUST("hive.exec.script.trust", false, ""),
HIVEROWOFFSET("hive.exec.rowoffset", false,
"Whether to provide the row offset virtual column"),
// Optimizer
HIVEOPTINDEXFILTER("hive.optimize.index.filter", false, "Whether to enable automatic use of indexes"),
HIVEOPTPPD("hive.optimize.ppd", true,
"Whether to enable predicate pushdown"),
HIVEOPTPPD_WINDOWING("hive.optimize.ppd.windowing", true,
"Whether to enable predicate pushdown through windowing"),
HIVEPPDRECOGNIZETRANSITIVITY("hive.ppd.recognizetransivity", true,
"Whether to transitively replicate predicate filters over equijoin conditions."),
HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true,
"During query optimization, filters may be pushed down in the operator tree. \n" +
"If this config is true only pushed down filters remain in the operator tree, \n" +
"and the original filter is removed. If this config is false, the original filter \n" +
"is also left in the operator tree at the original place."),
HIVEPOINTLOOKUPOPTIMIZER("hive.optimize.point.lookup", true,
"Whether to transform OR clauses in Filter operators into IN clauses"),
HIVEPOINTLOOKUPOPTIMIZERMIN("hive.optimize.point.lookup.min", 2,
"Minimum number of OR clauses needed to transform into IN clauses"),
HIVECOUNTDISTINCTOPTIMIZER("hive.optimize.countdistinct", true,
"Whether to transform count distinct into two stages"),
HIVEPARTITIONCOLUMNSEPARATOR("hive.optimize.partition.columns.separate", true,
"Extract partition columns from IN clauses"),
// Constant propagation optimizer
HIVEOPTCONSTANTPROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"),
HIVEIDENTITYPROJECTREMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"),
HIVEMETADATAONLYQUERIES("hive.optimize.metadataonly", false,
"Whether to eliminate scans of the tables from which no columns are selected. Note\n" +
"that, when selecting from empty tables with data files, this can produce incorrect\n" +
"results, so it's disabled by default. It works correctly for normal tables."),
HIVENULLSCANOPTIMIZE("hive.optimize.null.scan", true, "Dont scan relations which are guaranteed to not generate any rows"),
HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true,
"Whether to push predicates down to storage handlers"),
HIVEOPTGROUPBY("hive.optimize.groupby", true,
"Whether to enable the bucketed group by from bucketed partitions/tables."),
HIVEOPTBUCKETMAPJOIN("hive.optimize.bucketmapjoin", false,
"Whether to try bucket mapjoin"),
HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false,
"Whether to try sorted bucket merge map join"),
HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true,
"Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. \n" +
"This should always be set to true. Since it is a new feature, it has been made configurable."),
HIVEOPTREDUCEDEDUPLICATIONMINREDUCER("hive.optimize.reducededuplication.min.reducer", 4,
"Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. \n" +
"That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.\n" +
"The optimization will be automatically disabled if number of reducers would be less than specified value."),
HIVEOPTJOINREDUCEDEDUPLICATION("hive.optimize.joinreducededuplication", true,
"Remove extra shuffle/sorting operations after join algorithm selection has been executed. \n" +
"Currently it only works with Apache Tez. This should always be set to true. \n" +
"Since it is a new feature, it has been made configurable."),
HIVEOPTSORTDYNAMICPARTITION("hive.optimize.sort.dynamic.partition", false,
"When enabled dynamic partitioning column will be globally sorted.\n" +
"This way we can keep only one record writer open for each partition value\n" +
"in the reducer thereby reducing the memory pressure on reducers."),
HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false, "Uses sampling on order-by clause for parallel execution."),
HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."),
HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(),
"Probability with which a row will be chosen."),
HIVE_REMOVE_ORDERBY_IN_SUBQUERY("hive.remove.orderby.in.subquery", true,
"If set to true, order/sort by without limit in sub queries will be removed."),
HIVEOPTIMIZEDISTINCTREWRITE("hive.optimize.distinct.rewrite", true, "When applicable this "
+ "optimization rewrites distinct aggregates from a single stage to multi-stage "
+ "aggregation. This may not be optimal in all cases. Ideally, whether to trigger it or "
+ "not should be cost based decision. Until Hive formalizes cost model for this, this is config driven."),
// whether to optimize union followed by select followed by filesink
// It creates sub-directories in the final output, so should not be turned on in systems
// where MAPREDUCE-1501 is not present
HIVE_OPTIMIZE_UNION_REMOVE("hive.optimize.union.remove", false,
"Whether to remove the union and push the operators between union and the filesink above union. \n" +
"This avoids an extra scan of the output by union. This is independently useful for union\n" +
"queries, and specially useful when hive.optimize.skewjoin.compiletime is set to true, since an\n" +
"extra union is inserted.\n" +
"\n" +
"The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true.\n" +
"If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the idea was the\n" +
"number of reducers are few, so the number of files anyway are small. However, with this optimization,\n" +
"we are increasing the number of files possibly by a big margin. So, we merge aggressively."),
HIVEOPTCORRELATION("hive.optimize.correlation", false, "exploit intra-query correlations."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE("hive.optimize.limittranspose", false,
"Whether to push a limit through left/right outer join or union. If the value is true and the size of the outer\n" +
"input is reduced enough (as specified in hive.optimize.limittranspose.reduction), the limit is pushed\n" +
"to the outer input or union; to remain semantically correct, the limit is kept on top of the join or the union too."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE("hive.optimize.limittranspose.reductionpercentage", 1.0f,
"When hive.optimize.limittranspose is true, this variable specifies the minimal reduction of the\n" +
"size of the outer input of the join or input of the union that we should get in order to apply the rule."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES("hive.optimize.limittranspose.reductiontuples", (long) 0,
"When hive.optimize.limittranspose is true, this variable specifies the minimal reduction in the\n" +
"number of tuples of the outer input of the join or the input of the union that you should get in order to apply the rule."),
HIVE_OPTIMIZE_CONSTRAINTS_JOIN("hive.optimize.constraints.join", true, "Whether to use referential constraints\n" +
"to optimize (remove or transform) join operators"),
HIVE_OPTIMIZE_REDUCE_WITH_STATS("hive.optimize.filter.stats.reduction", false, "Whether to simplify comparison\n" +
"expressions in filter operators using column stats"),
HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME("hive.optimize.skewjoin.compiletime", false,
"Whether to create a separate plan for skewed keys for the tables in the join.\n" +
"This is based on the skewed keys stored in the metadata. At compile time, the plan is broken\n" +
"into different joins: one for the skewed keys, and the other for the remaining keys. And then,\n" +
"a union is performed for the 2 joins generated above. So unless the same skewed key is present\n" +
"in both the joined tables, the join for the skewed key will be performed as a map-side join.\n" +
"\n" +
"The main difference between this parameter and hive.optimize.skewjoin is that this parameter\n" +
"uses the skew information stored in the metastore to optimize the plan at compile time itself.\n" +
"If there is no skew information in the metadata, this parameter will not have any affect.\n" +
"Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true.\n" +
"Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing\n" +
"so for backward compatibility.\n" +
"\n" +
"If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime\n" +
"would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op."),
HIVE_SHARED_WORK_OPTIMIZATION("hive.optimize.shared.work", true,
"Whether to enable shared work optimizer. The optimizer finds scan operator over the same table\n" +
"and follow-up operators in the query plan and merges them if they meet some preconditions. Tez only."),
HIVE_SHARED_WORK_EXTENDED_OPTIMIZATION("hive.optimize.shared.work.extended", true,
"Whether to enable shared work extended optimizer. The optimizer tries to merge equal operators\n" +
"after a work boundary after shared work optimizer has been executed. Requires hive.optimize.shared.work\n" +
"to be set to true. Tez only."),
HIVE_SHARED_WORK_REUSE_MAPJOIN_CACHE("hive.optimize.shared.work.mapjoin.cache.reuse", true,
"When shared work optimizer is enabled, whether we should reuse the cache for the broadcast side\n" +
"of mapjoin operators that share same broadcast input. Requires hive.optimize.shared.work\n" +
"to be set to true. Tez only."),
HIVE_COMBINE_EQUIVALENT_WORK_OPTIMIZATION("hive.combine.equivalent.work.optimization", true, "Whether to " +
"combine equivalent work objects during physical optimization.\n This optimization looks for equivalent " +
"work objects and combines them if they meet certain preconditions. Spark only."),
HIVE_REMOVE_SQ_COUNT_CHECK("hive.optimize.remove.sq_count_check", true,
"Whether to remove an extra join with sq_count_check for scalar subqueries "
+ "with constant group by keys."),
HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE("hive.optimize.update.table.properties.from.serde", false,
"Whether to update table-properties by initializing tables' SerDe instances during logical-optimization. \n" +
"By doing so, certain SerDe classes (like AvroSerDe) can pre-calculate table-specific information, and \n" +
"store it in table-properties, to be used later in the SerDe, while running the job."),
HIVE_OPTIMIZE_TABLE_PROPERTIES_FROM_SERDE_LIST("hive.optimize.update.table.properties.from.serde.list",
"org.apache.hadoop.hive.serde2.avro.AvroSerDe",
"The comma-separated list of SerDe classes that are considered when enhancing table-properties \n" +
"during logical optimization."),
// CTE
HIVE_CTE_MATERIALIZE_THRESHOLD("hive.optimize.cte.materialize.threshold", -1,
"If the number of references to a CTE clause exceeds this threshold, Hive will materialize it\n" +
"before executing the main query block. -1 will disable this feature."),
// Statistics
HIVE_STATS_ESTIMATE_STATS("hive.stats.estimate", true,
"Estimate statistics in absence of statistics."),
    HIVE_STATS_NDV_ESTIMATE_PERC("hive.stats.ndv.estimate.percent", (float)20,
        "This percentage of rows will be estimated as the count distinct in the absence of statistics."),
    HIVE_STATS_NUM_NULLS_ESTIMATE_PERC("hive.stats.num.nulls.estimate.percent", (float)5,
        "This percentage of rows will be estimated as the number of nulls in the absence of statistics."),
HIVESTATSAUTOGATHER("hive.stats.autogather", true,
"A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command."),
HIVESTATSCOLAUTOGATHER("hive.stats.column.autogather", true,
"A flag to gather column statistics automatically."),
HIVESTATSDBCLASS("hive.stats.dbclass", "fs", new PatternSet("custom", "fs"),
"The storage that stores temporary Hive statistics. In filesystem based statistics collection ('fs'), \n" +
"each task writes statistics it has collected in a file on the filesystem, which will be aggregated \n" +
"after the job has finished. Supported values are fs (filesystem) and custom as defined in StatsSetupConst.java."), // StatsSetupConst.StatDB
/**
* @deprecated Use MetastoreConf.STATS_DEFAULT_PUBLISHER
*/
@Deprecated
HIVE_STATS_DEFAULT_PUBLISHER("hive.stats.default.publisher", "",
"The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
/**
* @deprecated Use MetastoreConf.STATS_DEFAULT_AGGRETATOR
*/
@Deprecated
HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator", "",
"The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
CLIENT_STATS_COUNTERS("hive.client.stats.counters", "",
"Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). \n" +
"Non-display names should be used"),
HIVE_STATS_RELIABLE("hive.stats.reliable", false,
"Whether queries will fail because stats cannot be collected completely accurately. \n" +
"If this is set to true, reading/writing from/into a partition may fail because the stats\n" +
"could not be computed accurately."),
HIVE_STATS_COLLECT_PART_LEVEL_STATS("hive.analyze.stmt.collect.partlevel.stats", true,
"analyze table T compute statistics for columns. Queries like these should compute partition"
+ "level stats for partitioned table even when no part spec is specified."),
HIVE_STATS_GATHER_NUM_THREADS("hive.stats.gather.num.threads", 10,
"Number of threads used by noscan analyze command for partitioned tables.\n" +
"This is applicable only for file formats that implement StatsProvidingRecordReader (like ORC)."),
// Collect table access keys information for operators that can benefit from bucketing
HIVE_STATS_COLLECT_TABLEKEYS("hive.stats.collect.tablekeys", false,
"Whether join and group by keys on tables are derived and maintained in the QueryPlan.\n" +
"This is useful to identify how tables are accessed and to determine if they should be bucketed."),
// Collect column access information
HIVE_STATS_COLLECT_SCANCOLS("hive.stats.collect.scancols", false,
"Whether column accesses are tracked in the QueryPlan.\n" +
"This is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed."),
HIVE_STATS_NDV_ALGO("hive.stats.ndv.algo", "hll", new PatternSet("hll", "fm"),
"hll and fm stand for HyperLogLog and FM-sketch, respectively for computing ndv."),
/**
* @deprecated Use MetastoreConf.STATS_FETCH_BITVECTOR
*/
@Deprecated
HIVE_STATS_FETCH_BITVECTOR("hive.stats.fetch.bitvector", false,
"Whether we fetch bitvector when we compute ndv. Users can turn it off if they want to use old schema"),
// standard error allowed for ndv estimates for FM-sketch. A lower value indicates higher accuracy and a
// higher compute cost.
HIVE_STATS_NDV_ERROR("hive.stats.ndv.error", (float)20.0,
"Standard error expressed in percentage. Provides a tradeoff between accuracy and compute cost. \n" +
"A lower value for error indicates higher accuracy and a higher compute cost."),
/**
* @deprecated Use MetastoreConf.STATS_NDV_TUNER
*/
@Deprecated
HIVE_METASTORE_STATS_NDV_TUNER("hive.metastore.stats.ndv.tuner", (float)0.0,
"Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \n" +
"The lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\n" +
"Its value should be between 0.0 (i.e., choose lower bound) and 1.0 (i.e., choose higher bound)"),
/**
* @deprecated Use MetastoreConf.STATS_NDV_DENSITY_FUNCTION
*/
@Deprecated
HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION("hive.metastore.stats.ndv.densityfunction", false,
"Whether to use density function to estimate the NDV for the whole table based on the NDV of partitions"),
HIVE_STATS_KEY_PREFIX("hive.stats.key.prefix", "", "", true), // internal usage only
// if length of variable length data type cannot be determined this length will be used.
HIVE_STATS_MAX_VARIABLE_LENGTH("hive.stats.max.variable.length", 100,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics, for variable length columns (like string, bytes etc.), this value will be\n" +
"used. For fixed length columns their corresponding Java equivalent sizes are used\n" +
"(float - 4 bytes, double - 8 bytes etc.)."),
// if number of elements in list cannot be determined, this value will be used
HIVE_STATS_LIST_NUM_ENTRIES("hive.stats.list.num.entries", 10,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics and for variable length complex columns like list, the average number of\n" +
"entries/values can be specified using this config."),
// if number of elements in map cannot be determined, this value will be used
HIVE_STATS_MAP_NUM_ENTRIES("hive.stats.map.num.entries", 10,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics and for variable length complex columns like map, the average number of\n" +
"entries/values can be specified using this config."),
// statistics annotation fetches column statistics for all required columns which can
// be very expensive sometimes
HIVE_STATS_FETCH_COLUMN_STATS("hive.stats.fetch.column.stats", false,
"Annotation of operator tree with statistics information requires column statistics.\n" +
"Column statistics are fetched from metastore. Fetching column statistics for each needed column\n" +
"can be expensive when the number of columns is high. This flag can be used to disable fetching\n" +
"of column statistics from metastore."),
// in the absence of column statistics, the estimated number of rows/data size that will
// be emitted from join operator will depend on this factor
HIVE_STATS_JOIN_FACTOR("hive.stats.join.factor", (float) 1.1,
"Hive/Tez optimizer estimates the data size flowing through each of the operators. JOIN operator\n" +
"uses column statistics to estimate the number of rows flowing out of it and hence the data size.\n" +
"In the absence of column statistics, this factor determines the amount of rows that flows out\n" +
"of JOIN operator."),
HIVE_STATS_CORRELATED_MULTI_KEY_JOINS("hive.stats.correlated.multi.key.joins", true,
"When estimating output rows for a join involving multiple columns, the default behavior assumes" +
"the columns are independent. Setting this flag to true will cause the estimator to assume" +
"the columns are correlated."),
// in the absence of uncompressed/raw data size, total file size will be used for statistics
// annotation. But the file may be compressed, encoded and serialized which may be lesser in size
// than the actual uncompressed/raw data size. This factor will be multiplied to file size to estimate
// the raw data size.
HIVE_STATS_DESERIALIZATION_FACTOR("hive.stats.deserialization.factor", (float) 10.0,
"Hive/Tez optimizer estimates the data size flowing through each of the operators. In the absence\n" +
"of basic statistics like number of rows and data size, file size is used to estimate the number\n" +
"of rows and data size. Since files in tables/partitions are serialized (and optionally\n" +
"compressed) the estimates of number of rows and data size cannot be reliably determined.\n" +
"This factor is multiplied with the file size to account for serialization and compression."),
HIVE_STATS_IN_CLAUSE_FACTOR("hive.stats.filter.in.factor", (float) 1.0,
"Currently column distribution is assumed to be uniform. This can lead to overestimation/underestimation\n" +
"in the number of rows filtered by a certain operator, which in turn might lead to overprovision or\n" +
"underprovision of resources. This factor is applied to the cardinality estimation of IN clauses in\n" +
"filter operators."),
HIVE_STATS_IN_MIN_RATIO("hive.stats.filter.in.min.ratio", (float) 0.0f,
"Output estimation of an IN filter can't be lower than this ratio"),
// Concurrency
HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", false,
"Whether Hive supports concurrency control or not. \n" +
"A ZooKeeper instance must be up and running when using zookeeper Hive lock manager "),
HIVE_LOCK_MANAGER("hive.lock.manager", "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager", ""),
HIVE_LOCK_NUMRETRIES("hive.lock.numretries", 100,
"The number of times you want to try to get all the locks"),
HIVE_UNLOCK_NUMRETRIES("hive.unlock.numretries", 10,
"The number of times you want to retry to do one unlock"),
HIVE_LOCK_SLEEP_BETWEEN_RETRIES("hive.lock.sleep.between.retries", "60s",
new TimeValidator(TimeUnit.SECONDS, 0L, false, Long.MAX_VALUE, false),
"The maximum sleep time between various retries"),
HIVE_LOCK_MAPRED_ONLY("hive.lock.mapred.only.operation", false,
"This param is to control whether or not only do lock on queries\n" +
"that need to execute at least one mapred job."),
HIVE_LOCK_QUERY_STRING_MAX_LENGTH("hive.lock.query.string.max.length", 1000000,
"The maximum length of the query string to store in the lock.\n" +
"The default value is 1000000, since the data limit of a znode is 1MB"),
HIVE_MM_ALLOW_ORIGINALS("hive.mm.allow.originals", true,
"Whether to allow original files in MM tables. Conversion to MM may be expensive if\n" +
"this is set to false, however unless MAPREDUCE-7086 fix is present, queries that\n" +
"read MM tables with original files will fail. The default in Hive 3.0 is false."),
// Zookeeper related configs
HIVE_ZOOKEEPER_QUORUM("hive.zookeeper.quorum", "",
"List of ZooKeeper servers to talk to. This is needed for: \n" +
"1. Read/write locks - when hive.lock.manager is set to \n" +
"org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager, \n" +
"2. When HiveServer2 supports service discovery via Zookeeper.\n" +
"3. For delegation token storage if zookeeper store is used, if\n" +
"hive.cluster.delegation.token.store.zookeeper.connectString is not set\n" +
"4. LLAP daemon registry service\n" +
"5. Leader selection for privilege synchronizer"),
HIVE_ZOOKEEPER_CLIENT_PORT("hive.zookeeper.client.port", "2181",
"The port of ZooKeeper servers to talk to.\n" +
"If the list of Zookeeper servers specified in hive.zookeeper.quorum\n" +
"does not contain port numbers, this value is used."),
HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", "1200000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"ZooKeeper client's session timeout (in milliseconds). The client is disconnected, and as a result, all locks released, \n" +
"if a heartbeat is not sent in the timeout."),
HIVE_ZOOKEEPER_CONNECTION_TIMEOUT("hive.zookeeper.connection.timeout", "15s",
new TimeValidator(TimeUnit.SECONDS),
"ZooKeeper client's connection timeout in seconds. Connection timeout * hive.zookeeper.connection.max.retries\n" +
"with exponential backoff is when curator client deems connection is lost to zookeeper."),
HIVE_ZOOKEEPER_NAMESPACE("hive.zookeeper.namespace", "hive_zookeeper_namespace",
"The parent node under which all ZooKeeper nodes are created."),
HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES("hive.zookeeper.clean.extra.nodes", false,
"Clean extra nodes at the end of the session."),
HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES("hive.zookeeper.connection.max.retries", 3,
"Max number of times to retry when connecting to the ZooKeeper server."),
HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME("hive.zookeeper.connection.basesleeptime", "1000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Initial amount of time (in milliseconds) to wait between retries\n" +
"when connecting to the ZooKeeper server when using ExponentialBackoffRetry policy."),
// Transactions
HIVE_TXN_MANAGER("hive.txn.manager",
"org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
"Set to org.apache.hadoop.hive.ql.lockmgr.DbTxnManager as part of turning on Hive\n" +
"transactions, which also requires appropriate settings for hive.compactor.initiator.on,\n" +
"hive.compactor.worker.threads, hive.support.concurrency (true),\n" +
"and hive.exec.dynamic.partition.mode (nonstrict).\n" +
"The default DummyTxnManager replicates pre-Hive-0.13 behavior and provides\n" +
"no transactions."),
HIVE_TXN_STRICT_LOCKING_MODE("hive.txn.strict.locking.mode", true, "In strict mode non-ACID\n" +
"resources use standard R/W lock semantics, e.g. INSERT will acquire exclusive lock.\n" +
"In nonstrict mode, for non-ACID resources, INSERT will only acquire shared lock, which\n" +
"allows two concurrent writes to the same partition but still lets lock manager prevent\n" +
"DROP TABLE etc. when the table is being written to"),
TXN_OVERWRITE_X_LOCK("hive.txn.xlock.iow", true,
"Ensures commands with OVERWRITE (such as INSERT OVERWRITE) acquire Exclusive locks for\b" +
"transactional tables. This ensures that inserts (w/o overwrite) running concurrently\n" +
"are not hidden by the INSERT OVERWRITE."),
HIVE_TXN_STATS_ENABLED("hive.txn.stats.enabled", true,
"Whether Hive supports transactional stats (accurate stats for transactional tables)"),
/**
* @deprecated Use MetastoreConf.TXN_TIMEOUT
*/
@Deprecated
HIVE_TXN_TIMEOUT("hive.txn.timeout", "300s", new TimeValidator(TimeUnit.SECONDS),
"time after which transactions are declared aborted if the client has not sent a heartbeat."),
/**
* @deprecated Use MetastoreConf.TXN_HEARTBEAT_THREADPOOL_SIZE
*/
@Deprecated
HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE("hive.txn.heartbeat.threadpool.size", 5, "The number of " +
"threads to use for heartbeating. For Hive CLI, 1 is enough. For HiveServer2, we need a few"),
TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT("hive.txn.manager.dump.lock.state.on.acquire.timeout", false,
"Set this to true so that when attempt to acquire a lock on resource times out, the current state" +
" of the lock manager is dumped to log file. This is for debugging. See also " +
"hive.lock.numretries and hive.lock.sleep.between.retries."),
HIVE_TXN_OPERATIONAL_PROPERTIES("hive.txn.operational.properties", 1,
"1: Enable split-update feature found in the newer version of Hive ACID subsystem\n" +
"4: Make the table 'quarter-acid' as it only supports insert. But it doesn't require ORC or bucketing.\n" +
"This is intended to be used as an internal property for future versions of ACID. (See\n" +
"HIVE-14035 for details. User sets it tblproperites via transactional_properties.)", true),
/**
* @deprecated Use MetastoreConf.MAX_OPEN_TXNS
*/
@Deprecated
HIVE_MAX_OPEN_TXNS("hive.max.open.txns", 100000, "Maximum number of open transactions. If \n" +
"current open transactions reach this limit, future open transaction requests will be \n" +
"rejected, until this number goes below the limit."),
/**
* @deprecated Use MetastoreConf.COUNT_OPEN_TXNS_INTERVAL
*/
@Deprecated
HIVE_COUNT_OPEN_TXNS_INTERVAL("hive.count.open.txns.interval", "1s",
new TimeValidator(TimeUnit.SECONDS), "Time in seconds between checks to count open transactions."),
/**
* @deprecated Use MetastoreConf.TXN_MAX_OPEN_BATCH
*/
@Deprecated
HIVE_TXN_MAX_OPEN_BATCH("hive.txn.max.open.batch", 1000,
"Maximum number of transactions that can be fetched in one call to open_txns().\n" +
"This controls how many transactions streaming agents such as Flume or Storm open\n" +
"simultaneously. The streaming agent then writes that number of entries into a single\n" +
"file (per Flume agent or Storm bolt). Thus increasing this value decreases the number\n" +
"of delta files created by streaming agents. But it also increases the number of open\n" +
"transactions that Hive has to track at any given time, which may negatively affect\n" +
"read performance."),
/**
* @deprecated Use MetastoreConf.TXN_RETRYABLE_SQLEX_REGEX
*/
@Deprecated
HIVE_TXN_RETRYABLE_SQLEX_REGEX("hive.txn.retryable.sqlex.regex", "", "Comma separated list\n" +
"of regular expression patterns for SQL state, error code, and error message of\n" +
"retryable SQLExceptions, that's suitable for the metastore DB.\n" +
"For example: Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*\n" +
"The string that the regex will be matched against is of the following form, where ex is a SQLException:\n" +
"ex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", ErrorCode=\" + ex.getErrorCode() + \")\""),
/**
* @deprecated Use MetastoreConf.COMPACTOR_INITIATOR_ON
*/
@Deprecated
HIVE_COMPACTOR_INITIATOR_ON("hive.compactor.initiator.on", false,
"Whether to run the initiator and cleaner threads on this metastore instance or not.\n" +
"Set this to true on one instance of the Thrift metastore service as part of turning\n" +
"on Hive transactions. For a complete list of parameters required for turning on\n" +
"transactions, see hive.txn.manager."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_WORKER_THREADS
*/
@Deprecated
HIVE_COMPACTOR_WORKER_THREADS("hive.compactor.worker.threads", 0,
"How many compactor worker threads to run on this metastore instance. Set this to a\n" +
"positive number on one or more instances of the Thrift metastore service as part of\n" +
"turning on Hive transactions. For a complete list of parameters required for turning\n" +
"on transactions, see hive.txn.manager.\n" +
"Worker threads spawn MapReduce jobs to do compactions. They do not do the compactions\n" +
"themselves. Increasing the number of worker threads will decrease the time it takes\n" +
"tables or partitions to be compacted once they are determined to need compaction.\n" +
"It will also increase the background load on the Hadoop cluster as more MapReduce jobs\n" +
"will be running in the background."),
HIVE_COMPACTOR_WORKER_TIMEOUT("hive.compactor.worker.timeout", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"Time in seconds after which a compaction job will be declared failed and the\n" +
"compaction re-queued."),
HIVE_COMPACTOR_CHECK_INTERVAL("hive.compactor.check.interval", "300s",
new TimeValidator(TimeUnit.SECONDS),
"Time in seconds between checks to see if any tables or partitions need to be\n" +
"compacted. This should be kept high because each check for compaction requires\n" +
"many calls against the NameNode.\n" +
"Decreasing this value will reduce the time it takes for compaction to be started\n" +
"for a table or partition that requires compaction. However, checking if compaction\n" +
"is needed requires several calls to the NameNode for each table or partition that\n" +
"has had a transaction done on it since the last major compaction. So decreasing this\n" +
"value will increase the load on the NameNode."),
HIVE_COMPACTOR_DELTA_NUM_THRESHOLD("hive.compactor.delta.num.threshold", 10,
"Number of delta directories in a table or partition that will trigger a minor\n" +
"compaction."),
HIVE_COMPACTOR_DELTA_PCT_THRESHOLD("hive.compactor.delta.pct.threshold", 0.1f,
"Percentage (fractional) size of the delta files relative to the base that will trigger\n" +
"a major compaction. (1.0 = 100%, so the default 0.1 = 10%.)"),
COMPACTOR_MAX_NUM_DELTA("hive.compactor.max.num.delta", 500, "Maximum number of delta files that " +
"the compactor will attempt to handle in a single job."),
HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD("hive.compactor.abortedtxn.threshold", 1000,
"Number of aborted transactions involving a given table or partition that will trigger\n" +
"a major compaction."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_INITIATOR_FAILED_THRESHOLD
*/
@Deprecated
COMPACTOR_INITIATOR_FAILED_THRESHOLD("hive.compactor.initiator.failed.compacts.threshold", 2,
new RangeValidator(1, 20), "Number of consecutive compaction failures (per table/partition) " +
"after which automatic compactions will not be scheduled any more. Note that this must be less " +
"than hive.compactor.history.retention.failed."),
HIVE_COMPACTOR_CLEANER_RUN_INTERVAL("hive.compactor.cleaner.run.interval", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS), "Time between runs of the cleaner thread"),
COMPACTOR_JOB_QUEUE("hive.compactor.job.queue", "", "Used to specify name of Hadoop queue to which\n" +
"Compaction jobs will be submitted. Set to empty string to let Hadoop choose the queue."),
TRANSACTIONAL_CONCATENATE_NOBLOCK("hive.transactional.concatenate.noblock", false,
"Will cause 'alter table T concatenate' to be non-blocking"),
HIVE_COMPACTOR_COMPACT_MM("hive.compactor.compact.insert.only", true,
"Whether the compactor should compact insert-only tables. A safety switch."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_SUCCEEDED
*/
@Deprecated
COMPACTOR_HISTORY_RETENTION_SUCCEEDED("hive.compactor.history.retention.succeeded", 3,
new RangeValidator(0, 100), "Determines how many successful compaction records will be " +
"retained in compaction history for a given table/partition."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_FAILED
*/
@Deprecated
COMPACTOR_HISTORY_RETENTION_FAILED("hive.compactor.history.retention.failed", 3,
new RangeValidator(0, 100), "Determines how many failed compaction records will be " +
"retained in compaction history for a given table/partition."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_ATTEMPTED
*/
@Deprecated
COMPACTOR_HISTORY_RETENTION_ATTEMPTED("hive.compactor.history.retention.attempted", 2,
new RangeValidator(0, 100), "Determines how many attempted compaction records will be " +
"retained in compaction history for a given table/partition."),
/**
* @deprecated Use MetastoreConf.COMPACTOR_HISTORY_REAPER_INTERVAL
*/
@Deprecated
COMPACTOR_HISTORY_REAPER_INTERVAL("hive.compactor.history.reaper.interval", "2m",
new TimeValidator(TimeUnit.MILLISECONDS), "Determines how often compaction history reaper runs"),
/**
* @deprecated Use MetastoreConf.TIMEDOUT_TXN_REAPER_START
*/
@Deprecated
HIVE_TIMEDOUT_TXN_REAPER_START("hive.timedout.txn.reaper.start", "100s",
new TimeValidator(TimeUnit.MILLISECONDS), "Time delay of 1st reaper run after metastore start"),
/**
* @deprecated Use MetastoreConf.TIMEDOUT_TXN_REAPER_INTERVAL
*/
@Deprecated
HIVE_TIMEDOUT_TXN_REAPER_INTERVAL("hive.timedout.txn.reaper.interval", "180s",
new TimeValidator(TimeUnit.MILLISECONDS), "Time interval describing how often the reaper runs"),
/**
* @deprecated Use MetastoreConf.WRITE_SET_REAPER_INTERVAL
*/
@Deprecated
WRITE_SET_REAPER_INTERVAL("hive.writeset.reaper.interval", "60s",
new TimeValidator(TimeUnit.MILLISECONDS), "Frequency of WriteSet reaper runs"),
MERGE_CARDINALITY_VIOLATION_CHECK("hive.merge.cardinality.check", true,
"Set to true to ensure that each SQL Merge statement ensures that for each row in the target\n" +
"table there is at most 1 matching row in the source table per SQL Specification."),
OPTIMIZE_ACID_META_COLUMNS("hive.optimize.acid.meta.columns", true, "If true, don't decode\n" +
"Acid metadata columns from storage unless they are needed."),
// For Arrow SerDe
HIVE_ARROW_ROOT_ALLOCATOR_LIMIT("hive.arrow.root.allocator.limit", Long.MAX_VALUE,
"Arrow root allocator memory size limitation in bytes."),
HIVE_ARROW_BATCH_ALLOCATOR_LIMIT("hive.arrow.batch.allocator.limit", 10_000_000_000L,
"Max bytes per arrow batch. This is a threshold, the memory is not pre-allocated."),
HIVE_ARROW_BATCH_SIZE("hive.arrow.batch.size", 1000, "The number of rows sent in one Arrow batch."),
// For Druid storage handler
HIVE_DRUID_INDEXING_GRANULARITY("hive.druid.indexer.segments.granularity", "DAY",
new PatternSet("YEAR", "MONTH", "WEEK", "DAY", "HOUR", "MINUTE", "SECOND"),
"Granularity for the segments created by the Druid storage handler"
),
HIVE_DRUID_MAX_PARTITION_SIZE("hive.druid.indexer.partition.size.max", 5000000,
"Maximum number of records per segment partition"
),
HIVE_DRUID_MAX_ROW_IN_MEMORY("hive.druid.indexer.memory.rownum.max", 75000,
"Maximum number of records in memory while storing data in Druid"
),
HIVE_DRUID_BROKER_DEFAULT_ADDRESS("hive.druid.broker.address.default", "localhost:8082",
"Address of the Druid broker. If we are querying Druid from Hive, this address needs to be\n"
+
"declared"
),
HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS("hive.druid.coordinator.address.default", "localhost:8081",
"Address of the Druid coordinator. It is used to check the load status of newly created segments"
),
HIVE_DRUID_OVERLORD_DEFAULT_ADDRESS("hive.druid.overlord.address.default", "localhost:8090",
"Address of the Druid overlord. It is used to submit indexing tasks to druid."
),
HIVE_DRUID_SELECT_THRESHOLD("hive.druid.select.threshold", 10000,
"Takes only effect when hive.druid.select.distribute is set to false. \n" +
"When we can split a Select query, this is the maximum number of rows that we try to retrieve\n" +
"per query. In order to do that, we obtain the estimated size for the complete result. If the\n" +
"number of records of the query results is larger than this threshold, we split the query in\n" +
"total number of rows/threshold parts across the time dimension. Note that we assume the\n" +
"records to be split uniformly across the time dimension."),
HIVE_DRUID_NUM_HTTP_CONNECTION("hive.druid.http.numConnection", 20, "Number of connections used by\n" +
"the HTTP client."),
HIVE_DRUID_HTTP_READ_TIMEOUT("hive.druid.http.read.timeout", "PT1M", "Read timeout period for the HTTP\n" +
"client in ISO8601 format (for example P2W, P3M, PT1H30M, PT0.750S), default is period of 1 minute."),
HIVE_DRUID_SLEEP_TIME("hive.druid.sleep.time", "PT10S",
"Sleep time between retries in ISO8601 format (for example P2W, P3M, PT1H30M, PT0.750S), default is period of 10 seconds."
),
HIVE_DRUID_BASE_PERSIST_DIRECTORY("hive.druid.basePersistDirectory", "",
"Local temporary directory used to persist intermediate indexing state, will default to JVM system property java.io.tmpdir."
),
HIVE_DRUID_ROLLUP("hive.druid.rollup", true, "Whether to rollup druid rows or not."),
DRUID_SEGMENT_DIRECTORY("hive.druid.storage.storageDirectory", "/druid/segments"
, "druid deep storage location."),
DRUID_METADATA_BASE("hive.druid.metadata.base", "druid", "Default prefix for metadata tables"),
DRUID_METADATA_DB_TYPE("hive.druid.metadata.db.type", "mysql",
new PatternSet("mysql", "postgresql", "derby"), "Type of the metadata database."
),
    DRUID_METADATA_DB_USERNAME("hive.druid.metadata.username", "",
        "Username used to connect to the metadata DB."
    ),
    DRUID_METADATA_DB_PASSWORD("hive.druid.metadata.password", "",
        "Password used to connect to the metadata DB."
    ),
DRUID_METADATA_DB_URI("hive.druid.metadata.uri", "",
"URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)."
),
DRUID_WORKING_DIR("hive.druid.working.directory", "/tmp/workingDirectory",
"Default hdfs working directory used to store some intermediate metadata"
),
HIVE_DRUID_MAX_TRIES("hive.druid.maxTries", 5, "Maximum number of retries before giving up"),
HIVE_DRUID_PASSIVE_WAIT_TIME("hive.druid.passiveWaitTimeMs", 30000L,
"Wait time in ms default to 30 seconds."
),
    HIVE_DRUID_BITMAP_FACTORY_TYPE("hive.druid.bitmap.type", "roaring", new PatternSet("roaring", "concise"), "Coding algorithm used to encode the bitmaps"),
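    // Illustrative only: host names, ports and the database name below are placeholders. A
    // typical deployment overrides a few of the Druid addresses above in hive-site.xml, e.g.:
    //   hive.druid.broker.address.default      = druid-broker.example.com:8082
    //   hive.druid.coordinator.address.default = druid-coordinator.example.com:8081
    //   hive.druid.overlord.address.default    = druid-overlord.example.com:8090
    //   hive.druid.metadata.uri                = jdbc:mysql://druid-meta.example.com:3306/druid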
// For HBase storage handler
HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true,
"Whether writes to HBase should be forced to the write-ahead log. \n" +
"Disabling this improves HBase write performance at the risk of lost writes in case of a crash."),
HIVE_HBASE_GENERATE_HFILES("hive.hbase.generatehfiles", false,
"True when HBaseStorageHandler should generate hfiles instead of operate against the online table."),
HIVE_HBASE_SNAPSHOT_NAME("hive.hbase.snapshot.name", null, "The HBase table snapshot name to use."),
HIVE_HBASE_SNAPSHOT_RESTORE_DIR("hive.hbase.snapshot.restoredir", "/tmp", "The directory in which to " +
"restore the HBase table snapshot."),
// For har files
HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"),
HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"),
"Some select queries can be converted to single FETCH task minimizing latency.\n" +
"Currently the query should be single sourced not having any subquery and should not have\n" +
"any aggregations or distincts (which incurs RS), lateral views and joins.\n" +
"0. none : disable hive.fetch.task.conversion\n" +
"1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only\n" +
"2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)"
),
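    // Example of the effect of the setting above (table and column names are hypothetical):
    // with hive.fetch.task.conversion=more, a query such as
    //   SELECT col1 FROM src_table WHERE ds = '2020-01-01' LIMIT 10;
    // is served directly by a FETCH task instead of launching a cluster job, while with
    // "minimal" only SELECT *, partition-column filters and LIMIT qualify.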
HIVEFETCHTASKCONVERSIONTHRESHOLD("hive.fetch.task.conversion.threshold", 1073741824L,
"Input threshold for applying hive.fetch.task.conversion. If target table is native, input length\n" +
"is calculated by summation of file lengths. If it's not native, storage handler for the table\n" +
"can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface."),
HIVEFETCHTASKAGGR("hive.fetch.task.aggr", false,
"Aggregation queries with no group-by clause (for example, select count(*) from src) execute\n" +
"final aggregations in single reduce task. If this is set true, Hive delegates final aggregation\n" +
"stage to fetch task, possibly decreasing the query time."),
HIVEOPTIMIZEMETADATAQUERIES("hive.compute.query.using.stats", true,
"When set to true Hive will answer a few queries like count(1) purely using stats\n" +
"stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.\n" +
"For more advanced stats collection need to run analyze table queries."),
// Serde for FetchTask
HIVEFETCHOUTPUTSERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe",
"The SerDe used by FetchTask to serialize the fetch output."),
HIVEEXPREVALUATIONCACHE("hive.cache.expr.evaluation", true,
"If true, the evaluation result of a deterministic expression referenced twice or more\n" +
"will be cached.\n" +
"For example, in a filter condition like '.. where key + 10 = 100 or key + 10 = 0'\n" +
"the expression 'key + 10' will be evaluated/cached once and reused for the following\n" +
"expression ('key + 10 = 0'). Currently, this is applied only to expressions in select\n" +
"or filter operators."),
// Hive Variables
HIVEVARIABLESUBSTITUTE("hive.variable.substitute", true,
"This enables substitution using syntax like ${var} ${system:var} and ${env:var}."),
HIVEVARIABLESUBSTITUTEDEPTH("hive.variable.substitute.depth", 40,
"The maximum replacements the substitution engine will do."),
HIVECONFVALIDATION("hive.conf.validation", true,
"Enables type checking for registered Hive configurations"),
SEMANTIC_ANALYZER_HOOK("hive.semantic.analyzer.hook", "", ""),
HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE(
"hive.test.authz.sstd.hs2.mode", false, "test hs2 mode from .q tests", true),
HIVE_AUTHORIZATION_ENABLED("hive.security.authorization.enabled", false,
"enable or disable the Hive client authorization"),
HIVE_AUTHORIZATION_KERBEROS_USE_SHORTNAME("hive.security.authorization.kerberos.use.shortname", true,
"use short name in Kerberos cluster"),
HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager",
"org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory",
"The Hive client authorization manager class name. The user defined authorization class should implement \n" +
"interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider."),
HIVE_AUTHENTICATOR_MANAGER("hive.security.authenticator.manager",
"org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator",
"hive client authenticator manager class name. The user defined authenticator should implement \n" +
"interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
HIVE_METASTORE_AUTHORIZATION_MANAGER("hive.security.metastore.authorization.manager",
"org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider",
"Names of authorization manager classes (comma separated) to be used in the metastore\n" +
"for authorization. The user defined authorization class should implement interface\n" +
"org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.\n" +
"All authorization manager classes have to successfully authorize the metastore API\n" +
"call for the command execution to be allowed."),
HIVE_METASTORE_AUTHORIZATION_AUTH_READS("hive.security.metastore.authorization.auth.reads", true,
"If this is true, metastore authorizer authorizes read actions on database, table"),
HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager",
"org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
"authenticator manager class name to be used in the metastore for authentication. \n" +
"The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
HIVE_AUTHORIZATION_TABLE_USER_GRANTS("hive.security.authorization.createtable.user.grants", "",
"the privileges automatically granted to some users whenever a table gets created.\n" +
"An example like \"userX,userY:select;userZ:create\" will grant select privilege to userX and userY,\n" +
"and grant create privilege to userZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS("hive.security.authorization.createtable.group.grants",
"",
"the privileges automatically granted to some groups whenever a table gets created.\n" +
"An example like \"groupX,groupY:select;groupZ:create\" will grant select privilege to groupX and groupY,\n" +
"and grant create privilege to groupZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS("hive.security.authorization.createtable.role.grants", "",
"the privileges automatically granted to some roles whenever a table gets created.\n" +
"An example like \"roleX,roleY:select;roleZ:create\" will grant select privilege to roleX and roleY,\n" +
"and grant create privilege to roleZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS("hive.security.authorization.createtable.owner.grants",
"",
"The privileges automatically granted to the owner whenever a table gets created.\n" +
"An example like \"select,drop\" will grant select and drop privilege to the owner\n" +
"of the table. Note that the default gives the creator of a table no access to the\n" +
"table (but see HIVE-8067)."),
HIVE_AUTHORIZATION_TASK_FACTORY("hive.security.authorization.task.factory",
"org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl",
"Authorization DDL task factory implementation"),
// if this is not set default value is set during config initialization
// Default value can't be set in this constructor as it would refer names in other ConfVars
// whose constructor would not have been called
HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST(
"hive.security.authorization.sqlstd.confwhitelist", "",
"A Java regex. Configurations parameters that match this\n" +
"regex can be modified by user when SQL standard authorization is enabled.\n" +
"To get the default value, use the 'set <param>' command.\n" +
"Note that the hive.conf.restricted.list checks are still enforced after the white list\n" +
"check"),
HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND(
"hive.security.authorization.sqlstd.confwhitelist.append", "",
"2nd Java regex that it would match in addition to\n" +
"hive.security.authorization.sqlstd.confwhitelist.\n" +
"Do not include a starting \"|\" in the value. Using this regex instead\n" +
"of updating the original regex means that you can append to the default\n" +
"set by SQL standard authorization instead of replacing it entirely."),
HIVE_CLI_PRINT_HEADER("hive.cli.print.header", false, "Whether to print the names of the columns in query output."),
HIVE_CLI_PRINT_ESCAPE_CRLF("hive.cli.print.escape.crlf", false,
"Whether to print carriage returns and line feeds in row output as escaped \\r and \\n"),
HIVE_CLI_TEZ_SESSION_ASYNC("hive.cli.tez.session.async", true, "Whether to start Tez\n" +
"session in background when running CLI with Tez, allowing CLI to be available earlier."),
HIVE_DISABLE_UNSAFE_EXTERNALTABLE_OPERATIONS("hive.disable.unsafe.external.table.operations", true,
"Whether to disable certain optimizations and operations on external tables," +
" on the assumption that data changes by external applications may have negative effects" +
" on these operations."),
HIVE_STRICT_MANAGED_TABLES("hive.strict.managed.tables", false,
"Whether strict managed tables mode is enabled. With this mode enabled, " +
"only transactional tables (both full and insert-only) are allowed to be created as managed tables"),
HIVE_EXTERNALTABLE_PURGE_DEFAULT("hive.external.table.purge.default", false,
"Set to true to set external.table.purge=true on newly created external tables," +
" which will specify that the table data should be deleted when the table is dropped." +
" Set to false maintain existing behavior that external tables do not delete data" +
" when the table is dropped."),
HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false,
"Whether to throw an exception if dynamic partition insert generates empty results."),
HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile,file,s3,s3a,gs",
"A comma separated list of acceptable URI schemes for import and export."),
// temporary variable for testing. This is added just to turn off this feature in case of a bug in
// deployment. It has not been documented in hive-default.xml intentionally, this should be removed
// once the feature is stable
HIVE_EXIM_RESTRICT_IMPORTS_INTO_REPLICATED_TABLES("hive.exim.strict.repl.tables",true,
"Parameter that determines if 'regular' (non-replication) export dumps can be\n" +
"imported on to tables that are the target of replication. If this parameter is\n" +
"set, regular imports will check if the destination table(if it exists) has a " +
"'repl.last.id' set on it. If so, it will fail."),
HIVE_REPL_TASK_FACTORY("hive.repl.task.factory",
"org.apache.hive.hcatalog.api.repl.exim.EximReplicationTaskFactory",
"Parameter that can be used to override which ReplicationTaskFactory will be\n" +
"used to instantiate ReplicationTask events. Override for third party repl plugins"),
HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false, ""),
HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false,
"should rework the mapred work or not.\n" +
"This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time."),
HIVE_IO_EXCEPTION_HANDLERS("hive.io.exception.handlers", "",
"A list of io exception handler class names. This is used\n" +
"to construct a list exception handlers to handle exceptions thrown\n" +
"by record readers"),
// logging configuration
HIVE_LOG4J_FILE("hive.log4j.file", "",
"Hive log4j configuration file.\n" +
"If the property is not set, then logging will be initialized using hive-log4j2.properties found on the classpath.\n" +
"If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
"which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
HIVE_EXEC_LOG4J_FILE("hive.exec.log4j.file", "",
"Hive log4j configuration file for execution mode(sub command).\n" +
"If the property is not set, then logging will be initialized using hive-exec-log4j2.properties found on the classpath.\n" +
"If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
"which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
HIVE_ASYNC_LOG_ENABLED("hive.async.log.enabled", true,
"Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give\n" +
" significant performance improvement as logging will be handled in separate thread\n" +
" that uses LMAX disruptor queue for buffering log messages.\n" +
" Refer https://logging.apache.org/log4j/2.x/manual/async.html for benefits and\n" +
" drawbacks."),
HIVE_LOG_EXPLAIN_OUTPUT("hive.log.explain.output", false,
"Whether to log explain output for every query.\n" +
"When enabled, will log EXPLAIN EXTENDED output for the query at INFO log4j log level\n" +
"and in WebUI / Drilldown / Show Query."),
HIVE_EXPLAIN_USER("hive.explain.user", true,
"Whether to show explain result at user level.\n" +
"When enabled, will log EXPLAIN output for the query at user level. Tez only."),
HIVE_SPARK_EXPLAIN_USER("hive.spark.explain.user", false,
"Whether to show explain result at user level.\n" +
"When enabled, will log EXPLAIN output for the query at user level. Spark only."),
// prefix used to auto generated column aliases (this should be started with '_')
HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL("hive.autogen.columnalias.prefix.label", "_c",
"String used as a prefix when auto generating column alias.\n" +
"By default the prefix label will be appended with a column position number to form the column alias. \n" +
"Auto generation would happen if an aggregate function is used in a select clause without an explicit alias."),
HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME(
"hive.autogen.columnalias.prefix.includefuncname", false,
"Whether to include function name in the column alias auto generated by Hive."),
HIVE_METRICS_CLASS("hive.service.metrics.class",
"org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics",
new StringSet(
"org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics",
"org.apache.hadoop.hive.common.metrics.LegacyMetrics"),
"Hive metrics subsystem implementation class."),
HIVE_CODAHALE_METRICS_REPORTER_CLASSES("hive.service.metrics.codahale.reporter.classes",
"org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
"org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter",
"Comma separated list of reporter implementation classes for metric class "
+ "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics. Overrides "
+ "HIVE_METRICS_REPORTER conf if present"),
@Deprecated
HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "",
"Reporter implementations for metric class "
+ "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;" +
"Deprecated, use HIVE_CODAHALE_METRICS_REPORTER_CLASSES instead. This configuraiton will be"
+ " overridden by HIVE_CODAHALE_METRICS_REPORTER_CLASSES if present. " +
"Comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2"),
HIVE_METRICS_JSON_FILE_LOCATION("hive.service.metrics.file.location", "/tmp/report.json",
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, the location of local JSON metrics file. " +
"This file will get overwritten at every interval."),
HIVE_METRICS_JSON_FILE_INTERVAL("hive.service.metrics.file.frequency", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
"the frequency of updating JSON metrics file."),
HIVE_METRICS_HADOOP2_INTERVAL("hive.service.metrics.hadoop2.frequency", "30s",
new TimeValidator(TimeUnit.SECONDS),
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.Metrics2Reporter, " +
"the frequency of updating the HADOOP2 metrics system."),
HIVE_METRICS_HADOOP2_COMPONENT_NAME("hive.service.metrics.hadoop2.component",
"hive",
"Component name to provide to Hadoop2 Metrics system. Ideally 'hivemetastore' for the MetaStore " +
" and and 'hiveserver2' for HiveServer2."
),
HIVE_PERF_LOGGER("hive.exec.perf.logger", "org.apache.hadoop.hive.ql.log.PerfLogger",
"The class responsible for logging client side performance metrics. \n" +
"Must be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger"),
HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false,
"To cleanup the Hive scratchdir when starting the Hive Server"),
HIVE_SCRATCH_DIR_LOCK("hive.scratchdir.lock", false,
"To hold a lock file in scratchdir to prevent to be removed by cleardanglingscratchdir"),
HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false,
"Where to insert into multilevel directories like\n" +
"\"insert directory '/HIVEFT25686/chinna/' from table\""),
HIVE_CTAS_EXTERNAL_TABLES("hive.ctas.external.tables", true,
"whether CTAS for external tables is allowed"),
HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true,
"whether insert into external tables is allowed"),
HIVE_TEMPORARY_TABLE_STORAGE(
"hive.exec.temporary.table.storage", "default", new StringSet("memory",
"ssd", "default"), "Define the storage policy for temporary tables." +
"Choices between memory, ssd and default"),
HIVE_QUERY_LIFETIME_HOOKS("hive.query.lifetime.hooks", "",
"A comma separated list of hooks which implement QueryLifeTimeHook. These will be triggered" +
" before/after query compilation and before/after query execution, in the order specified." +
"Implementations of QueryLifeTimeHookWithParseHooks can also be specified in this list. If they are" +
"specified then they will be invoked in the same places as QueryLifeTimeHooks and will be invoked during pre " +
"and post query parsing"),
HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", "",
"A comma separated list of hooks which implement HiveDriverRunHook. Will be run at the beginning " +
"and end of Driver.run, these will be run in the order specified."),
HIVE_DDL_OUTPUT_FORMAT("hive.ddl.output.format", null,
"The data format to use for DDL output. One of \"text\" (for human\n" +
"readable text) or \"json\" (for a json object)."),
HIVE_ENTITY_SEPARATOR("hive.entity.separator", "@",
"Separator used to construct names of tables and partitions. For example, dbname@tablename@partitionname"),
HIVE_CAPTURE_TRANSFORM_ENTITY("hive.entity.capture.transform", false,
"Compiler to capture transform URI referred in the query"),
HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY("hive.display.partition.cols.separately", true,
"In older Hive version (0.10 and earlier) no distinction was made between\n" +
"partition columns or non-partition columns while displaying columns in describe\n" +
"table. From 0.12 onwards, they are displayed separately. This flag will let you\n" +
"get old behavior, if desired. See, test-case in patch for HIVE-6689."),
HIVE_SSL_PROTOCOL_BLACKLIST("hive.ssl.protocol.blacklist", "SSLv2,SSLv3",
"SSL Versions to disable for all Hive Servers"),
HIVE_PRIVILEGE_SYNCHRONIZER("hive.privilege.synchronizer", true,
"Whether to synchronize privileges from external authorizer periodically in HS2"),
HIVE_PRIVILEGE_SYNCHRONIZER_INTERVAL("hive.privilege.synchronizer.interval",
"1800s", new TimeValidator(TimeUnit.SECONDS),
"Interval to synchronize privileges from external authorizer periodically in HS2"),
// HiveServer2 specific configs
HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR("hive.server2.clear.dangling.scratchdir", false,
"Clear dangling scratch dir periodically in HS2"),
HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL("hive.server2.clear.dangling.scratchdir.interval",
"1800s", new TimeValidator(TimeUnit.SECONDS),
"Interval to clear dangling scratch dir periodically in HS2"),
HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS("hive.server2.sleep.interval.between.start.attempts",
"60s", new TimeValidator(TimeUnit.MILLISECONDS, 0l, true, Long.MAX_VALUE, true),
"Amount of time to sleep between HiveServer2 start attempts. Primarily meant for tests"),
HIVE_SERVER2_MAX_START_ATTEMPTS("hive.server2.max.start.attempts", 30L, new RangeValidator(0L, null),
"Number of times HiveServer2 will attempt to start before exiting. The sleep interval between retries" +
" is determined by " + ConfVars.HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS.varname +
"\n The default of 30 will keep trying for 30 minutes."),
HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY("hive.server2.support.dynamic.service.discovery", false,
"Whether HiveServer2 supports dynamic service discovery for its clients. " +
"To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself, " +
"when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: " +
"hive.zookeeper.quorum in their connection string."),
HIVE_SERVER2_ZOOKEEPER_NAMESPACE("hive.server2.zookeeper.namespace", "hiveserver2",
"The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery."),
HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS("hive.server2.zookeeper.publish.configs", true,
"Whether we should publish HiveServer2's configs to ZooKeeper."),
HIVE_SERVER2_LEADER_ZOOKEEPER_NAMESPACE("hive.server2.leader.zookeeper.namespace", "hiveserver2-leader",
"Zookeeper znode for HiveServer2 leader selection."),
// HiveServer2 global init file location
HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION("hive.server2.global.init.file.location", "${env:HIVE_CONF_DIR}",
"Either the location of a HS2 global init file or a directory containing a .hiverc file. If the \n" +
"property is set, the value must be a valid path to an init file or directory where the init file is located."),
HIVE_SERVER2_TRANSPORT_MODE("hive.server2.transport.mode", "binary", new StringSet("binary", "http"),
"Transport mode of HiveServer2."),
HIVE_SERVER2_THRIFT_BIND_HOST("hive.server2.thrift.bind.host", "",
"Bind host on which to run the HiveServer2 Thrift service."),
HIVE_SERVER2_PARALLEL_COMPILATION("hive.driver.parallel.compilation", false, "Whether to\n" +
"enable parallel compilation of the queries between sessions and within the same session on HiveServer2. The default is false."),
HIVE_SERVER2_COMPILE_LOCK_TIMEOUT("hive.server2.compile.lock.timeout", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Number of seconds a request will wait to acquire the compile lock before giving up. " +
"Setting it to 0s disables the timeout."),
HIVE_SERVER2_PARALLEL_OPS_IN_SESSION("hive.server2.parallel.ops.in.session", false,
"Whether to allow several parallel operations (such as SQL statements) in one session."),
HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL("hive.server2.materializedviews.registry.impl", "DEFAULT",
new StringSet("DEFAULT", "DUMMY"),
"The implementation that we should use for the materialized views registry. \n" +
" DEFAULT: Default cache for materialized views\n" +
" DUMMY: Do not cache materialized views and hence forward requests to metastore"),
// HiveServer2 WebUI
HIVE_SERVER2_WEBUI_BIND_HOST("hive.server2.webui.host", "0.0.0.0", "The host address the HiveServer2 WebUI will listen on"),
HIVE_SERVER2_WEBUI_PORT("hive.server2.webui.port", 10002, "The port the HiveServer2 WebUI will listen on. This can be"
+ "set to 0 or a negative integer to disable the web UI"),
HIVE_SERVER2_WEBUI_MAX_THREADS("hive.server2.webui.max.threads", 50, "The max HiveServer2 WebUI threads"),
HIVE_SERVER2_WEBUI_USE_SSL("hive.server2.webui.use.ssl", false,
"Set this to true for using SSL encryption for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PATH("hive.server2.webui.keystore.path", "",
"SSL certificate keystore location for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD("hive.server2.webui.keystore.password", "",
"SSL certificate keystore password for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_USE_SPNEGO("hive.server2.webui.use.spnego", false,
"If true, the HiveServer2 WebUI will be secured with SPNEGO. Clients must authenticate with Kerberos."),
HIVE_SERVER2_WEBUI_SPNEGO_KEYTAB("hive.server2.webui.spnego.keytab", "",
"The path to the Kerberos Keytab file containing the HiveServer2 WebUI SPNEGO service principal."),
HIVE_SERVER2_WEBUI_SPNEGO_PRINCIPAL("hive.server2.webui.spnego.principal",
"HTTP/[email protected]", "The HiveServer2 WebUI SPNEGO service principal.\n" +
"The special string _HOST will be replaced automatically with \n" +
"the value of hive.server2.webui.host or the correct host name."),
HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES("hive.server2.webui.max.historic.queries", 25,
"The maximum number of past queries to show in HiverSever2 WebUI."),
HIVE_SERVER2_WEBUI_USE_PAM("hive.server2.webui.use.pam", false,
"If true, the HiveServer2 WebUI will be secured with PAM."),
HIVE_SERVER2_WEBUI_ENABLE_CORS("hive.server2.webui.enable.cors", false,
"Whether to enable cross origin requests (CORS)\n"),
HIVE_SERVER2_WEBUI_CORS_ALLOWED_ORIGINS("hive.server2.webui.cors.allowed.origins", "*",
"Comma separated list of origins that are allowed when CORS is enabled.\n"),
HIVE_SERVER2_WEBUI_CORS_ALLOWED_METHODS("hive.server2.webui.cors.allowed.methods", "GET,POST,DELETE,HEAD",
"Comma separated list of http methods that are allowed when CORS is enabled.\n"),
HIVE_SERVER2_WEBUI_CORS_ALLOWED_HEADERS("hive.server2.webui.cors.allowed.headers",
"X-Requested-With,Content-Type,Accept,Origin",
"Comma separated list of http headers that are allowed when CORS is enabled.\n"),
// Tez session settings
HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE("hive.server2.active.passive.ha.enable", false,
"Whether HiveServer2 Active/Passive High Availability be enabled when Hive Interactive sessions are enabled." +
"This will also require hive.server2.support.dynamic.service.discovery to be enabled."),
HIVE_SERVER2_AP_HA_RECOVER_SESSIONS("hive.server2.active.passive.ha.recover.sessions",
true, "Whether to recover sessions if using active-passive HA."),
HIVE_SERVER2_ACTIVE_PASSIVE_HA_REGISTRY_NAMESPACE("hive.server2.active.passive.ha.registry.namespace",
"hs2ActivePassiveHA",
"When HiveServer2 Active/Passive High Availability is enabled, uses this namespace for registering HS2\n" +
"instances with zookeeper"),
HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE("hive.server2.tez.interactive.queue", "",
"A single YARN queues to use for Hive Interactive sessions. When this is specified,\n" +
"workload management is enabled and used for these sessions."),
HIVE_SERVER2_WM_WORKER_THREADS("hive.server2.wm.worker.threads", 4,
"Number of worker threads to use to perform the synchronous operations with Tez\n" +
"sessions for workload management (e.g. opening, closing, etc.)"),
HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC("hive.server2.wm.allow.any.pool.via.jdbc", false,
"Applies when a user specifies a target WM pool in the JDBC connection string. If\n" +
"false, the user can only specify a pool he is mapped to (e.g. make a choice among\n" +
"multiple group mappings); if true, the user can specify any existing pool."),
HIVE_SERVER2_WM_POOL_METRICS("hive.server2.wm.pool.metrics", true,
"Whether per-pool WM metrics should be enabled."),
HIVE_SERVER2_TEZ_WM_AM_REGISTRY_TIMEOUT("hive.server2.tez.wm.am.registry.timeout", "30s",
new TimeValidator(TimeUnit.SECONDS),
"The timeout for AM registry registration, after which (on attempting to use the\n" +
"session), we kill it and try to get another one."),
HIVE_SERVER2_TEZ_DEFAULT_QUEUES("hive.server2.tez.default.queues", "",
"A list of comma separated values corresponding to YARN queues of the same name.\n" +
"When HiveServer2 is launched in Tez mode, this configuration needs to be set\n" +
"for multiple Tez sessions to run in parallel on the cluster."),
HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE("hive.server2.tez.sessions.per.default.queue", 1,
"A positive integer that determines the number of Tez sessions that should be\n" +
"launched on each of the queues specified by \"hive.server2.tez.default.queues\".\n" +
"Determines the parallelism on each queue."),
HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS("hive.server2.tez.initialize.default.sessions",
false,
"This flag is used in HiveServer2 to enable a user to use HiveServer2 without\n" +
"turning on Tez for HiveServer2. The user could potentially want to run queries\n" +
"over Tez without the pool of sessions."),
HIVE_SERVER2_TEZ_QUEUE_ACCESS_CHECK("hive.server2.tez.queue.access.check", false,
"Whether to check user access to explicitly specified YARN queues. " +
"yarn.resourcemanager.webapp.address must be configured to use this."),
HIVE_SERVER2_TEZ_SESSION_LIFETIME("hive.server2.tez.session.lifetime", "162h",
new TimeValidator(TimeUnit.HOURS),
"The lifetime of the Tez sessions launched by HS2 when default sessions are enabled.\n" +
"Set to 0 to disable session expiration."),
HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER("hive.server2.tez.session.lifetime.jitter", "3h",
new TimeValidator(TimeUnit.HOURS),
"The jitter for Tez session lifetime; prevents all the sessions from restarting at once."),
HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS("hive.server2.tez.sessions.init.threads", 16,
"If hive.server2.tez.initialize.default.sessions is enabled, the maximum number of\n" +
"threads to use to initialize the default sessions."),
HIVE_SERVER2_TEZ_SESSION_RESTRICTED_CONFIGS("hive.server2.tez.sessions.restricted.configs", "",
"The configuration settings that cannot be set when submitting jobs to HiveServer2. If\n" +
"any of these are set to values different from those in the server configuration, an\n" +
"exception will be thrown."),
HIVE_SERVER2_TEZ_SESSION_CUSTOM_QUEUE_ALLOWED("hive.server2.tez.sessions.custom.queue.allowed",
"true", new StringSet("true", "false", "ignore"),
"Whether Tez session pool should allow submitting queries to custom queues. The options\n" +
"are true, false (error out), ignore (accept the query but ignore the queue setting)."),
// Operation log configuration
HIVE_SERVER2_LOGGING_OPERATION_ENABLED("hive.server2.logging.operation.enabled", true,
"When true, HS2 will save operation logs and make them available for clients"),
HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION("hive.server2.logging.operation.log.location",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}" + File.separator +
"operation_logs",
"Top level directory where operation logs are stored if logging functionality is enabled"),
HIVE_SERVER2_LOGGING_OPERATION_LEVEL("hive.server2.logging.operation.level", "EXECUTION",
new StringSet("NONE", "EXECUTION", "PERFORMANCE", "VERBOSE"),
"HS2 operation logging mode available to clients to be set at session level.\n" +
"For this to work, hive.server2.logging.operation.enabled should be set to true.\n" +
" NONE: Ignore any logging\n" +
" EXECUTION: Log completion of tasks\n" +
" PERFORMANCE: Execution + Performance logs \n" +
" VERBOSE: All logs" ),
HIVE_SERVER2_OPERATION_LOG_CLEANUP_DELAY("hive.server2.operation.log.cleanup.delay", "300s",
new TimeValidator(TimeUnit.SECONDS), "When a query is cancelled (via kill query, query timeout or triggers),\n" +
" operation logs gets cleaned up after this delay"),
// HS2 connections guard rails
HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER("hive.server2.limit.connections.per.user", 0,
"Maximum hive server2 connections per user. Any user exceeding this limit will not be allowed to connect. " +
"Default=0 does not enforce limits."),
HIVE_SERVER2_LIMIT_CONNECTIONS_PER_IPADDRESS("hive.server2.limit.connections.per.ipaddress", 0,
"Maximum hive server2 connections per ipaddress. Any ipaddress exceeding this limit will not be allowed " +
"to connect. Default=0 does not enforce limits."),
HIVE_SERVER2_LIMIT_CONNECTIONS_PER_USER_IPADDRESS("hive.server2.limit.connections.per.user.ipaddress", 0,
"Maximum hive server2 connections per user:ipaddress combination. Any user-ipaddress exceeding this limit will " +
"not be allowed to connect. Default=0 does not enforce limits."),
// Enable metric collection for HiveServer2
HIVE_SERVER2_METRICS_ENABLED("hive.server2.metrics.enabled", false, "Enable metrics on the HiveServer2."),
// http (over thrift) transport settings
HIVE_SERVER2_THRIFT_HTTP_PORT("hive.server2.thrift.http.port", 10001,
"Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'."),
HIVE_SERVER2_THRIFT_HTTP_PATH("hive.server2.thrift.http.path", "cliservice",
"Path component of URL endpoint when in HTTP mode."),
HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE("hive.server2.thrift.max.message.size", 100*1024*1024,
"Maximum message size in bytes a HS2 server will accept."),
HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME("hive.server2.thrift.http.max.idle.time", "1800s",
new TimeValidator(TimeUnit.MILLISECONDS),
"Maximum idle time for a connection on the server when in HTTP mode."),
HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME("hive.server2.thrift.http.worker.keepalive.time", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Keepalive time for an idle http worker thread. When the number of workers exceeds min workers, " +
"excessive threads are killed after this time interval."),
HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE("hive.server2.thrift.http.request.header.size", 6*1024,
"Request header size in bytes, when using HTTP transport mode. Jetty defaults used."),
HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE("hive.server2.thrift.http.response.header.size", 6*1024,
"Response header size in bytes, when using HTTP transport mode. Jetty defaults used."),
HIVE_SERVER2_THRIFT_HTTP_COMPRESSION_ENABLED("hive.server2.thrift.http.compression.enabled", true,
"Enable thrift http compression via Jetty compression support"),
// Cookie based authentication when using HTTP Transport
HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED("hive.server2.thrift.http.cookie.auth.enabled", true,
"When true, HiveServer2 in HTTP transport mode, will use cookie based authentication mechanism."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE("hive.server2.thrift.http.cookie.max.age", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"Maximum age in seconds for server side cookie used by HS2 in HTTP mode."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN("hive.server2.thrift.http.cookie.domain", null,
"Domain for the HS2 generated cookies"),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH("hive.server2.thrift.http.cookie.path", null,
"Path for the HS2 generated cookies"),
@Deprecated
HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE("hive.server2.thrift.http.cookie.is.secure", true,
"Deprecated: Secure attribute of the HS2 generated cookie (this is automatically enabled for SSL enabled HiveServer2)."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY("hive.server2.thrift.http.cookie.is.httponly", true,
"HttpOnly attribute of the HS2 generated cookie."),
// binary transport settings
HIVE_SERVER2_THRIFT_PORT("hive.server2.thrift.port", 10000,
"Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'binary'."),
HIVE_SERVER2_THRIFT_SASL_QOP("hive.server2.thrift.sasl.qop", "auth",
new StringSet("auth", "auth-int", "auth-conf"),
"Sasl QOP value; set it to one of following values to enable higher levels of\n" +
"protection for HiveServer2 communication with clients.\n" +
"Setting hadoop.rpc.protection to a higher level than HiveServer2 does not\n" +
"make sense in most situations. HiveServer2 ignores hadoop.rpc.protection in favor\n" +
"of hive.server2.thrift.sasl.qop.\n" +
" \"auth\" - authentication only (default)\n" +
" \"auth-int\" - authentication plus integrity protection\n" +
" \"auth-conf\" - authentication plus integrity and confidentiality protection\n" +
"This is applicable only if HiveServer2 is configured to use Kerberos authentication."),
HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS("hive.server2.thrift.min.worker.threads", 5,
"Minimum number of Thrift worker threads"),
HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS("hive.server2.thrift.max.worker.threads", 500,
"Maximum number of Thrift worker threads"),
HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH(
"hive.server2.thrift.exponential.backoff.slot.length", "100ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Binary exponential backoff slot time for Thrift clients during login to HiveServer2,\n" +
"for retries until hitting Thrift client timeout"),
HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT("hive.server2.thrift.login.timeout", "20s",
new TimeValidator(TimeUnit.SECONDS), "Timeout for Thrift clients during login to HiveServer2"),
HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME("hive.server2.thrift.worker.keepalive.time", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Keepalive time (in seconds) for an idle worker thread. When the number of workers exceeds min workers, " +
"excessive threads are killed after this time interval."),
// Configuration for async thread pool in SessionManager
HIVE_SERVER2_ASYNC_EXEC_THREADS("hive.server2.async.exec.threads", 100,
"Number of threads in the async thread pool for HiveServer2"),
HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT("hive.server2.async.exec.shutdown.timeout", "10s",
new TimeValidator(TimeUnit.SECONDS),
"How long HiveServer2 shutdown will wait for async threads to terminate."),
HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE("hive.server2.async.exec.wait.queue.size", 100,
"Size of the wait queue for async thread pool in HiveServer2.\n" +
"After hitting this limit, the async thread pool will reject new requests."),
HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME("hive.server2.async.exec.keepalive.time", "10s",
new TimeValidator(TimeUnit.SECONDS),
"Time that an idle HiveServer2 async thread (from the thread pool) will wait for a new task\n" +
"to arrive before terminating"),
HIVE_SERVER2_ASYNC_EXEC_ASYNC_COMPILE("hive.server2.async.exec.async.compile", false,
"Whether to enable compiling async query asynchronously. If enabled, it is unknown if the query will have any resultset before compilation completed."),
HIVE_SERVER2_LONG_POLLING_TIMEOUT("hive.server2.long.polling.timeout", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time that HiveServer2 will wait before responding to asynchronous calls that use long polling"),
HIVE_SESSION_IMPL_CLASSNAME("hive.session.impl.classname", null, "Classname for custom implementation of hive session"),
HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME("hive.session.impl.withugi.classname", null, "Classname for custom implementation of hive session with UGI"),
// HiveServer2 auth configuration
HIVE_SERVER2_AUTHENTICATION("hive.server2.authentication", "NONE",
new StringSet("NOSASL", "NONE", "LDAP", "KERBEROS", "PAM", "CUSTOM"),
"Client authentication types.\n" +
" NONE: no authentication check\n" +
" LDAP: LDAP/AD based authentication\n" +
" KERBEROS: Kerberos/GSSAPI authentication\n" +
" CUSTOM: Custom authentication provider\n" +
" (Use with property hive.server2.custom.authentication.class)\n" +
" PAM: Pluggable authentication module\n" +
" NOSASL: Raw transport"),
HIVE_SERVER2_ALLOW_USER_SUBSTITUTION("hive.server2.allow.user.substitution", true,
"Allow alternate user to be specified as part of HiveServer2 open connection request."),
HIVE_SERVER2_KERBEROS_KEYTAB("hive.server2.authentication.kerberos.keytab", "",
"Kerberos keytab file for server principal"),
HIVE_SERVER2_KERBEROS_PRINCIPAL("hive.server2.authentication.kerberos.principal", "",
"Kerberos server principal"),
HIVE_SERVER2_CLIENT_KERBEROS_PRINCIPAL("hive.server2.authentication.client.kerberos.principal", "",
"Kerberos principal used by the HA hive_server2s."),
HIVE_SERVER2_SPNEGO_KEYTAB("hive.server2.authentication.spnego.keytab", "",
"keytab file for SPNego principal, optional,\n" +
"typical value would look like /etc/security/keytabs/spnego.service.keytab,\n" +
"This keytab would be used by HiveServer2 when Kerberos security is enabled and \n" +
"HTTP transport mode is used.\n" +
"This needs to be set only if SPNEGO is to be used in authentication.\n" +
"SPNego authentication would be honored only if valid\n" +
" hive.server2.authentication.spnego.principal\n" +
"and\n" +
" hive.server2.authentication.spnego.keytab\n" +
"are specified."),
HIVE_SERVER2_SPNEGO_PRINCIPAL("hive.server2.authentication.spnego.principal", "",
"SPNego service principal, optional,\n" +
"typical value would look like HTTP/[email protected]\n" +
"SPNego service principal would be used by HiveServer2 when Kerberos security is enabled\n" +
"and HTTP transport mode is used.\n" +
"This needs to be set only if SPNEGO is to be used in authentication."),
HIVE_SERVER2_PLAIN_LDAP_URL("hive.server2.authentication.ldap.url", null,
"LDAP connection URL(s),\n" +
"this value could contain URLs to multiple LDAP servers instances for HA,\n" +
"each LDAP URL is separated by a SPACE character. URLs are used in the \n" +
" order specified until a connection is successful."),
HIVE_SERVER2_PLAIN_LDAP_BASEDN("hive.server2.authentication.ldap.baseDN", null, "LDAP base DN"),
HIVE_SERVER2_PLAIN_LDAP_DOMAIN("hive.server2.authentication.ldap.Domain", null, ""),
HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN("hive.server2.authentication.ldap.groupDNPattern", null,
"COLON-separated list of patterns to use to find DNs for group entities in this directory.\n" +
"Use %s where the actual group name is to be substituted for.\n" +
"For example: CN=%s,CN=Groups,DC=subdomain,DC=domain,DC=com."),
HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER("hive.server2.authentication.ldap.groupFilter", null,
"COMMA-separated list of LDAP Group names (short name not full DNs).\n" +
"For example: HiveAdmins,HadoopAdmins,Administrators"),
HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN("hive.server2.authentication.ldap.userDNPattern", null,
"COLON-separated list of patterns to use to find DNs for users in this directory.\n" +
"Use %s where the actual group name is to be substituted for.\n" +
"For example: CN=%s,CN=Users,DC=subdomain,DC=domain,DC=com."),
HIVE_SERVER2_PLAIN_LDAP_USERFILTER("hive.server2.authentication.ldap.userFilter", null,
"COMMA-separated list of LDAP usernames (just short names, not full DNs).\n" +
"For example: hiveuser,impalauser,hiveadmin,hadoopadmin"),
HIVE_SERVER2_PLAIN_LDAP_GUIDKEY("hive.server2.authentication.ldap.guidKey", "uid",
"LDAP attribute name whose values are unique in this LDAP server.\n" +
"For example: uid or CN."),
HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY("hive.server2.authentication.ldap.groupMembershipKey", "member",
"LDAP attribute name on the group object that contains the list of distinguished names\n" +
"for the user, group, and contact objects that are members of the group.\n" +
"For example: member, uniqueMember or memberUid"),
HIVE_SERVER2_PLAIN_LDAP_USERMEMBERSHIP_KEY(HIVE_SERVER2_AUTHENTICATION_LDAP_USERMEMBERSHIPKEY_NAME, null,
"LDAP attribute name on the user object that contains groups of which the user is\n" +
"a direct member, except for the primary group, which is represented by the\n" +
"primaryGroupId.\n" +
"For example: memberOf"),
HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY("hive.server2.authentication.ldap.groupClassKey", "groupOfNames",
"LDAP attribute name on the group entry that is to be used in LDAP group searches.\n" +
"For example: group, groupOfNames or groupOfUniqueNames."),
HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY("hive.server2.authentication.ldap.customLDAPQuery", null,
"A full LDAP query that LDAP Atn provider uses to execute against LDAP Server.\n" +
"If this query returns a null resultset, the LDAP Provider fails the Authentication\n" +
"request, succeeds if the user is part of the resultset." +
"For example: (&(objectClass=group)(objectClass=top)(instanceType=4)(cn=Domain*)) \n" +
"(&(objectClass=person)(|(sAMAccountName=admin)(|(memberOf=CN=Domain Admins,CN=Users,DC=domain,DC=com)" +
"(memberOf=CN=Administrators,CN=Builtin,DC=domain,DC=com))))"),
HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS("hive.server2.custom.authentication.class", null,
"Custom authentication class. Used when property\n" +
"'hive.server2.authentication' is set to 'CUSTOM'. Provided class\n" +
"must be a proper implementation of the interface\n" +
"org.apache.hive.service.auth.PasswdAuthenticationProvider. HiveServer2\n" +
"will call its Authenticate(user, passed) method to authenticate requests.\n" +
"The implementation may optionally implement Hadoop's\n" +
"org.apache.hadoop.conf.Configurable class to grab Hive's Configuration object."),
HIVE_SERVER2_PAM_SERVICES("hive.server2.authentication.pam.services", null,
"List of the underlying pam services that should be used when auth type is PAM\n" +
"A file with the same name must exist in /etc/pam.d"),
HIVE_SERVER2_ENABLE_DOAS("hive.server2.enable.doAs", true,
"Setting this property to true will have HiveServer2 execute\n" +
"Hive operations as the user making the calls to it."),
HIVE_DISTCP_DOAS_USER("hive.distcp.privileged.doAs","hive",
"This property allows privileged distcp executions done by hive\n" +
"to run as this user."),
HIVE_SERVER2_TABLE_TYPE_MAPPING("hive.server2.table.type.mapping", "CLASSIC", new StringSet("CLASSIC", "HIVE"),
"This setting reflects how HiveServer2 will report the table types for JDBC and other\n" +
"client implementations that retrieve the available tables and supported table types\n" +
" HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW\n" +
" CLASSIC : More generic types like TABLE and VIEW"),
HIVE_SERVER2_SESSION_HOOK("hive.server2.session.hook", "", ""),
// SSL settings
HIVE_SERVER2_USE_SSL("hive.server2.use.SSL", false,
"Set this to true for using SSL encryption in HiveServer2."),
HIVE_SERVER2_SSL_KEYSTORE_PATH("hive.server2.keystore.path", "",
"SSL certificate keystore location."),
HIVE_SERVER2_SSL_KEYSTORE_PASSWORD("hive.server2.keystore.password", "",
"SSL certificate keystore password."),
HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE("hive.server2.map.fair.scheduler.queue", true,
"If the YARN fair scheduler is configured and HiveServer2 is running in non-impersonation mode,\n" +
"this setting determines the user for fair scheduler queue mapping.\n" +
"If set to true (default), the logged-in user determines the fair scheduler queue\n" +
"for submitted jobs, so that map reduce resource usage can be tracked by user.\n" +
"If set to false, all Hive jobs go to the 'hive' user's queue."),
HIVE_SERVER2_BUILTIN_UDF_WHITELIST("hive.server2.builtin.udf.whitelist", "",
"Comma separated list of builtin udf names allowed in queries.\n" +
"An empty whitelist allows all builtin udfs to be executed. " +
" The udf black list takes precedence over udf white list"),
HIVE_SERVER2_BUILTIN_UDF_BLACKLIST("hive.server2.builtin.udf.blacklist", "",
"Comma separated list of udfs names. These udfs will not be allowed in queries." +
" The udf black list takes precedence over udf white list"),
HIVE_ALLOW_UDF_LOAD_ON_DEMAND("hive.allow.udf.load.on.demand", false,
"Whether enable loading UDFs from metastore on demand; this is mostly relevant for\n" +
"HS2 and was the default behavior before Hive 1.2. Off by default."),
HIVE_SERVER2_SESSION_CHECK_INTERVAL("hive.server2.session.check.interval", "6h",
new TimeValidator(TimeUnit.MILLISECONDS, 3000l, true, null, false),
"The check interval for session/operation timeout, which can be disabled by setting to zero or negative value."),
HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT("hive.server2.close.session.on.disconnect", true,
"Session will be closed when connection is closed. Set this to false to have session outlive its parent connection."),
HIVE_SERVER2_IDLE_SESSION_TIMEOUT("hive.server2.idle.session.timeout", "7d",
new TimeValidator(TimeUnit.MILLISECONDS),
"Session will be closed when it's not accessed for this duration, which can be disabled by setting to zero or negative value."),
HIVE_SERVER2_IDLE_OPERATION_TIMEOUT("hive.server2.idle.operation.timeout", "5d",
new TimeValidator(TimeUnit.MILLISECONDS),
"Operation will be closed when it's not accessed for this duration of time, which can be disabled by setting to zero value.\n" +
" With positive value, it's checked for operations in terminal state only (FINISHED, CANCELED, CLOSED, ERROR).\n" +
" With negative value, it's checked for all of the operations regardless of state."),
HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION("hive.server2.idle.session.check.operation", true,
"Session will be considered to be idle only if there is no activity, and there is no pending operation.\n" +
" This setting takes effect only if session idle timeout (hive.server2.idle.session.timeout) and checking\n" +
"(hive.server2.session.check.interval) are enabled."),
HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT("hive.server2.thrift.client.retry.limit", 1,"Number of retries upon " +
"failure of Thrift HiveServer2 calls"),
HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT("hive.server2.thrift.client.connect.retry.limit", 1,"Number of " +
"retries while opening a connection to HiveServe2"),
HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS("hive.server2.thrift.client.retry.delay.seconds", "1s",
new TimeValidator(TimeUnit.SECONDS), "Number of seconds for the HiveServer2 thrift client to wait between " +
"consecutive connection attempts. Also specifies the time to wait between retrying thrift calls upon failures"),
HIVE_SERVER2_THRIFT_CLIENT_USER("hive.server2.thrift.client.user", "anonymous","Username to use against thrift" +
" client"),
HIVE_SERVER2_THRIFT_CLIENT_PASSWORD("hive.server2.thrift.client.password", "anonymous","Password to use against " +
"thrift client"),
// ResultSet serialization settings
HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS("hive.server2.thrift.resultset.serialize.in.tasks", false,
"Whether we should serialize the Thrift structures used in JDBC ResultSet RPC in task nodes.\n " +
"We use SequenceFile and ThriftJDBCBinarySerDe to read and write the final results if this is true."),
// TODO: Make use of this config to configure fetch size
HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE("hive.server2.thrift.resultset.max.fetch.size",
10000, "Max number of rows sent in one Fetch RPC call by the server to the client."),
HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE("hive.server2.thrift.resultset.default.fetch.size", 1000,
"The number of rows sent in one Fetch RPC call by the server to the client, if not\n" +
"specified by the client."),
HIVE_SERVER2_XSRF_FILTER_ENABLED("hive.server2.xsrf.filter.enabled",false,
"If enabled, HiveServer2 will block any requests made to it over http " +
"if an X-XSRF-HEADER header is not present"),
HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist",
"set,reset,dfs,add,list,delete,reload,compile,llap",
"Comma separated list of non-SQL Hive commands users are authorized to execute"),
HIVE_SERVER2_JOB_CREDENTIAL_PROVIDER_PATH("hive.server2.job.credential.provider.path", "",
"If set, this configuration property should provide a comma-separated list of URLs that indicates the type and " +
"location of providers to be used by hadoop credential provider API. It provides HiveServer2 the ability to provide job-specific " +
"credential providers for jobs run using MR and Spark execution engines. This functionality has not been tested against Tez."),
HIVE_MOVE_FILES_THREAD_COUNT("hive.mv.files.thread", 15, new SizeValidator(0L, true, 1024L, true), "Number of threads"
+ " used to move files in move task. Set it to 0 to disable multi-threaded file moves. This parameter is also used by"
+ " MSCK to check tables."),
HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT("hive.load.dynamic.partitions.thread", 15,
new SizeValidator(1L, true, 1024L, true),
"Number of threads used to load dynamic partitions."),
// If this is set all move tasks at the end of a multi-insert query will only begin once all
// outputs are ready
HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES(
"hive.multi.insert.move.tasks.share.dependencies", false,
"If this is set all move tasks for tables/partitions (not directories) at the end of a\n" +
"multi-insert query will only begin once the dependencies for all these move tasks have been\n" +
"met.\n" +
"Advantages: If concurrency is enabled, the locks will only be released once the query has\n" +
" finished, so with this config enabled, the time when the table/partition is\n" +
" generated will be much closer to when the lock on it is released.\n" +
"Disadvantages: If concurrency is not enabled, with this disabled, the tables/partitions which\n" +
" are produced by this query and finish earlier will be available for querying\n" +
" much earlier. Since the locks are only released once the query finishes, this\n" +
" does not apply if concurrency is enabled."),
HIVE_INFER_BUCKET_SORT("hive.exec.infer.bucket.sort", false,
"If this is set, when writing partitions, the metadata will include the bucketing/sorting\n" +
"properties with which the data was written if any (this will not overwrite the metadata\n" +
"inherited from the table if the table is bucketed/sorted)"),
HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO(
"hive.exec.infer.bucket.sort.num.buckets.power.two", false,
"If this is set, when setting the number of reducers for the map reduce task which writes the\n" +
"final output files, it will choose a number which is a power of two, unless the user specifies\n" +
"the number of reducers to use using mapred.reduce.tasks. The number of reducers\n" +
"may be set to a power of two, only to be followed by a merge task meaning preventing\n" +
"anything from being inferred.\n" +
"With hive.exec.infer.bucket.sort set to true:\n" +
"Advantages: If this is not set, the number of buckets for partitions will seem arbitrary,\n" +
" which means that the number of mappers used for optimized joins, for example, will\n" +
" be very low. With this set, since the number of buckets used for any partition is\n" +
" a power of two, the number of mappers used for optimized joins will be the least\n" +
" number of buckets used by any partition being joined.\n" +
"Disadvantages: This may mean a much larger or much smaller number of reducers being used in the\n" +
" final map reduce job, e.g. if a job was originally going to take 257 reducers,\n" +
" it will now take 512 reducers, similarly if the max number of reducers is 511,\n" +
" and a job was going to use this many, it will now use 256 reducers."),
HIVEOPTLISTBUCKETING("hive.optimize.listbucketing", false,
"Enable list bucketing optimizer. Default value is false so that we disable it by default."),
    // Allow TCP Keep alive socket option for HiveServer or a maximum timeout for the socket.
SERVER_READ_SOCKET_TIMEOUT("hive.server.read.socket.timeout", "10s",
new TimeValidator(TimeUnit.SECONDS),
"Timeout for the HiveServer to close the connection if no response from the client. By default, 10 seconds."),
SERVER_TCP_KEEP_ALIVE("hive.server.tcp.keepalive", true,
"Whether to enable TCP keepalive for the Hive Server. Keepalive will prevent accumulation of half-open connections."),
HIVE_DECODE_PARTITION_NAME("hive.decode.partition.name", false,
"Whether to show the unquoted partition names in query results."),
HIVE_EXECUTION_ENGINE("hive.execution.engine", "tez", new StringSet(true, "tez", "spark", "mr"),
"Chooses execution engine. Options are: mr (Map reduce, default), tez, spark. While MR\n" +
"remains the default engine for historical reasons, it is itself a historical engine\n" +
"and is deprecated in Hive 2 line. It may be removed without further warning."),
HIVE_EXECUTION_MODE("hive.execution.mode", "container", new StringSet("container", "llap"),
"Chooses whether query fragments will run in container or in llap"),
HIVE_JAR_DIRECTORY("hive.jar.directory", null,
"This is the location hive in tez mode will look for to find a site wide \n" +
"installed hive instance."),
HIVE_USER_INSTALL_DIR("hive.user.install.directory", "/user/",
"If hive (in tez mode only) cannot find a usable hive jar in \"hive.jar.directory\", \n" +
"it will upload the hive jar to \"hive.user.install.directory/user.name\"\n" +
"and use it to run queries."),
// Vectorization enabled
HIVE_VECTORIZATION_ENABLED("hive.vectorized.execution.enabled", true,
"This flag should be set to true to enable vectorized mode of query execution.\n" +
"The default value is true to reflect that our most expected Hive deployment will be using vectorization."),
HIVE_VECTORIZATION_REDUCE_ENABLED("hive.vectorized.execution.reduce.enabled", true,
"This flag should be set to true to enable vectorized mode of the reduce-side of query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED("hive.vectorized.execution.reduce.groupby.enabled", true,
"This flag should be set to true to enable vectorized mode of the reduce-side GROUP BY query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED("hive.vectorized.execution.mapjoin.native.enabled", true,
"This flag should be set to true to enable native (i.e. non-pass through) vectorization\n" +
"of queries using MapJoin.\n" +
"The default value is true."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED("hive.vectorized.execution.mapjoin.native.multikey.only.enabled", false,
"This flag should be set to true to restrict use of native vector map join hash tables to\n" +
"the MultiKey in queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED("hive.vectorized.execution.mapjoin.minmax.enabled", false,
"This flag should be set to true to enable vector map join hash tables to\n" +
"use max / max filtering for integer join queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD("hive.vectorized.execution.mapjoin.overflow.repeated.threshold", -1,
"The number of small table rows for a match in vector map join hash tables\n" +
"where we use the repeated field optimization in overflow vectorized row batch for join queries using MapJoin.\n" +
"A value of -1 means do use the join result optimization. Otherwise, threshold value can be 0 to maximum integer."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED("hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled", false,
"This flag should be set to true to enable use of native fast vector map join hash tables in\n" +
"queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL("hive.vectorized.groupby.checkinterval", 100000,
"Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed."),
HIVE_VECTORIZATION_GROUPBY_MAXENTRIES("hive.vectorized.groupby.maxentries", 1000000,
"Max number of entries in the vector group by aggregation hashtables. \n" +
"Exceeding this will trigger a flush irrelevant of memory pressure condition."),
HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1,
"Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded."),
HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED("hive.vectorized.execution.reducesink.new.enabled", true,
"This flag should be set to true to enable the new vectorization\n" +
"of queries using ReduceSink.\ni" +
"The default value is true."),
HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT("hive.vectorized.use.vectorized.input.format", true,
"This flag should be set to true to enable vectorizing with vectorized input file format capable SerDe.\n" +
"The default value is true."),
HIVE_VECTORIZATION_VECTORIZED_INPUT_FILE_FORMAT_EXCLUDES("hive.vectorized.input.format.excludes","",
"This configuration should be set to fully described input format class names for which \n"
+ " vectorized input format should not be used for vectorized execution."),
HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE("hive.vectorized.use.vector.serde.deserialize", true,
"This flag should be set to true to enable vectorizing rows using vector deserialize.\n" +
"The default value is true."),
HIVE_VECTORIZATION_USE_ROW_DESERIALIZE("hive.vectorized.use.row.serde.deserialize", true,
"This flag should be set to true to enable vectorizing using row deserialize.\n" +
"The default value is false."),
HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES(
"hive.vectorized.row.serde.inputformat.excludes",
"org.apache.parquet.hadoop.ParquetInputFormat,org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
"The input formats not supported by row deserialize vectorization."),
HIVE_VECTOR_ADAPTOR_USAGE_MODE("hive.vectorized.adaptor.usage.mode", "all", new StringSet("none", "chosen", "all"),
"Specifies the extent to which the VectorUDFAdaptor will be used for UDFs that do not have a corresponding vectorized class.\n" +
"0. none : disable any usage of VectorUDFAdaptor\n" +
"1. chosen : use VectorUDFAdaptor for a small set of UDFs that were chosen for good performance\n" +
"2. all : use VectorUDFAdaptor for all UDFs"
),
HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE("hive.test.vectorized.adaptor.override", false,
"internal use only, used to force always using the VectorUDFAdaptor.\n" +
"The default is false, of course",
true),
HIVE_VECTORIZATION_PTF_ENABLED("hive.vectorized.execution.ptf.enabled", true,
"This flag should be set to true to enable vectorized mode of the PTF of query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_PTF_MAX_MEMORY_BUFFERING_BATCH_COUNT("hive.vectorized.ptf.max.memory.buffering.batch.count", 25,
"Maximum number of vectorized row batches to buffer in memory for PTF\n" +
"The default value is 25"),
HIVE_VECTORIZATION_TESTING_REDUCER_BATCH_SIZE("hive.vectorized.testing.reducer.batch.size", -1,
"internal use only, used for creating small group key vectorized row batches to exercise more logic\n" +
"The default value is -1 which means don't restrict for testing",
true),
HIVE_VECTORIZATION_TESTING_REUSE_SCRATCH_COLUMNS("hive.vectorized.reuse.scratch.columns", true,
"internal use only. Disable this to debug scratch column state issues",
true),
HIVE_VECTORIZATION_COMPLEX_TYPES_ENABLED("hive.vectorized.complex.types.enabled", true,
"This flag should be set to true to enable vectorization\n" +
"of expressions with complex types.\n" +
"The default value is true."),
HIVE_VECTORIZATION_GROUPBY_COMPLEX_TYPES_ENABLED("hive.vectorized.groupby.complex.types.enabled", true,
"This flag should be set to true to enable group by vectorization\n" +
"of aggregations that use complex types.\n",
"For example, AVG uses a complex type (STRUCT) for partial aggregation results" +
"The default value is true."),
HIVE_VECTORIZATION_ROW_IDENTIFIER_ENABLED("hive.vectorized.row.identifier.enabled", true,
"This flag should be set to true to enable vectorization of ROW__ID."),
HIVE_VECTORIZATION_USE_CHECKED_EXPRESSIONS("hive.vectorized.use.checked.expressions", false,
"This flag should be set to true to use overflow checked vector expressions when available.\n" +
"For example, arithmetic expressions which can overflow the output data type can be evaluated using\n" +
" checked vector expressions so that they produce same result as non-vectorized evaluation."),
HIVE_VECTORIZED_ADAPTOR_SUPPRESS_EVALUATE_EXCEPTIONS(
"hive.vectorized.adaptor.suppress.evaluate.exceptions", false,
"This flag should be set to true to suppress HiveException from the generic UDF function\n" +
"evaluate call and turn them into NULLs. Assume, by default, this is not needed"),
HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED(
"hive.vectorized.input.format.supports.enabled",
"decimal_64",
"Which vectorized input format support features are enabled for vectorization.\n" +
"That is, if a VectorizedInputFormat input format does support \"decimal_64\" for example\n" +
"this variable must enable that to be used in vectorization"),
HIVE_VECTORIZED_IF_EXPR_MODE("hive.vectorized.if.expr.mode", "better", new StringSet("adaptor", "good", "better"),
"Specifies the extent to which SQL IF statements will be vectorized.\n" +
"0. adaptor: only use the VectorUDFAdaptor to vectorize IF statements\n" +
"1. good : use regular vectorized IF expression classes that get good performance\n" +
"2. better : use vectorized IF expression classes that conditionally execute THEN/ELSE\n" +
" expressions for better performance.\n"),
HIVE_TEST_VECTORIZATION_ENABLED_OVERRIDE("hive.test.vectorized.execution.enabled.override",
"none", new StringSet("none", "enable", "disable"),
"internal use only, used to override the hive.vectorized.execution.enabled setting and\n" +
"turn off vectorization. The default is false, of course",
true),
HIVE_TEST_VECTORIZATION_SUPPRESS_EXPLAIN_EXECUTION_MODE(
"hive.test.vectorization.suppress.explain.execution.mode", false,
"internal use only, used to suppress \"Execution mode: vectorized\" EXPLAIN display.\n" +
"The default is false, of course",
true),
HIVE_TEST_VECTORIZER_SUPPRESS_FATAL_EXCEPTIONS(
"hive.test.vectorizer.suppress.fatal.exceptions", true,
"internal use only. When false, don't suppress fatal exceptions like\n" +
"NullPointerException, etc so the query will fail and assure it will be noticed",
true),
HIVE_VECTORIZATION_FILESINK_ARROW_NATIVE_ENABLED(
"hive.vectorized.execution.filesink.arrow.native.enabled", true,
"This flag should be set to true to enable the native vectorization\n" +
"of queries using the Arrow SerDe and FileSink.\n" +
"The default value is true."),
HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true, "This property has been extended to control "
+ "whether to check, convert, and normalize partition value to conform to its column type in "
+ "partition operations including but not limited to insert, such as alter, describe etc."),
HIVE_HADOOP_CLASSPATH("hive.hadoop.classpath", null,
"For Windows OS, we need to pass HIVE_HADOOP_CLASSPATH Java parameter while starting HiveServer2 \n" +
"using \"-hiveconf hive.hadoop.classpath=%HIVE_LIB%\"."),
HIVE_RPC_QUERY_PLAN("hive.rpc.query.plan", false,
"Whether to send the query plan via local resource or RPC"),
HIVE_AM_SPLIT_GENERATION("hive.compute.splits.in.am", true,
"Whether to generate the splits locally or in the AM (tez only)"),
HIVE_TEZ_GENERATE_CONSISTENT_SPLITS("hive.tez.input.generate.consistent.splits", true,
"Whether to generate consistent split locations when generating splits in the AM"),
HIVE_PREWARM_ENABLED("hive.prewarm.enabled", false, "Enables container prewarm for Tez/Spark (Hadoop 2 only)"),
HIVE_PREWARM_NUM_CONTAINERS("hive.prewarm.numcontainers", 10, "Controls the number of containers to prewarm for Tez/Spark (Hadoop 2 only)"),
HIVE_PREWARM_SPARK_TIMEOUT("hive.prewarm.spark.timeout", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time to wait to finish prewarming spark executors"),
HIVESTAGEIDREARRANGE("hive.stageid.rearrange", "none", new StringSet("none", "idonly", "traverse", "execution"), ""),
HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES("hive.explain.dependency.append.tasktype", false, ""),
HIVECOUNTERGROUP("hive.counters.group.name", "HIVE",
"The name of counter group for internal Hive variables (CREATED_FILE, FATAL_ERROR, etc.)"),
HIVE_QUOTEDID_SUPPORT("hive.support.quoted.identifiers", "column",
new StringSet("none", "column"),
"Whether to use quoted identifier. 'none' or 'column' can be used. \n" +
" none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" +
" column: implies column names can contain any character."
),
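    // Illustrative note: with the default 'column' mode above, identifiers containing otherwise
    // invalid characters can be referenced by quoting them with backticks, e.g. (hedged sketch):
    //   CREATE TABLE t (`weird col#1` INT);
    //   SELECT `weird col#1` FROM t;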
/**
* @deprecated Use MetastoreConf.SUPPORT_SPECIAL_CHARACTERS_IN_TABLE_NAMES
*/
@Deprecated
HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("hive.support.special.characters.tablename", true,
"This flag should be set to true to enable support for special characters in table names.\n"
+ "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n"
+ "The only supported special character right now is '/'. This flag applies only to quoted table names.\n"
+ "The default value is true."),
HIVE_CREATE_TABLES_AS_INSERT_ONLY("hive.create.as.insert.only", false,
"Whether the eligible tables should be created as ACID insert-only by default. Does \n" +
"not apply to external tables, the ones using storage handlers, etc."),
// role names are case-insensitive
USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "", false,
"Comma separated list of users who are in admin role for bootstrapping.\n" +
"More users can be added in ADMIN role later."),
HIVE_COMPAT("hive.compat", HiveCompat.DEFAULT_COMPAT_LEVEL,
"Enable (configurable) deprecated behaviors by setting desired level of backward compatibility.\n" +
"Setting to 0.12:\n" +
" Maintains division behavior: int / int = double"),
HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ("hive.convert.join.bucket.mapjoin.tez", true,
"Whether joins can be automatically converted to bucket map joins in hive \n" +
"when tez is used as the execution engine."),
HIVE_TEZ_BMJ_USE_SUBCACHE("hive.tez.bmj.use.subcache", true,
"Use subcache to reuse hashtable across multiple tasks"),
HIVE_CHECK_CROSS_PRODUCT("hive.exec.check.crossproducts", true,
"Check if a plan contains a Cross Product. If there is one, output a warning to the Session's console."),
HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL("hive.localize.resource.wait.interval", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time to wait for another thread to localize the same resource for hive-tez."),
HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS("hive.localize.resource.num.wait.attempts", 5,
"The number of attempts waiting for localizing a resource in hive-tez."),
TEZ_AUTO_REDUCER_PARALLELISM("hive.tez.auto.reducer.parallelism", false,
"Turn on Tez' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes\n" +
"and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as\n" +
"necessary."),
TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR("hive.tez.llap.min.reducer.per.executor", 0.33f,
"If above 0, the min number of reducers for auto-parallelism for LLAP scheduling will\n" +
"be set to this fraction of the number of executors."),
TEZ_MAX_PARTITION_FACTOR("hive.tez.max.partition.factor", 2f,
"When auto reducer parallelism is enabled this factor will be used to over-partition data in shuffle edges."),
TEZ_MIN_PARTITION_FACTOR("hive.tez.min.partition.factor", 0.25f,
"When auto reducer parallelism is enabled this factor will be used to put a lower limit to the number\n" +
"of reducers that tez specifies."),
TEZ_OPTIMIZE_BUCKET_PRUNING(
"hive.tez.bucket.pruning", false,
"When pruning is enabled, filters on bucket columns will be processed by \n" +
"filtering the splits against a bitset of included buckets. This needs predicates \n"+
"produced by hive.optimize.ppd and hive.optimize.index.filters."),
TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT(
"hive.tez.bucket.pruning.compat", true,
"When pruning is enabled, handle possibly broken inserts due to negative hashcodes.\n" +
"This occasionally doubles the data scan cost, but is default enabled for safety"),
TEZ_DYNAMIC_PARTITION_PRUNING(
"hive.tez.dynamic.partition.pruning", true,
"When dynamic pruning is enabled, joins on partition keys will be processed by sending\n" +
"events from the processing vertices to the Tez application master. These events will be\n" +
"used to prune unnecessary partitions."),
TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED("hive.tez.dynamic.partition.pruning.extended", true,
"Whether we should try to create additional opportunities for dynamic pruning, e.g., considering\n" +
"siblings that may not be created by normal dynamic pruning logic.\n" +
"Only works when dynamic pruning is enabled."),
TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE("hive.tez.dynamic.partition.pruning.max.event.size", 1*1024*1024L,
"Maximum size of events sent by processors in dynamic pruning. If this size is crossed no pruning will take place."),
TEZ_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE("hive.tez.dynamic.partition.pruning.max.data.size", 100*1024*1024L,
"Maximum total data size of events in dynamic pruning."),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION("hive.tez.dynamic.semijoin.reduction", true,
"When dynamic semijoin is enabled, shuffle joins will perform a leaky semijoin before shuffle. This " +
"requires hive.tez.dynamic.partition.pruning to be enabled."),
TEZ_MIN_BLOOM_FILTER_ENTRIES("hive.tez.min.bloom.filter.entries", 1000000L,
"Bloom filter should be of at min certain size to be effective"),
TEZ_MAX_BLOOM_FILTER_ENTRIES("hive.tez.max.bloom.filter.entries", 100000000L,
"Bloom filter should be of at max certain size to be effective"),
TEZ_BLOOM_FILTER_FACTOR("hive.tez.bloom.filter.factor", (float) 1.0,
"Bloom filter should be a multiple of this factor with nDV"),
TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION("hive.tez.bigtable.minsize.semijoin.reduction", 100000000L,
"Big table for runtime filteting should be of atleast this size"),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_THRESHOLD("hive.tez.dynamic.semijoin.reduction.threshold", (float) 0.50,
"Only perform semijoin optimization if the estimated benefit at or above this fraction of the target table"),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_MAPJOIN("hive.tez.dynamic.semijoin.reduction.for.mapjoin", false,
"Use a semi-join branch for map-joins. This may not make it faster, but is helpful in certain join patterns."),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_DPP_FACTOR("hive.tez.dynamic.semijoin.reduction.for.dpp.factor",
(float) 1.0,
"The factor to decide if semijoin branch feeds into a TableScan\n" +
"which has an outgoing Dynamic Partition Pruning (DPP) branch based on number of distinct values."),
TEZ_SMB_NUMBER_WAVES(
"hive.tez.smb.number.waves",
(float) 0.5,
"The number of waves in which to run the SMB join. Account for cluster being occupied. Ideally should be 1 wave."),
TEZ_EXEC_SUMMARY(
"hive.tez.exec.print.summary",
false,
"Display breakdown of execution steps, for every query executed by the shell."),
TEZ_SESSION_EVENTS_SUMMARY(
"hive.tez.session.events.print.summary",
"none", new StringSet("none", "text", "json"),
"Display summary of all tez sessions related events in text or json format"),
TEZ_EXEC_INPLACE_PROGRESS(
"hive.tez.exec.inplace.progress",
true,
"Updates tez job execution progress in-place in the terminal when hive-cli is used."),
HIVE_SERVER2_INPLACE_PROGRESS(
"hive.server2.in.place.progress",
true,
"Allows hive server 2 to send progress bar update information. This is currently available"
+ " only if the execution engine is tez."),
TEZ_DAG_STATUS_CHECK_INTERVAL("hive.tez.dag.status.check.interval", "500ms",
new TimeValidator(TimeUnit.MILLISECONDS), "Interval between subsequent DAG status invocation."),
SPARK_EXEC_INPLACE_PROGRESS("hive.spark.exec.inplace.progress", true,
"Updates spark job execution progress in-place in the terminal."),
TEZ_CONTAINER_MAX_JAVA_HEAP_FRACTION("hive.tez.container.max.java.heap.fraction", 0.8f,
"This is to override the tez setting with the same name"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MIN("hive.tez.task.scale.memory.reserve-fraction.min",
0.3f, "This is to override the tez setting tez.task.scale.memory.reserve-fraction"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MAX("hive.tez.task.scale.memory.reserve.fraction.max",
0.5f, "The maximum fraction of JVM memory which Tez will reserve for the processor"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION("hive.tez.task.scale.memory.reserve.fraction",
-1f, "The customized fraction of JVM memory which Tez will reserve for the processor"),
TEZ_CARTESIAN_PRODUCT_EDGE_ENABLED("hive.tez.cartesian-product.enabled",
false, "Use Tez cartesian product edge to speed up cross product"),
// The default is different on the client and server, so it's null here.
LLAP_IO_ENABLED("hive.llap.io.enabled", null, "Whether the LLAP IO layer is enabled."),
LLAP_IO_ROW_WRAPPER_ENABLED("hive.llap.io.row.wrapper.enabled", true, "Whether the LLAP IO row wrapper is enabled for non-vectorized queries."),
LLAP_IO_ACID_ENABLED("hive.llap.io.acid", true, "Whether the LLAP IO layer is enabled for ACID."),
LLAP_IO_TRACE_SIZE("hive.llap.io.trace.size", "2Mb",
new SizeValidator(0L, true, (long)Integer.MAX_VALUE, false),
"The buffer size for a per-fragment LLAP debug trace. 0 to disable."),
LLAP_IO_TRACE_ALWAYS_DUMP("hive.llap.io.trace.always.dump", false,
"Whether to always dump the LLAP IO trace (if enabled); the default is on error."),
LLAP_IO_NONVECTOR_WRAPPER_ENABLED("hive.llap.io.nonvector.wrapper.enabled", true,
"Whether the LLAP IO layer is enabled for non-vectorized queries that read inputs\n" +
"that can be vectorized"),
LLAP_IO_MEMORY_MODE("hive.llap.io.memory.mode", "cache",
new StringSet("cache", "none"),
"LLAP IO memory usage; 'cache' (the default) uses data and metadata cache with a\n" +
"custom off-heap allocator, 'none' doesn't use either (this mode may result in\n" +
"significant performance degradation)"),
LLAP_ALLOCATOR_MIN_ALLOC("hive.llap.io.allocator.alloc.min", "4Kb", new SizeValidator(),
"Minimum allocation possible from LLAP buddy allocator. Allocations below that are\n" +
"padded to minimum allocation. For ORC, should generally be the same as the expected\n" +
"compression buffer size, or next lowest power of 2. Must be a power of 2."),
LLAP_ALLOCATOR_MAX_ALLOC("hive.llap.io.allocator.alloc.max", "16Mb", new SizeValidator(),
"Maximum allocation possible from LLAP buddy allocator. For ORC, should be as large as\n" +
"the largest expected ORC compression buffer size. Must be a power of 2."),
LLAP_ALLOCATOR_ARENA_COUNT("hive.llap.io.allocator.arena.count", 8,
"Arena count for LLAP low-level cache; cache will be allocated in the steps of\n" +
"(size/arena_count) bytes. This size must be <= 1Gb and >= max allocation; if it is\n" +
"not the case, an adjusted size will be used. Using powers of 2 is recommended."),
LLAP_IO_MEMORY_MAX_SIZE("hive.llap.io.memory.size", "1Gb", new SizeValidator(),
"Maximum size for IO allocator or ORC low-level cache.", "hive.llap.io.cache.orc.size"),
LLAP_ALLOCATOR_DIRECT("hive.llap.io.allocator.direct", true,
"Whether ORC low-level cache should use direct allocation."),
LLAP_ALLOCATOR_PREALLOCATE("hive.llap.io.allocator.preallocate", true,
"Whether to preallocate the entire IO memory at init time."),
LLAP_ALLOCATOR_MAPPED("hive.llap.io.allocator.mmap", false,
"Whether ORC low-level cache should use memory mapped allocation (direct I/O). \n" +
"This is recommended to be used along-side NVDIMM (DAX) or NVMe flash storage."),
LLAP_ALLOCATOR_MAPPED_PATH("hive.llap.io.allocator.mmap.path", "/tmp",
new WritableDirectoryValidator(),
"The directory location for mapping NVDIMM/NVMe flash storage into the ORC low-level cache."),
LLAP_ALLOCATOR_DISCARD_METHOD("hive.llap.io.allocator.discard.method", "both",
new StringSet("freelist", "brute", "both"),
"Which method to use to force-evict blocks to deal with fragmentation:\n" +
"freelist - use half-size free list (discards less, but also less reliable); brute -\n" +
"brute force, discard whatever we can; both - first try free list, then brute force."),
LLAP_ALLOCATOR_DEFRAG_HEADROOM("hive.llap.io.allocator.defrag.headroom", "1Mb",
"How much of a headroom to leave to allow allocator more flexibility to defragment.\n" +
"The allocator would further cap it to a fraction of total memory."),
LLAP_TRACK_CACHE_USAGE("hive.llap.io.track.cache.usage", true,
"Whether to tag LLAP cache contents, mapping them to Hive entities (paths for\n" +
"partitions and tables) for reporting."),
LLAP_USE_LRFU("hive.llap.io.use.lrfu", true,
"Whether ORC low-level cache should use LRFU cache policy instead of default (FIFO)."),
LLAP_LRFU_LAMBDA("hive.llap.io.lrfu.lambda", 0.000001f,
"Lambda for ORC low-level cache LRFU cache policy. Must be in [0, 1]. 0 makes LRFU\n" +
"behave like LFU, 1 makes it behave like LRU, values in between balance accordingly.\n" +
"The meaning of this parameter is the inverse of the number of time ticks (cache\n" +
" operations, currently) that cause the combined recency-frequency of a block in cache\n" +
" to be halved."),
LLAP_CACHE_ALLOW_SYNTHETIC_FILEID("hive.llap.cache.allow.synthetic.fileid", true,
"Whether LLAP cache should use synthetic file ID if real one is not available. Systems\n" +
"like HDFS, Isilon, etc. provide a unique file/inode ID. On other FSes (e.g. local\n" +
"FS), the cache would not work by default because LLAP is unable to uniquely track the\n" +
"files; enabling this setting allows LLAP to generate file ID from the path, size and\n" +
"modification time, which is almost certain to identify file uniquely. However, if you\n" +
"use a FS without file IDs and rewrite files a lot (or are paranoid), you might want\n" +
"to avoid this setting."),
LLAP_CACHE_DEFAULT_FS_FILE_ID("hive.llap.cache.defaultfs.only.native.fileid", true,
"Whether LLAP cache should use native file IDs from the default FS only. This is to\n" +
"avoid file ID collisions when several different DFS instances are in use at the same\n" +
"time. Disable this check to allow native file IDs from non-default DFS."),
LLAP_CACHE_ENABLE_ORC_GAP_CACHE("hive.llap.orc.gap.cache", true,
"Whether LLAP cache for ORC should remember gaps in ORC compression buffer read\n" +
"estimates, to avoid re-reading the data that was read once and discarded because it\n" +
"is unneeded. This is only necessary for ORC files written before HIVE-9660."),
LLAP_IO_USE_FILEID_PATH("hive.llap.io.use.fileid.path", true,
"Whether LLAP should use fileId (inode)-based path to ensure better consistency for the\n" +
"cases of file overwrites. This is supported on HDFS. Disabling this also turns off any\n" +
"cache consistency checks based on fileid comparisons."),
// Restricted to text for now as this is a new feature; only text files can be sliced.
LLAP_IO_ENCODE_ENABLED("hive.llap.io.encode.enabled", true,
"Whether LLAP should try to re-encode and cache data for non-ORC formats. This is used\n" +
"on LLAP Server side to determine if the infrastructure for that is initialized."),
LLAP_IO_ENCODE_FORMATS("hive.llap.io.encode.formats",
"org.apache.hadoop.mapred.TextInputFormat,",
"The table input formats for which LLAP IO should re-encode and cache data.\n" +
"Comma-separated list."),
LLAP_IO_ENCODE_ALLOC_SIZE("hive.llap.io.encode.alloc.size", "256Kb", new SizeValidator(),
"Allocation size for the buffers used to cache encoded data from non-ORC files. Must\n" +
"be a power of two between " + LLAP_ALLOCATOR_MIN_ALLOC + " and\n" +
LLAP_ALLOCATOR_MAX_ALLOC + "."),
LLAP_IO_ENCODE_VECTOR_SERDE_ENABLED("hive.llap.io.encode.vector.serde.enabled", true,
"Whether LLAP should use vectorized SerDe reader to read text data when re-encoding."),
LLAP_IO_ENCODE_VECTOR_SERDE_ASYNC_ENABLED("hive.llap.io.encode.vector.serde.async.enabled",
true,
"Whether LLAP should use async mode in vectorized SerDe reader to read text data."),
LLAP_IO_ENCODE_SLICE_ROW_COUNT("hive.llap.io.encode.slice.row.count", 100000,
"Row count to use to separate cache slices when reading encoded data from row-based\n" +
"inputs into LLAP cache, if this feature is enabled."),
LLAP_IO_ENCODE_SLICE_LRR("hive.llap.io.encode.slice.lrr", true,
"Whether to separate cache slices when reading encoded data from text inputs via MR\n" +
"MR LineRecordRedader into LLAP cache, if this feature is enabled. Safety flag."),
LLAP_ORC_ENABLE_TIME_COUNTERS("hive.llap.io.orc.time.counters", true,
"Whether to enable time counters for LLAP IO layer (time spent in HDFS, etc.)"),
LLAP_IO_VRB_QUEUE_LIMIT_BASE("hive.llap.io.vrb.queue.limit.base", 50000,
"The default queue size for VRBs produced by a LLAP IO thread when the processing is\n" +
"slower than the IO. The actual queue size is set per fragment, and is adjusted down\n" +
"from the base, depending on the schema."),
LLAP_IO_VRB_QUEUE_LIMIT_MIN("hive.llap.io.vrb.queue.limit.min", 10,
"The minimum queue size for VRBs produced by a LLAP IO thread when the processing is\n" +
"slower than the IO (used when determining the size from base size)."),
LLAP_IO_SHARE_OBJECT_POOLS("hive.llap.io.share.object.pools", false,
"Whether to used shared object pools in LLAP IO. A safety flag."),
LLAP_AUTO_ALLOW_UBER("hive.llap.auto.allow.uber", false,
"Whether or not to allow the planner to run vertices in the AM."),
LLAP_AUTO_ENFORCE_TREE("hive.llap.auto.enforce.tree", true,
"Enforce that all parents are in llap, before considering vertex"),
LLAP_AUTO_ENFORCE_VECTORIZED("hive.llap.auto.enforce.vectorized", true,
"Enforce that inputs are vectorized, before considering vertex"),
LLAP_AUTO_ENFORCE_STATS("hive.llap.auto.enforce.stats", true,
"Enforce that col stats are available, before considering vertex"),
LLAP_AUTO_MAX_INPUT("hive.llap.auto.max.input.size", 10*1024*1024*1024L,
"Check input size, before considering vertex (-1 disables check)"),
LLAP_AUTO_MAX_OUTPUT("hive.llap.auto.max.output.size", 1*1024*1024*1024L,
"Check output size, before considering vertex (-1 disables check)"),
LLAP_SKIP_COMPILE_UDF_CHECK("hive.llap.skip.compile.udf.check", false,
"Whether to skip the compile-time check for non-built-in UDFs when deciding whether to\n" +
"execute tasks in LLAP. Skipping the check allows executing UDFs from pre-localized\n" +
"jars in LLAP; if the jars are not pre-localized, the UDFs will simply fail to load."),
LLAP_ALLOW_PERMANENT_FNS("hive.llap.allow.permanent.fns", true,
"Whether LLAP decider should allow permanent UDFs."),
LLAP_EXECUTION_MODE("hive.llap.execution.mode", "none",
new StringSet("auto", "none", "all", "map", "only"),
"Chooses whether query fragments will run in container or in llap"),
LLAP_OBJECT_CACHE_ENABLED("hive.llap.object.cache.enabled", true,
"Cache objects (plans, hashtables, etc) in llap"),
LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS("hive.llap.io.decoding.metrics.percentiles.intervals", "30",
"Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" +
"for percentile latency metrics on the LLAP daemon IO decoding time.\n" +
"hive.llap.queue.metrics.percentiles.intervals"),
LLAP_IO_THREADPOOL_SIZE("hive.llap.io.threadpool.size", 10,
"Specify the number of threads to use for low-level IO thread pool."),
LLAP_KERBEROS_PRINCIPAL(HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME, "",
"The name of the LLAP daemon's service principal."),
LLAP_KERBEROS_KEYTAB_FILE("hive.llap.daemon.keytab.file", "",
"The path to the Kerberos Keytab file containing the LLAP daemon's service principal."),
LLAP_WEBUI_SPNEGO_KEYTAB_FILE("hive.llap.webui.spnego.keytab", "",
"The path to the Kerberos Keytab file containing the LLAP WebUI SPNEGO principal.\n" +
"Typical value would look like /etc/security/keytabs/spnego.service.keytab."),
LLAP_WEBUI_SPNEGO_PRINCIPAL("hive.llap.webui.spnego.principal", "",
"The LLAP WebUI SPNEGO service principal. Configured similarly to\n" +
"hive.server2.webui.spnego.principal"),
LLAP_FS_KERBEROS_PRINCIPAL("hive.llap.task.principal", "",
"The name of the principal to use to run tasks. By default, the clients are required\n" +
"to provide tokens to access HDFS/etc."),
LLAP_FS_KERBEROS_KEYTAB_FILE("hive.llap.task.keytab.file", "",
"The path to the Kerberos Keytab file containing the principal to use to run tasks.\n" +
"By default, the clients are required to provide tokens to access HDFS/etc."),
LLAP_ZKSM_ZK_CONNECTION_STRING("hive.llap.zk.sm.connectionString", "",
"ZooKeeper connection string for ZooKeeper SecretManager."),
LLAP_ZKSM_ZK_SESSION_TIMEOUT("hive.llap.zk.sm.session.timeout", "40s", new TimeValidator(
TimeUnit.MILLISECONDS), "ZooKeeper session timeout for ZK SecretManager."),
LLAP_ZK_REGISTRY_USER("hive.llap.zk.registry.user", "",
"In the LLAP ZooKeeper-based registry, specifies the username in the Zookeeper path.\n" +
"This should be the hive user or whichever user is running the LLAP daemon."),
LLAP_ZK_REGISTRY_NAMESPACE("hive.llap.zk.registry.namespace", null,
"In the LLAP ZooKeeper-based registry, overrides the ZK path namespace. Note that\n" +
"using this makes the path management (e.g. setting correct ACLs) your responsibility."),
// Note: do not rename to ..service.acl; Hadoop generates .hosts setting name from this,
// resulting in a collision with existing hive.llap.daemon.service.hosts and bizarre errors.
// These are read by Hadoop IPC, so you should check the usage and naming conventions (e.g.
// ".blocked" is a string hardcoded by Hadoop, and defaults are enforced elsewhere in Hive)
// before making changes or copy-pasting these.
LLAP_SECURITY_ACL("hive.llap.daemon.acl", "*", "The ACL for LLAP daemon."),
LLAP_SECURITY_ACL_DENY("hive.llap.daemon.acl.blocked", "", "The deny ACL for LLAP daemon."),
LLAP_MANAGEMENT_ACL("hive.llap.management.acl", "*", "The ACL for LLAP daemon management."),
LLAP_MANAGEMENT_ACL_DENY("hive.llap.management.acl.blocked", "",
"The deny ACL for LLAP daemon management."),
LLAP_PLUGIN_ACL("hive.llap.plugin.acl", "*", "The ACL for LLAP plugin AM endpoint."),
LLAP_PLUGIN_ACL_DENY("hive.llap.plugin.acl.blocked", "",
"The deny ACL for LLAP plugin AM endpoint."),
LLAP_REMOTE_TOKEN_REQUIRES_SIGNING("hive.llap.remote.token.requires.signing", "true",
new StringSet("false", "except_llap_owner", "true"),
"Whether the token returned from LLAP management API should require fragment signing.\n" +
"True by default; can be disabled to allow CLI to get tokens from LLAP in a secure\n" +
"cluster by setting it to true or 'except_llap_owner' (the latter returns such tokens\n" +
"to everyone except the user LLAP cluster is authenticating under)."),
// Hadoop DelegationTokenManager default is 1 week.
LLAP_DELEGATION_TOKEN_LIFETIME("hive.llap.daemon.delegation.token.lifetime", "14d",
new TimeValidator(TimeUnit.SECONDS),
"LLAP delegation token lifetime, in seconds if specified without a unit."),
LLAP_MANAGEMENT_RPC_PORT("hive.llap.management.rpc.port", 15004,
"RPC port for LLAP daemon management service."),
LLAP_WEB_AUTO_AUTH("hive.llap.auto.auth", false,
"Whether or not to set Hadoop configs to enable auth in LLAP web app."),
LLAP_DAEMON_RPC_NUM_HANDLERS("hive.llap.daemon.rpc.num.handlers", 5,
"Number of RPC handlers for LLAP daemon.", "llap.daemon.rpc.num.handlers"),
LLAP_PLUGIN_RPC_NUM_HANDLERS("hive.llap.plugin.rpc.num.handlers", 1,
"Number of RPC handlers for AM LLAP plugin endpoint."),
LLAP_DAEMON_WORK_DIRS("hive.llap.daemon.work.dirs", "",
"Working directories for the daemon. This should not be set if running as a YARN\n" +
"Service. It must be set when not running on YARN. If the value is set when\n" +
"running as a YARN Service, the specified value will be used.",
"llap.daemon.work.dirs"),
LLAP_DAEMON_YARN_SHUFFLE_PORT("hive.llap.daemon.yarn.shuffle.port", 15551,
"YARN shuffle port for LLAP-daemon-hosted shuffle.", "llap.daemon.yarn.shuffle.port"),
LLAP_DAEMON_YARN_CONTAINER_MB("hive.llap.daemon.yarn.container.mb", -1,
"llap server yarn container size in MB. Used in LlapServiceDriver and package.py", "llap.daemon.yarn.container.mb"),
LLAP_DAEMON_QUEUE_NAME("hive.llap.daemon.queue.name", null,
"Queue name within which the llap application will run." +
" Used in LlapServiceDriver and package.py"),
// TODO Move the following 2 properties out of Configuration to a constant.
LLAP_DAEMON_CONTAINER_ID("hive.llap.daemon.container.id", null,
"ContainerId of a running LlapDaemon. Used to publish to the registry"),
LLAP_DAEMON_NM_ADDRESS("hive.llap.daemon.nm.address", null,
"NM Address host:rpcPort for the NodeManager on which the instance of the daemon is running.\n" +
"Published to the llap registry. Should never be set by users"),
LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED("hive.llap.daemon.shuffle.dir.watcher.enabled", false,
"TODO doc", "llap.daemon.shuffle.dir-watcher.enabled"),
LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS(
"hive.llap.daemon.am.liveness.heartbeat.interval.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Tez AM-LLAP heartbeat interval (milliseconds). This needs to be below the task timeout\n" +
"interval, but otherwise as high as possible to avoid unnecessary traffic.",
"llap.daemon.am.liveness.heartbeat.interval-ms"),
LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS(
"hive.llap.am.liveness.connection.timeout.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Amount of time to wait on connection failures to the AM from an LLAP daemon before\n" +
"considering the AM to be dead.", "llap.am.liveness.connection.timeout-millis"),
LLAP_DAEMON_AM_USE_FQDN("hive.llap.am.use.fqdn", true,
"Whether to use FQDN of the AM machine when submitting work to LLAP."),
// Not used yet - since the Writable RPC engine does not support this policy.
LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS(
"hive.llap.am.liveness.connection.sleep.between.retries.ms", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Sleep duration while waiting to retry connection failures to the AM from the daemon for\n" +
"the general keep-alive thread (milliseconds).",
"llap.am.liveness.connection.sleep-between-retries-millis"),
LLAP_DAEMON_TASK_SCHEDULER_TIMEOUT_SECONDS(
"hive.llap.task.scheduler.timeout.seconds", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Amount of time to wait before failing the query when there are no llap daemons running\n" +
"(alive) in the cluster.", "llap.daemon.scheduler.timeout.seconds"),
LLAP_DAEMON_NUM_EXECUTORS("hive.llap.daemon.num.executors", 4,
"Number of executors to use in LLAP daemon; essentially, the number of tasks that can be\n" +
"executed in parallel.", "llap.daemon.num.executors"),
LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR("hive.llap.mapjoin.memory.oversubscribe.factor", 0.2f,
"Fraction of memory from hive.auto.convert.join.noconditionaltask.size that can be over subscribed\n" +
"by queries running in LLAP mode. This factor has to be from 0.0 to 1.0. Default is 20% over subscription.\n"),
LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY("hive.llap.memory.oversubscription.max.executors.per.query",
-1,
"Used along with hive.llap.mapjoin.memory.oversubscribe.factor to limit the number of executors from\n" +
"which memory for mapjoin can be borrowed. Default 3 (from 3 other executors\n" +
"hive.llap.mapjoin.memory.oversubscribe.factor amount of memory can be borrowed based on which mapjoin\n" +
"conversion decision will be made). This is only an upper bound. Lower bound is determined by number of\n" +
"executors and configured max concurrency."),
LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL("hive.llap.mapjoin.memory.monitor.check.interval", 100000L,
"Check memory usage of mapjoin hash tables after every interval of this many rows. If map join hash table\n" +
"memory usage exceeds (hive.auto.convert.join.noconditionaltask.size * hive.hash.table.inflation.factor)\n" +
"when running in LLAP, tasks will get killed and not retried. Set the value to 0 to disable this feature."),
LLAP_DAEMON_AM_REPORTER_MAX_THREADS("hive.llap.daemon.am-reporter.max.threads", 4,
"Maximum number of threads to be used for AM reporter. If this is lower than number of\n" +
"executors in llap daemon, it would be set to number of executors at runtime.",
"llap.daemon.am-reporter.max.threads"),
LLAP_DAEMON_RPC_PORT("hive.llap.daemon.rpc.port", 0, "The LLAP daemon RPC port.",
"llap.daemon.rpc.port. A value of 0 indicates a dynamic port"),
LLAP_DAEMON_MEMORY_PER_INSTANCE_MB("hive.llap.daemon.memory.per.instance.mb", 4096,
"The total amount of memory to use for the executors inside LLAP (in megabytes).",
"llap.daemon.memory.per.instance.mb"),
LLAP_DAEMON_XMX_HEADROOM("hive.llap.daemon.xmx.headroom", "5%",
"The total amount of heap memory set aside by LLAP and not used by the executors. Can\n" +
"be specified as size (e.g. '512Mb'), or percentage (e.g. '5%'). Note that the latter is\n" +
"derived from the total daemon XMX, which can be different from the total executor\n" +
"memory if the cache is on-heap; although that's not the default configuration."),
LLAP_DAEMON_VCPUS_PER_INSTANCE("hive.llap.daemon.vcpus.per.instance", 4,
"The total number of vcpus to use for the executors inside LLAP.",
"llap.daemon.vcpus.per.instance"),
LLAP_DAEMON_NUM_FILE_CLEANER_THREADS("hive.llap.daemon.num.file.cleaner.threads", 1,
"Number of file cleaner threads in LLAP.", "llap.daemon.num.file.cleaner.threads"),
LLAP_FILE_CLEANUP_DELAY_SECONDS("hive.llap.file.cleanup.delay.seconds", "300s",
new TimeValidator(TimeUnit.SECONDS),
"How long to delay before cleaning up query files in LLAP (in seconds, for debugging).",
"llap.file.cleanup.delay-seconds"),
LLAP_DAEMON_SERVICE_HOSTS("hive.llap.daemon.service.hosts", null,
"Explicitly specified hosts to use for LLAP scheduling. Useful for testing. By default,\n" +
"YARN registry is used.", "llap.daemon.service.hosts"),
LLAP_DAEMON_SERVICE_REFRESH_INTERVAL("hive.llap.daemon.service.refresh.interval.sec", "60s",
new TimeValidator(TimeUnit.SECONDS),
"LLAP YARN registry service list refresh delay, in seconds.",
"llap.daemon.service.refresh.interval"),
LLAP_DAEMON_COMMUNICATOR_NUM_THREADS("hive.llap.daemon.communicator.num.threads", 10,
"Number of threads to use in LLAP task communicator in Tez AM.",
"llap.daemon.communicator.num.threads"),
LLAP_PLUGIN_CLIENT_NUM_THREADS("hive.llap.plugin.client.num.threads", 10,
"Number of threads to use in LLAP task plugin client."),
LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS("hive.llap.daemon.download.permanent.fns", false,
"Whether LLAP daemon should localize the resources for permanent UDFs."),
LLAP_TASK_SCHEDULER_AM_REGISTRY_NAME("hive.llap.task.scheduler.am.registry", "llap",
"AM registry name for LLAP task scheduler plugin to register with."),
LLAP_TASK_SCHEDULER_AM_REGISTRY_PRINCIPAL("hive.llap.task.scheduler.am.registry.principal", "",
"The name of the principal used to access ZK AM registry securely."),
LLAP_TASK_SCHEDULER_AM_REGISTRY_KEYTAB_FILE("hive.llap.task.scheduler.am.registry.keytab.file", "",
"The path to the Kerberos keytab file used to access ZK AM registry securely."),
LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS(
"hive.llap.task.scheduler.node.reenable.min.timeout.ms", "200ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Minimum time after which a previously disabled node will be re-enabled for scheduling,\n" +
"in milliseconds. This may be modified by an exponential back-off if failures persist.",
"llap.task.scheduler.node.re-enable.min.timeout.ms"),
LLAP_TASK_SCHEDULER_NODE_REENABLE_MAX_TIMEOUT_MS(
"hive.llap.task.scheduler.node.reenable.max.timeout.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Maximum time after which a previously disabled node will be re-enabled for scheduling,\n" +
"in milliseconds. This may be modified by an exponential back-off if failures persist.",
"llap.task.scheduler.node.re-enable.max.timeout.ms"),
LLAP_TASK_SCHEDULER_NODE_DISABLE_BACK_OFF_FACTOR(
"hive.llap.task.scheduler.node.disable.backoff.factor", 1.5f,
"Backoff factor on successive blacklists of a node due to some failures. Blacklist times\n" +
"start at the min timeout and go up to the max timeout based on this backoff factor.",
"llap.task.scheduler.node.disable.backoff.factor"),
LLAP_TASK_SCHEDULER_PREEMPT_INDEPENDENT("hive.llap.task.scheduler.preempt.independent", false,
"Whether the AM LLAP scheduler should preempt a lower priority task for a higher pri one\n" +
"even if the former doesn't depend on the latter (e.g. for two parallel sides of a union)."),
LLAP_TASK_SCHEDULER_NUM_SCHEDULABLE_TASKS_PER_NODE(
"hive.llap.task.scheduler.num.schedulable.tasks.per.node", 0,
"The number of tasks the AM TaskScheduler will try allocating per node. 0 indicates that\n" +
"this should be picked up from the Registry. -1 indicates unlimited capacity; positive\n" +
"values indicate a specific bound.", "llap.task.scheduler.num.schedulable.tasks.per.node"),
LLAP_TASK_SCHEDULER_LOCALITY_DELAY(
"hive.llap.task.scheduler.locality.delay", "0ms",
new TimeValidator(TimeUnit.MILLISECONDS, -1l, true, Long.MAX_VALUE, true),
"Amount of time to wait before allocating a request which contains location information," +
" to a location other than the ones requested. Set to -1 for an infinite delay, 0" +
"for no delay."
),
LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS(
"hive.llap.daemon.task.preemption.metrics.intervals", "30,60,300",
"Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" +
" for percentile latency metrics. Used by LLAP daemon task scheduler metrics for\n" +
" time taken to kill task (due to pre-emption) and useful time wasted by the task that\n" +
" is about to be preempted."
),
LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE("hive.llap.daemon.task.scheduler.wait.queue.size",
10, "LLAP scheduler maximum queue size.", "llap.daemon.task.scheduler.wait.queue.size"),
LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME(
"hive.llap.daemon.wait.queue.comparator.class.name",
"org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator",
"The priority comparator to use for LLAP scheduler priority queue. The built-in options\n" +
"are org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator and\n" +
".....FirstInFirstOutComparator", "llap.daemon.wait.queue.comparator.class.name"),
LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION(
"hive.llap.daemon.task.scheduler.enable.preemption", true,
"Whether non-finishable running tasks (e.g. a reducer waiting for inputs) should be\n" +
"preempted by finishable tasks inside LLAP scheduler.",
"llap.daemon.task.scheduler.enable.preemption"),
LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS(
"hive.llap.task.communicator.connection.timeout.ms", "16000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Connection timeout (in milliseconds) before a failure to an LLAP daemon from Tez AM.",
"llap.task.communicator.connection.timeout-millis"),
LLAP_TASK_COMMUNICATOR_LISTENER_THREAD_COUNT(
"hive.llap.task.communicator.listener.thread-count", 30,
"The number of task communicator listener threads."),
LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS(
"hive.llap.task.communicator.connection.sleep.between.retries.ms", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Sleep duration (in milliseconds) to wait before retrying on error when obtaining a\n" +
"connection to LLAP daemon from Tez AM.",
"llap.task.communicator.connection.sleep-between-retries-millis"),
LLAP_DAEMON_WEB_PORT("hive.llap.daemon.web.port", 15002, "LLAP daemon web UI port.",
"llap.daemon.service.port"),
LLAP_DAEMON_WEB_SSL("hive.llap.daemon.web.ssl", false,
"Whether LLAP daemon web UI should use SSL.", "llap.daemon.service.ssl"),
LLAP_CLIENT_CONSISTENT_SPLITS("hive.llap.client.consistent.splits", true,
"Whether to setup split locations to match nodes on which llap daemons are running, " +
"instead of using the locations provided by the split itself. If there is no llap daemon " +
"running, fall back to locations provided by the split. This is effective only if " +
"hive.execution.mode is llap"),
LLAP_VALIDATE_ACLS("hive.llap.validate.acls", true,
"Whether LLAP should reject permissive ACLs in some cases (e.g. its own management\n" +
"protocol or ZK paths), similar to how ssh refuses a key with bad access permissions."),
LLAP_DAEMON_OUTPUT_SERVICE_PORT("hive.llap.daemon.output.service.port", 15003,
"LLAP daemon output service port"),
LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT("hive.llap.daemon.output.stream.timeout", "120s",
new TimeValidator(TimeUnit.SECONDS),
"The timeout for the client to connect to LLAP output service and start the fragment\n" +
"output after sending the fragment. The fragment will fail if its output is not claimed."),
LLAP_DAEMON_OUTPUT_SERVICE_SEND_BUFFER_SIZE("hive.llap.daemon.output.service.send.buffer.size",
128 * 1024, "Send buffer size to be used by LLAP daemon output service"),
LLAP_DAEMON_OUTPUT_SERVICE_MAX_PENDING_WRITES("hive.llap.daemon.output.service.max.pending.writes",
8, "Maximum number of queued writes allowed per connection when sending data\n" +
" via the LLAP output service to external clients."),
LLAP_EXTERNAL_SPLITS_TEMP_TABLE_STORAGE_FORMAT("hive.llap.external.splits.temp.table.storage.format",
"orc", new StringSet("default", "text", "orc"),
"Storage format for temp tables created using LLAP external client"),
LLAP_EXTERNAL_SPLITS_ORDER_BY_FORCE_SINGLE_SPLIT("hive.llap.external.splits.order.by.force.single.split",
true,
"If LLAP external clients submits ORDER BY queries, force return a single split to guarantee reading\n" +
"data out in ordered way. Setting this to false will let external clients read data out in parallel\n" +
"losing the ordering (external clients are responsible for guaranteeing the ordering)"),
LLAP_ENABLE_GRACE_JOIN_IN_LLAP("hive.llap.enable.grace.join.in.llap", false,
"Override if grace join should be allowed to run in llap."),
LLAP_HS2_ENABLE_COORDINATOR("hive.llap.hs2.coordinator.enabled", true,
"Whether to create the LLAP coordinator; since execution engine and container vs llap\n" +
"settings are both coming from job configs, we don't know at start whether this should\n" +
"be created. Default true."),
LLAP_DAEMON_LOGGER("hive.llap.daemon.logger", Constants.LLAP_LOGGER_NAME_QUERY_ROUTING,
new StringSet(Constants.LLAP_LOGGER_NAME_QUERY_ROUTING,
Constants.LLAP_LOGGER_NAME_RFA,
Constants.LLAP_LOGGER_NAME_CONSOLE),
"logger used for llap-daemons."),
LLAP_OUTPUT_FORMAT_ARROW("hive.llap.output.format.arrow", true,
"Whether LLapOutputFormatService should output arrow batches"),
HIVE_TRIGGER_VALIDATION_INTERVAL("hive.trigger.validation.interval", "500ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Interval for validating triggers during execution of a query. Triggers defined in resource plan will get\n" +
"validated for all SQL operations after every defined interval (default: 500ms) and corresponding action\n" +
"defined in the trigger will be taken"),
SPARK_USE_OP_STATS("hive.spark.use.op.stats", true,
"Whether to use operator stats to determine reducer parallelism for Hive on Spark.\n" +
"If this is false, Hive will use source table stats to determine reducer\n" +
"parallelism for all first level reduce tasks, and the maximum reducer parallelism\n" +
"from all parents for all the rest (second level and onward) reducer tasks."),
SPARK_USE_TS_STATS_FOR_MAPJOIN("hive.spark.use.ts.stats.for.mapjoin", false,
"If this is set to true, mapjoin optimization in Hive/Spark will use statistics from\n" +
"TableScan operators at the root of operator tree, instead of parent ReduceSink\n" +
"operators of the Join operator."),
SPARK_OPTIMIZE_SHUFFLE_SERDE("hive.spark.optimize.shuffle.serde", false,
"If this is set to true, Hive on Spark will register custom serializers for data types\n" +
"in shuffle. This should result in less shuffled data."),
SPARK_CLIENT_FUTURE_TIMEOUT("hive.spark.client.future.timeout",
"60s", new TimeValidator(TimeUnit.SECONDS),
"Timeout for requests from Hive client to remote Spark driver."),
SPARK_JOB_MONITOR_TIMEOUT("hive.spark.job.monitor.timeout",
"60s", new TimeValidator(TimeUnit.SECONDS),
"Timeout for job monitor to get Spark job state."),
SPARK_RPC_CLIENT_CONNECT_TIMEOUT("hive.spark.client.connect.timeout",
"1000ms", new TimeValidator(TimeUnit.MILLISECONDS),
"Timeout for remote Spark driver in connecting back to Hive client."),
SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT("hive.spark.client.server.connect.timeout",
"90000ms", new TimeValidator(TimeUnit.MILLISECONDS),
"Timeout for handshake between Hive client and remote Spark driver. Checked by both processes."),
SPARK_RPC_SECRET_RANDOM_BITS("hive.spark.client.secret.bits", "256",
"Number of bits of randomness in the generated secret for communication between Hive client and remote Spark driver. " +
"Rounded down to the nearest multiple of 8."),
SPARK_RPC_MAX_THREADS("hive.spark.client.rpc.threads", 8,
"Maximum number of threads for remote Spark driver's RPC event loop."),
SPARK_RPC_MAX_MESSAGE_SIZE("hive.spark.client.rpc.max.size", 50 * 1024 * 1024,
"Maximum message size in bytes for communication between Hive client and remote Spark driver. Default is 50MB."),
SPARK_RPC_CHANNEL_LOG_LEVEL("hive.spark.client.channel.log.level", null,
"Channel logging level for remote Spark driver. One of {DEBUG, ERROR, INFO, TRACE, WARN}."),
SPARK_RPC_SASL_MECHANISM("hive.spark.client.rpc.sasl.mechanisms", "DIGEST-MD5",
"Name of the SASL mechanism to use for authentication."),
SPARK_RPC_SERVER_ADDRESS("hive.spark.client.rpc.server.address", "",
"The server address of HiverServer2 host to be used for communication between Hive client and remote Spark driver. " +
"Default is empty, which means the address will be determined in the same way as for hive.server2.thrift.bind.host." +
"This is only necessary if the host has multiple network addresses and if a different network address other than " +
"hive.server2.thrift.bind.host is to be used."),
SPARK_RPC_SERVER_PORT("hive.spark.client.rpc.server.port", "", "A list of port ranges which can be used by RPC server " +
"with the format of 49152-49222,49228 and a random one is selected from the list. Default is empty, which randomly " +
"selects one port from all available ones."),
SPARK_DYNAMIC_PARTITION_PRUNING(
"hive.spark.dynamic.partition.pruning", false,
"When dynamic pruning is enabled, joins on partition keys will be processed by writing\n" +
"to a temporary HDFS file, and read later for removing unnecessary partitions."),
SPARK_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE(
"hive.spark.dynamic.partition.pruning.max.data.size", 100*1024*1024L,
"Maximum total data size in dynamic pruning."),
SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY(
"hive.spark.dynamic.partition.pruning.map.join.only", false,
"Turn on dynamic partition pruning only for map joins.\n" +
"If hive.spark.dynamic.partition.pruning is set to true, this parameter value is ignored."),
SPARK_USE_GROUPBY_SHUFFLE(
"hive.spark.use.groupby.shuffle", true,
"Spark groupByKey transformation has better performance but uses unbounded memory." +
"Turn this off when there is a memory issue."),
SPARK_JOB_MAX_TASKS("hive.spark.job.max.tasks", -1, "The maximum number of tasks a Spark job may have.\n" +
"If a Spark job contains more tasks than the maximum, it will be cancelled. A value of -1 means no limit."),
SPARK_STAGE_MAX_TASKS("hive.spark.stage.max.tasks", -1, "The maximum number of tasks a stage in a Spark job may have.\n" +
"If a Spark job stage contains more tasks than the maximum, the job will be cancelled. A value of -1 means no limit."),
NWAYJOINREORDER("hive.reorder.nway.joins", true,
"Runs reordering of tables within single n-way join (i.e.: picks streamtable)"),
HIVE_MERGE_NWAY_JOINS("hive.merge.nway.joins", true,
"Merge adjacent joins into a single n-way join"),
HIVE_LOG_N_RECORDS("hive.log.every.n.records", 0L, new RangeValidator(0L, null),
"If value is greater than 0 logs in fixed intervals of size n rather than exponentially."),
HIVE_MSCK_PATH_VALIDATION("hive.msck.path.validation", "throw",
new StringSet("throw", "skip", "ignore"), "The approach msck should take with HDFS " +
"directories that are partition-like but contain unsupported characters. 'throw' (an " +
"exception) is the default; 'skip' will skip the invalid directories and still repair the" +
" others; 'ignore' will skip the validation (legacy behavior, causes bugs in many cases)"),
HIVE_MSCK_REPAIR_BATCH_SIZE(
"hive.msck.repair.batch.size", 3000,
"Batch size for the msck repair command. If the value is greater than zero,\n "
+ "it will execute batch wise with the configured batch size. In case of errors while\n"
+ "adding unknown partitions the batch size is automatically reduced by half in the subsequent\n"
+ "retry attempt. The default value is 3000 which means it will execute in the batches of 3000."),
HIVE_MSCK_REPAIR_BATCH_MAX_RETRIES("hive.msck.repair.batch.max.retries", 4,
"Maximum number of retries for the msck repair command when adding unknown partitions.\n "
+ "If the value is greater than zero it will retry adding unknown partitions until the maximum\n"
+ "number of attempts is reached or batch size is reduced to 0, whichever is earlier.\n"
+ "In each retry attempt it will reduce the batch size by a factor of 2 until it reaches zero.\n"
+ "If the value is set to zero it will retry until the batch size becomes zero as described above."),
HIVE_SERVER2_LLAP_CONCURRENT_QUERIES("hive.server2.llap.concurrent.queries", -1,
"The number of queries allowed in parallel via llap. Negative number implies 'infinite'."),
HIVE_TEZ_ENABLE_MEMORY_MANAGER("hive.tez.enable.memory.manager", true,
"Enable memory manager for tez"),
HIVE_HASH_TABLE_INFLATION_FACTOR("hive.hash.table.inflation.factor", (float) 2.0,
"Expected inflation factor between disk/in memory representation of hash tables"),
HIVE_LOG_TRACE_ID("hive.log.trace.id", "",
"Log tracing id that can be used by upstream clients for tracking respective logs. " +
"Truncated to " + LOG_PREFIX_LENGTH + " characters. Defaults to use auto-generated session id."),
HIVE_MM_AVOID_GLOBSTATUS_ON_S3("hive.mm.avoid.s3.globstatus", true,
"Whether to use listFiles (optimized on S3) instead of globStatus when on S3."),
// If a parameter is added to the restricted list, add a test in TestRestrictedList.Java
HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list",
"hive.security.authenticator.manager,hive.security.authorization.manager," +
"hive.security.metastore.authorization.manager,hive.security.metastore.authenticator.manager," +
"hive.users.in.admin.role,hive.server2.xsrf.filter.enabled,hive.security.authorization.enabled," +
"hive.distcp.privileged.doAs," +
"hive.server2.authentication.ldap.baseDN," +
"hive.server2.authentication.ldap.url," +
"hive.server2.authentication.ldap.Domain," +
"hive.server2.authentication.ldap.groupDNPattern," +
"hive.server2.authentication.ldap.groupFilter," +
"hive.server2.authentication.ldap.userDNPattern," +
"hive.server2.authentication.ldap.userFilter," +
"hive.server2.authentication.ldap.groupMembershipKey," +
"hive.server2.authentication.ldap.userMembershipKey," +
"hive.server2.authentication.ldap.groupClassKey," +
"hive.server2.authentication.ldap.customLDAPQuery," +
"hive.privilege.synchronizer.interval," +
"hive.spark.client.connect.timeout," +
"hive.spark.client.server.connect.timeout," +
"hive.spark.client.channel.log.level," +
"hive.spark.client.rpc.max.size," +
"hive.spark.client.rpc.threads," +
"hive.spark.client.secret.bits," +
"hive.spark.client.rpc.server.address," +
"hive.spark.client.rpc.server.port," +
"hive.spark.client.rpc.sasl.mechanisms," +
"bonecp.,"+
"hive.druid.broker.address.default,"+
"hive.druid.coordinator.address.default,"+
"hikari.,"+
"hadoop.bin.path,"+
"yarn.bin.path,"+
"spark.home",
"Comma separated list of configuration options which are immutable at runtime"),
HIVE_CONF_HIDDEN_LIST("hive.conf.hidden.list",
METASTOREPWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname
+ "," + DRUID_METADATA_DB_PASSWORD.varname,
"Comma separated list of configuration options which should not be read by normal user like passwords"),
HIVE_CONF_INTERNAL_VARIABLE_LIST("hive.conf.internal.variable.list",
"hive.added.files.path,hive.added.jars.path,hive.added.archives.path",
"Comma separated list of variables which are used internally and should not be configurable."),
HIVE_SPARK_RSC_CONF_LIST("hive.spark.rsc.conf.list",
SPARK_OPTIMIZE_SHUFFLE_SERDE.varname + "," +
SPARK_CLIENT_FUTURE_TIMEOUT.varname,
"Comma separated list of variables which are related to remote spark context.\n" +
"Changing these variables will result in re-creating the spark session."),
HIVE_QUERY_TIMEOUT_SECONDS("hive.query.timeout.seconds", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Timeout for Running Query in seconds. A nonpositive value means infinite. " +
"If the query timeout is also set by thrift API call, the smaller one will be taken."),
HIVE_EXEC_INPUT_LISTING_MAX_THREADS("hive.exec.input.listing.max.threads", 0, new SizeValidator(0L, true, 1024L, true),
"Maximum number of threads that Hive uses to list file information from file systems (recommended > 1 for blobstore)."),
HIVE_QUERY_REEXECUTION_ENABLED("hive.query.reexecution.enabled", true,
"Enable query reexecutions"),
HIVE_QUERY_REEXECUTION_STRATEGIES("hive.query.reexecution.strategies", "overlay,reoptimize",
"comma separated list of plugin can be used:\n"
+ " overlay: hiveconf subtree 'reexec.overlay' is used as an overlay in case of an execution errors out\n"
+ " reoptimize: collects operator statistics during execution and recompile the query after a failure"),
HIVE_QUERY_REEXECUTION_STATS_PERSISTENCE("hive.query.reexecution.stats.persist.scope", "query",
new StringSet("query", "hiveserver", "metastore"),
"Sets the persistence scope of runtime statistics\n"
+ " query: runtime statistics are only used during re-execution\n"
+ " hiveserver: runtime statistics are persisted in the hiveserver - all sessions share it\n"
+ " metastore: runtime statistics are persisted in the metastore as well"),
HIVE_QUERY_MAX_REEXECUTION_COUNT("hive.query.reexecution.max.count", 1,
"Maximum number of re-executions for a single query."),
HIVE_QUERY_REEXECUTION_ALWAYS_COLLECT_OPERATOR_STATS("hive.query.reexecution.always.collect.operator.stats", false,
"If sessionstats are enabled; this option can be used to collect statistics all the time"),
HIVE_QUERY_REEXECUTION_STATS_CACHE_BATCH_SIZE("hive.query.reexecution.stats.cache.batch.size", -1,
"If runtime stats are stored in metastore; the maximal batch size per round during load."),
HIVE_QUERY_REEXECUTION_STATS_CACHE_SIZE("hive.query.reexecution.stats.cache.size", 100_000,
"Size of the runtime statistics cache. Unit is: OperatorStat entry; a query plan consist ~100."),
HIVE_QUERY_RESULTS_CACHE_ENABLED("hive.query.results.cache.enabled", true,
"If the query results cache is enabled. This will keep results of previously executed queries " +
"to be reused if the same query is executed again."),
HIVE_QUERY_RESULTS_CACHE_NONTRANSACTIONAL_TABLES_ENABLED("hive.query.results.cache.nontransactional.tables.enabled", false,
"If the query results cache is enabled for queries involving non-transactional tables." +
"Users who enable this setting should be willing to tolerate some amount of stale results in the cache."),
HIVE_QUERY_RESULTS_CACHE_WAIT_FOR_PENDING_RESULTS("hive.query.results.cache.wait.for.pending.results", true,
"Should a query wait for the pending results of an already running query, " +
"in order to use the cached result when it becomes ready"),
HIVE_QUERY_RESULTS_CACHE_DIRECTORY("hive.query.results.cache.directory",
"/tmp/hive/_resultscache_",
"Location of the query results cache directory. Temporary results from queries " +
"will be moved to this location."),
HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_LIFETIME("hive.query.results.cache.max.entry.lifetime", "3600s",
new TimeValidator(TimeUnit.SECONDS),
"Maximum lifetime in seconds for an entry in the query results cache. A nonpositive value means infinite."),
HIVE_QUERY_RESULTS_CACHE_MAX_SIZE("hive.query.results.cache.max.size",
(long) 2 * 1024 * 1024 * 1024,
"Maximum total size in bytes that the query results cache directory is allowed to use on the filesystem."),
HIVE_QUERY_RESULTS_CACHE_MAX_ENTRY_SIZE("hive.query.results.cache.max.entry.size",
(long) 10 * 1024 * 1024,
"Maximum size in bytes that a single query result is allowed to use in the results cache directory"),
HIVE_NOTFICATION_EVENT_POLL_INTERVAL("hive.notification.event.poll.interval", "60s",
new TimeValidator(TimeUnit.SECONDS),
"How often the notification log is polled for new NotificationEvents from the metastore." +
"A nonpositive value means the notification log is never polled."),
HIVE_NOTFICATION_EVENT_CONSUMERS("hive.notification.event.consumers",
"org.apache.hadoop.hive.ql.cache.results.QueryResultsCache$InvalidationEventConsumer",
"Comma-separated list of class names extending EventConsumer," +
"to handle the NotificationEvents retreived by the notification event poll."),
/* BLOBSTORE section */
HIVE_BLOBSTORE_SUPPORTED_SCHEMES("hive.blobstore.supported.schemes", "s3,s3a,s3n",
"Comma-separated list of supported blobstore schemes."),
HIVE_BLOBSTORE_USE_BLOBSTORE_AS_SCRATCHDIR("hive.blobstore.use.blobstore.as.scratchdir", false,
"Enable the use of scratch directories directly on blob storage systems (it may cause performance penalties)."),
HIVE_BLOBSTORE_OPTIMIZATIONS_ENABLED("hive.blobstore.optimizations.enabled", true,
"This parameter enables a number of optimizations when running on blobstores:\n" +
"(1) If hive.blobstore.use.blobstore.as.scratchdir is false, force the last Hive job to write to the blobstore.\n" +
"This is a performance optimization that forces the final FileSinkOperator to write to the blobstore.\n" +
"See HIVE-15121 for details.");
public final String varname;
public final String altName;
private final String defaultExpr;
public final String defaultStrVal;
public final int defaultIntVal;
public final long defaultLongVal;
public final float defaultFloatVal;
public final boolean defaultBoolVal;
private final Class<?> valClass;
private final VarType valType;
private final Validator validator;
private final String description;
private final boolean excluded;
private final boolean caseSensitive;
ConfVars(String varname, Object defaultVal, String description) {
this(varname, defaultVal, null, description, true, false, null);
}
ConfVars(String varname, Object defaultVal, String description, String altName) {
this(varname, defaultVal, null, description, true, false, altName);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description,
String altName) {
this(varname, defaultVal, validator, description, true, false, altName);
}
ConfVars(String varname, Object defaultVal, String description, boolean excluded) {
this(varname, defaultVal, null, description, true, excluded, null);
}
ConfVars(String varname, String defaultVal, boolean caseSensitive, String description) {
this(varname, defaultVal, null, description, caseSensitive, false, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description) {
this(varname, defaultVal, validator, description, true, false, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description, boolean excluded) {
this(varname, defaultVal, validator, description, true, excluded, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description,
boolean caseSensitive, boolean excluded, String altName) {
this.varname = varname;
this.validator = validator;
this.description = description;
this.defaultExpr = defaultVal == null ? null : String.valueOf(defaultVal);
this.excluded = excluded;
this.caseSensitive = caseSensitive;
this.altName = altName;
if (defaultVal == null || defaultVal instanceof String) {
this.valClass = String.class;
this.valType = VarType.STRING;
this.defaultStrVal = SystemVariables.substitute((String)defaultVal);
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Integer) {
this.valClass = Integer.class;
this.valType = VarType.INT;
this.defaultStrVal = null;
this.defaultIntVal = (Integer)defaultVal;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Long) {
this.valClass = Long.class;
this.valType = VarType.LONG;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = (Long)defaultVal;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Float) {
this.valClass = Float.class;
this.valType = VarType.FLOAT;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = (Float)defaultVal;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Boolean) {
this.valClass = Boolean.class;
this.valType = VarType.BOOLEAN;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = (Boolean)defaultVal;
} else {
throw new IllegalArgumentException("Not supported type value " + defaultVal.getClass() +
" for name " + varname);
}
}
public boolean isType(String value) {
return valType.isType(value);
}
public Validator getValidator() {
return validator;
}
public String validate(String value) {
return validator == null ? null : validator.validate(value);
}
public String validatorDescription() {
return validator == null ? null : validator.toDescription();
}
public String typeString() {
String type = valType.typeString();
if (valType == VarType.STRING && validator != null) {
if (validator instanceof TimeValidator) {
type += "(TIME)";
}
}
return type;
}
public String getRawDescription() {
return description;
}
public String getDescription() {
String validator = validatorDescription();
if (validator != null) {
return validator + ".\n" + description;
}
return description;
}
public boolean isExcluded() {
return excluded;
}
public boolean isCaseSensitive() {
return caseSensitive;
}
@Override
public String toString() {
return varname;
}
private static String findHadoopBinary() {
String val = findHadoopHome();
// if we can't find hadoop home we can at least try /usr/bin/hadoop
val = (val == null ? File.separator + "usr" : val)
+ File.separator + "bin" + File.separator + "hadoop";
// Launch hadoop command file on windows.
return val;
}
private static String findYarnBinary() {
String val = findHadoopHome();
val = (val == null ? "yarn" : val + File.separator + "bin" + File.separator + "yarn");
return val;
}
private static String findMapRedBinary() {
String val = findHadoopHome();
val = (val == null ? "mapred" : val + File.separator + "bin" + File.separator + "mapred");
return val;
}
private static String findHadoopHome() {
String val = System.getenv("HADOOP_HOME");
// Hadoop 2.x deprecated HADOOP_HOME in favor of HADOOP_PREFIX, so fall back to it
if (val == null) {
val = System.getenv("HADOOP_PREFIX");
}
return val;
}
public String getDefaultValue() {
return valType.defaultValueString(this);
}
public String getDefaultExpr() {
return defaultExpr;
}
private Set<String> getValidStringValues() {
if (validator == null || !(validator instanceof StringSet)) {
throw new RuntimeException(varname + " does not specify a list of valid values");
}
return ((StringSet)validator).getExpected();
}
enum VarType {
STRING {
@Override
void checkType(String value) throws Exception { }
@Override
String defaultValueString(ConfVars confVar) { return confVar.defaultStrVal; }
},
INT {
@Override
void checkType(String value) throws Exception { Integer.valueOf(value); }
},
LONG {
@Override
void checkType(String value) throws Exception { Long.valueOf(value); }
},
FLOAT {
@Override
void checkType(String value) throws Exception { Float.valueOf(value); }
},
BOOLEAN {
@Override
void checkType(String value) throws Exception { Boolean.valueOf(value); }
};
boolean isType(String value) {
try { checkType(value); } catch (Exception e) { return false; }
return true;
}
String typeString() { return name().toUpperCase();}
String defaultValueString(ConfVars confVar) { return confVar.defaultExpr; }
abstract void checkType(String value) throws Exception;
}
}
/**
* Writes the default ConfVars out to a byte array and returns an input
* stream wrapping that byte array.
*
* We need this in order to initialize the ConfVar properties
* in the underling Configuration object using the addResource(InputStream)
* method.
*
* It is important to use a LoopingByteArrayInputStream because it turns out
* addResource(InputStream) is broken since Configuration tries to read the
* entire contents of the same InputStream repeatedly without resetting it.
* LoopingByteArrayInputStream has special logic to handle this.
*/
private static synchronized InputStream getConfVarInputStream() {
if (confVarByteArray == null) {
try {
// Create a Hadoop configuration without inheriting default settings.
Configuration conf = new Configuration(false);
applyDefaultNonNullConfVars(conf);
ByteArrayOutputStream confVarBaos = new ByteArrayOutputStream();
conf.writeXml(confVarBaos);
confVarByteArray = confVarBaos.toByteArray();
} catch (Exception e) {
// We're pretty screwed if we can't load the default conf vars
throw new RuntimeException("Failed to initialize default Hive configuration variables!", e);
}
}
return new LoopingByteArrayInputStream(confVarByteArray);
}
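// For example (illustrative, mirroring what initialize() does below): the defaults serialized
// above can be applied to a bare Configuration with
//   Configuration c = new Configuration(false);
//   c.addResource(getConfVarInputStream());
// after which c.get(ConfVars.HIVE_QUERY_TIMEOUT_SECONDS.varname) returns the default "0s".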
public void verifyAndSet(String name, String value) throws IllegalArgumentException {
if (modWhiteListPattern != null) {
Matcher wlMatcher = modWhiteListPattern.matcher(name);
if (!wlMatcher.matches()) {
throw new IllegalArgumentException("Cannot modify " + name + " at runtime. "
+ "It is not in list of params that are allowed to be modified at runtime");
}
}
if (Iterables.any(restrictList,
restrictedVar -> name != null && name.startsWith(restrictedVar))) {
throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list"
+ " of parameters that can't be modified at runtime or is prefixed by a restricted variable");
}
String oldValue = name != null ? get(name) : null;
if (name == null || value == null || !value.equals(oldValue)) {
// When either name or value is null, the set method below will fail,
// and throw IllegalArgumentException
set(name, value);
if (isSparkRelatedConfig(name)) {
isSparkConfigUpdated = true;
}
}
}
public boolean isHiddenConfig(String name) {
return Iterables.any(hiddenSet, hiddenVar -> name.startsWith(hiddenVar));
}
public static boolean isEncodedPar(String name) {
for (ConfVars confVar : HiveConf.ENCODED_CONF) {
if (confVar.varname.equals(name)) {
return true;
}
}
return false;
}
/**
* check whether spark related property is updated, which includes spark configurations,
* RSC configurations and yarn configuration in Spark on YARN mode.
* @param name
* @return
*/
private boolean isSparkRelatedConfig(String name) {
boolean result = false;
if (name.startsWith("spark")) { // Spark property.
// for now we don't support changing spark app name on the fly
result = !name.equals("spark.app.name");
} else if (name.startsWith("yarn")) { // YARN property in Spark on YARN mode.
String sparkMaster = get("spark.master");
if (sparkMaster != null && sparkMaster.startsWith("yarn")) {
result = true;
}
} else if (rscList.stream().anyMatch(rscVar -> rscVar.equals(name))) { // Remote Spark Context property.
result = true;
} else if (name.equals("mapreduce.job.queuename")) {
// a special property starting with mapreduce that we would also like to effect if it changes
result = true;
}
return result;
}
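// For example (illustrative): "spark.executor.memory" and "mapreduce.job.queuename" count as
// Spark-related, "spark.app.name" does not, and "yarn.*" keys only count when spark.master
// starts with "yarn".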
public static int getIntVar(Configuration conf, ConfVars var) {
assert (var.valClass == Integer.class) : var.varname;
if (var.altName != null) {
return conf.getInt(var.varname, conf.getInt(var.altName, var.defaultIntVal));
}
return conf.getInt(var.varname, var.defaultIntVal);
}
public static void setIntVar(Configuration conf, ConfVars var, int val) {
assert (var.valClass == Integer.class) : var.varname;
conf.setInt(var.varname, val);
}
public int getIntVar(ConfVars var) {
return getIntVar(this, var);
}
public void setIntVar(ConfVars var, int val) {
setIntVar(this, var, val);
}
public static long getTimeVar(Configuration conf, ConfVars var, TimeUnit outUnit) {
return toTime(getVar(conf, var), getDefaultTimeUnit(var), outUnit);
}
public static void setTimeVar(Configuration conf, ConfVars var, long time, TimeUnit timeunit) {
assert (var.valClass == String.class) : var.varname;
conf.set(var.varname, time + stringFor(timeunit));
}
public long getTimeVar(ConfVars var, TimeUnit outUnit) {
return getTimeVar(this, var, outUnit);
}
public void setTimeVar(ConfVars var, long time, TimeUnit outUnit) {
setTimeVar(this, var, time, outUnit);
}
public static long getSizeVar(Configuration conf, ConfVars var) {
return toSizeBytes(getVar(conf, var));
}
public long getSizeVar(ConfVars var) {
return getSizeVar(this, var);
}
public static TimeUnit getDefaultTimeUnit(ConfVars var) {
TimeUnit inputUnit = null;
if (var.validator instanceof TimeValidator) {
inputUnit = ((TimeValidator)var.validator).getTimeUnit();
}
return inputUnit;
}
public static long toTime(String value, TimeUnit inputUnit, TimeUnit outUnit) {
String[] parsed = parseNumberFollowedByUnit(value.trim());
return outUnit.convert(Long.parseLong(parsed[0].trim()), unitFor(parsed[1].trim(), inputUnit));
}
public static long toSizeBytes(String value) {
String[] parsed = parseNumberFollowedByUnit(value.trim());
return Long.parseLong(parsed[0].trim()) * multiplierFor(parsed[1].trim());
}
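// For example (illustrative): toSizeBytes("10mb") is 10 * 1024 * 1024 and
// toTime("30s", null, TimeUnit.MILLISECONDS) is 30000; the numeric prefix and unit suffix
// are split by parseNumberFollowedByUnit() and resolved by unitFor()/multiplierFor() below.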
private static String[] parseNumberFollowedByUnit(String value) {
char[] chars = value.toCharArray();
int i = 0;
for (; i < chars.length && (chars[i] == '-' || Character.isDigit(chars[i])); i++) {
}
return new String[] {value.substring(0, i), value.substring(i)};
}
private static Set<String> daysSet = ImmutableSet.of("d", "D", "day", "DAY", "days", "DAYS");
private static Set<String> hoursSet = ImmutableSet.of("h", "H", "hour", "HOUR", "hours", "HOURS");
private static Set<String> minutesSet = ImmutableSet.of("m", "M", "min", "MIN", "mins", "MINS",
"minute", "MINUTE", "minutes", "MINUTES");
private static Set<String> secondsSet = ImmutableSet.of("s", "S", "sec", "SEC", "secs", "SECS",
"second", "SECOND", "seconds", "SECONDS");
private static Set<String> millisSet = ImmutableSet.of("ms", "MS", "msec", "MSEC", "msecs", "MSECS",
"millisecond", "MILLISECOND", "milliseconds", "MILLISECONDS");
private static Set<String> microsSet = ImmutableSet.of("us", "US", "usec", "USEC", "usecs", "USECS",
"microsecond", "MICROSECOND", "microseconds", "MICROSECONDS");
private static Set<String> nanosSet = ImmutableSet.of("ns", "NS", "nsec", "NSEC", "nsecs", "NSECS",
"nanosecond", "NANOSECOND", "nanoseconds", "NANOSECONDS");
public static TimeUnit unitFor(String unit, TimeUnit defaultUnit) {
unit = unit.trim().toLowerCase();
if (unit.isEmpty() || unit.equals("l")) {
if (defaultUnit == null) {
throw new IllegalArgumentException("Time unit is not specified");
}
return defaultUnit;
} else if (daysSet.contains(unit)) {
return TimeUnit.DAYS;
} else if (hoursSet.contains(unit)) {
return TimeUnit.HOURS;
} else if (minutesSet.contains(unit)) {
return TimeUnit.MINUTES;
} else if (secondsSet.contains(unit)) {
return TimeUnit.SECONDS;
} else if (millisSet.contains(unit)) {
return TimeUnit.MILLISECONDS;
} else if (microsSet.contains(unit)) {
return TimeUnit.MICROSECONDS;
} else if (nanosSet.contains(unit)) {
return TimeUnit.NANOSECONDS;
}
throw new IllegalArgumentException("Invalid time unit " + unit);
}
public static long multiplierFor(String unit) {
unit = unit.trim().toLowerCase();
if (unit.isEmpty() || unit.equals("b") || unit.equals("bytes")) {
return 1;
} else if (unit.equals("kb")) {
return 1024;
} else if (unit.equals("mb")) {
return 1024*1024;
} else if (unit.equals("gb")) {
return 1024*1024*1024;
} else if (unit.equals("tb")) {
return 1024L*1024*1024*1024;
} else if (unit.equals("pb")) {
return 1024L*1024*1024*1024*1024;
}
throw new IllegalArgumentException("Invalid size unit " + unit);
}
public static String stringFor(TimeUnit timeunit) {
switch (timeunit) {
case DAYS: return "day";
case HOURS: return "hour";
case MINUTES: return "min";
case SECONDS: return "sec";
case MILLISECONDS: return "msec";
case MICROSECONDS: return "usec";
case NANOSECONDS: return "nsec";
}
throw new IllegalArgumentException("Invalid timeunit " + timeunit);
}
public static long getLongVar(Configuration conf, ConfVars var) {
assert (var.valClass == Long.class) : var.varname;
if (var.altName != null) {
return conf.getLong(var.varname, conf.getLong(var.altName, var.defaultLongVal));
}
return conf.getLong(var.varname, var.defaultLongVal);
}
public static long getLongVar(Configuration conf, ConfVars var, long defaultVal) {
if (var.altName != null) {
return conf.getLong(var.varname, conf.getLong(var.altName, defaultVal));
}
return conf.getLong(var.varname, defaultVal);
}
public static void setLongVar(Configuration conf, ConfVars var, long val) {
assert (var.valClass == Long.class) : var.varname;
conf.setLong(var.varname, val);
}
public long getLongVar(ConfVars var) {
return getLongVar(this, var);
}
public void setLongVar(ConfVars var, long val) {
setLongVar(this, var, val);
}
public static float getFloatVar(Configuration conf, ConfVars var) {
assert (var.valClass == Float.class) : var.varname;
if (var.altName != null) {
return conf.getFloat(var.varname, conf.getFloat(var.altName, var.defaultFloatVal));
}
return conf.getFloat(var.varname, var.defaultFloatVal);
}
public static float getFloatVar(Configuration conf, ConfVars var, float defaultVal) {
if (var.altName != null) {
return conf.getFloat(var.varname, conf.getFloat(var.altName, defaultVal));
}
return conf.getFloat(var.varname, defaultVal);
}
public static void setFloatVar(Configuration conf, ConfVars var, float val) {
assert (var.valClass == Float.class) : var.varname;
conf.setFloat(var.varname, val);
}
public float getFloatVar(ConfVars var) {
return getFloatVar(this, var);
}
public void setFloatVar(ConfVars var, float val) {
setFloatVar(this, var, val);
}
public static boolean getBoolVar(Configuration conf, ConfVars var) {
assert (var.valClass == Boolean.class) : var.varname;
if (var.altName != null) {
return conf.getBoolean(var.varname, conf.getBoolean(var.altName, var.defaultBoolVal));
}
return conf.getBoolean(var.varname, var.defaultBoolVal);
}
public static boolean getBoolVar(Configuration conf, ConfVars var, boolean defaultVal) {
if (var.altName != null) {
return conf.getBoolean(var.varname, conf.getBoolean(var.altName, defaultVal));
}
return conf.getBoolean(var.varname, defaultVal);
}
public static void setBoolVar(Configuration conf, ConfVars var, boolean val) {
assert (var.valClass == Boolean.class) : var.varname;
conf.setBoolean(var.varname, val);
}
/* Dynamic partition pruning is enabled in some or all cases if either
* hive.spark.dynamic.partition.pruning is true or
* hive.spark.dynamic.partition.pruning.map.join.only is true
*/
public static boolean isSparkDPPAny(Configuration conf) {
return (conf.getBoolean(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING.varname,
ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING.defaultBoolVal) ||
conf.getBoolean(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY.varname,
ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY.defaultBoolVal));
}
public boolean getBoolVar(ConfVars var) {
return getBoolVar(this, var);
}
public void setBoolVar(ConfVars var, boolean val) {
setBoolVar(this, var, val);
}
public static String getVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
return var.altName != null ? conf.get(var.varname, conf.get(var.altName, var.defaultStrVal))
: conf.get(var.varname, var.defaultStrVal);
}
public static String getVarWithoutType(Configuration conf, ConfVars var) {
return var.altName != null ? conf.get(var.varname, conf.get(var.altName, var.defaultExpr))
: conf.get(var.varname, var.defaultExpr);
}
public static String getTrimmedVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
if (var.altName != null) {
return conf.getTrimmed(var.varname, conf.getTrimmed(var.altName, var.defaultStrVal));
}
return conf.getTrimmed(var.varname, var.defaultStrVal);
}
public static String[] getTrimmedStringsVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
String[] result = conf.getTrimmedStrings(var.varname, (String[])null);
if (result != null) {
return result;
}
if (var.altName != null) {
result = conf.getTrimmedStrings(var.altName, (String[])null);
if (result != null) {
return result;
}
}
return org.apache.hadoop.util.StringUtils.getTrimmedStrings(var.defaultStrVal);
}
public static String getVar(Configuration conf, ConfVars var, String defaultVal) {
String ret = var.altName != null ? conf.get(var.varname, conf.get(var.altName, defaultVal))
: conf.get(var.varname, defaultVal);
return ret;
}
public static String getVar(Configuration conf, ConfVars var, EncoderDecoder<String, String> encoderDecoder) {
return encoderDecoder.decode(getVar(conf, var));
}
public String getLogIdVar(String defaultValue) {
String retval = getVar(ConfVars.HIVE_LOG_TRACE_ID);
if (StringUtils.EMPTY.equals(retval)) {
LOG.info("Using the default value passed in for log id: {}", defaultValue);
retval = defaultValue;
}
if (retval.length() > LOG_PREFIX_LENGTH) {
LOG.warn("The original log id prefix is {} has been truncated to {}", retval,
retval.substring(0, LOG_PREFIX_LENGTH - 1));
retval = retval.substring(0, LOG_PREFIX_LENGTH - 1);
}
return retval;
}
public static void setVar(Configuration conf, ConfVars var, String val) {
assert (var.valClass == String.class) : var.varname;
conf.set(var.varname, val);
}
public static void setVar(Configuration conf, ConfVars var, String val,
EncoderDecoder<String, String> encoderDecoder) {
setVar(conf, var, encoderDecoder.encode(val));
}
public static ConfVars getConfVars(String name) {
return vars.get(name);
}
public static ConfVars getMetaConf(String name) {
return metaConfs.get(name);
}
public String getVar(ConfVars var) {
return getVar(this, var);
}
public void setVar(ConfVars var, String val) {
setVar(this, var, val);
}
public String getQueryString() {
return getQueryString(this);
}
public static String getQueryString(Configuration conf) {
return getVar(conf, ConfVars.HIVEQUERYSTRING, EncoderDecoderFactory.URL_ENCODER_DECODER);
}
public void setQueryString(String query) {
setQueryString(this, query);
}
public static void setQueryString(Configuration conf, String query) {
setVar(conf, ConfVars.HIVEQUERYSTRING, query, EncoderDecoderFactory.URL_ENCODER_DECODER);
}
public void logVars(PrintStream ps) {
for (ConfVars one : ConfVars.values()) {
ps.println(one.varname + "=" + ((get(one.varname) != null) ? get(one.varname) : ""));
}
}
public HiveConf() {
super();
initialize(this.getClass());
}
public HiveConf(Class<?> cls) {
super();
initialize(cls);
}
public HiveConf(Configuration other, Class<?> cls) {
super(other);
initialize(cls);
}
/**
* Copy constructor
*/
public HiveConf(HiveConf other) {
super(other);
hiveJar = other.hiveJar;
auxJars = other.auxJars;
isSparkConfigUpdated = other.isSparkConfigUpdated;
origProp = (Properties)other.origProp.clone();
restrictList.addAll(other.restrictList);
hiddenSet.addAll(other.hiddenSet);
modWhiteListPattern = other.modWhiteListPattern;
}
public Properties getAllProperties() {
return getProperties(this);
}
public static Properties getProperties(Configuration conf) {
Iterator<Map.Entry<String, String>> iter = conf.iterator();
Properties p = new Properties();
while (iter.hasNext()) {
Map.Entry<String, String> e = iter.next();
p.setProperty(e.getKey(), e.getValue());
}
return p;
}
private void initialize(Class<?> cls) {
hiveJar = (new JobConf(cls)).getJar();
// preserve the original configuration
origProp = getAllProperties();
// Overlay the ConfVars. Note that this ignores ConfVars with null values
addResource(getConfVarInputStream());
// Overlay hive-site.xml if it exists
if (hiveSiteURL != null) {
addResource(hiveSiteURL);
}
// if embedded metastore is to be used as per config so far
// then this is considered like the metastore server case
String msUri = this.getVar(HiveConf.ConfVars.METASTOREURIS);
// This is hackery, but having hive-common depend on standalone-metastore is really bad
// because it will pull all of the metastore code into every module. We need to check that
// we aren't using the standalone metastore. If we are, we should treat it the same as a
// remote metastore situation.
if (msUri == null || msUri.isEmpty()) {
msUri = this.get("metastore.thrift.uris");
}
LOG.debug("Found metastore URI of " + msUri);
if(HiveConfUtil.isEmbeddedMetaStore(msUri)){
setLoadMetastoreConfig(true);
}
// load hivemetastore-site.xml if this is metastore and file exists
if (isLoadMetastoreConfig() && hivemetastoreSiteUrl != null) {
addResource(hivemetastoreSiteUrl);
}
// load hiveserver2-site.xml if this is hiveserver2 and file exists
// metastore can be embedded within hiveserver2, in such cases
// the conf params in hiveserver2-site.xml will override whats defined
// in hivemetastore-site.xml
if (isLoadHiveServer2Config() && hiveServer2SiteUrl != null) {
addResource(hiveServer2SiteUrl);
}
// Overlay the values of any system properties whose names appear in the list of ConfVars
applySystemProperties();
if ((this.get("hive.metastore.ds.retry.attempts") != null) ||
this.get("hive.metastore.ds.retry.interval") != null) {
LOG.warn("DEPRECATED: hive.metastore.ds.retry.* no longer has any effect. " +
"Use hive.hmshandler.retry.* instead");
}
// if the running class was loaded directly (through eclipse) rather than through a
// jar then this would be needed
if (hiveJar == null) {
hiveJar = this.get(ConfVars.HIVEJAR.varname);
}
if (auxJars == null) {
auxJars = StringUtils.join(FileUtils.getJarFilesByPath(this.get(ConfVars.HIVEAUXJARS.varname), this), ',');
}
if (getBoolVar(ConfVars.METASTORE_SCHEMA_VERIFICATION)) {
setBoolVar(ConfVars.METASTORE_AUTO_CREATE_ALL, false);
}
if (getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) {
List<String> trimmed = new ArrayList<String>();
for (Map.Entry<String,String> entry : this) {
String key = entry.getKey();
if (key == null || !key.startsWith("hive.")) {
continue;
}
ConfVars var = HiveConf.getConfVars(key);
if (var == null) {
var = HiveConf.getConfVars(key.trim());
if (var != null) {
trimmed.add(key);
}
}
if (var == null) {
LOG.warn("HiveConf of name {} does not exist", key);
} else if (!var.isType(entry.getValue())) {
LOG.warn("HiveConf {} expects {} type value", var.varname, var.typeString());
}
}
for (String key : trimmed) {
set(key.trim(), getRaw(key));
unset(key);
}
}
validateExecutionEngine();
setupSQLStdAuthWhiteList();
// setup list of conf vars that are not allowed to change runtime
setupRestrictList();
hiddenSet.clear();
hiddenSet.addAll(HiveConfUtil.getHiddenSet(this));
setupRSCList();
}
/**
* The 'mr' execution engine is only allowed in tests
*/
private void validateExecutionEngine() {
if("mr".equals(getVar(ConfVars.HIVE_EXECUTION_ENGINE)) && !getBoolVar(ConfVars.HIVE_IN_TEST)) {
throw new IllegalArgumentException("mr execution engine is not supported!");
}
}
/**
* If the config whitelist param for sql standard authorization is not set, set it up here.
*/
private void setupSQLStdAuthWhiteList() {
String whiteListParamsStr = getVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST);
if (whiteListParamsStr == null || whiteListParamsStr.trim().isEmpty()) {
// set the default configs in whitelist
whiteListParamsStr = getSQLStdAuthDefaultWhiteListPattern();
}
setVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST, whiteListParamsStr);
}
private static String getSQLStdAuthDefaultWhiteListPattern() {
// create the default white list from list of safe config params
// and regex list
String confVarPatternStr = Joiner.on("|").join(convertVarsToRegex(sqlStdAuthSafeVarNames));
String regexPatternStr = Joiner.on("|").join(sqlStdAuthSafeVarNameRegexes);
return regexPatternStr + "|" + confVarPatternStr;
}
/**
* Obtains the local time-zone ID.
*/
public ZoneId getLocalTimeZone() {
String timeZoneStr = getVar(ConfVars.HIVE_LOCAL_TIME_ZONE);
return TimestampTZUtil.parseTimeZone(timeZoneStr);
}
/**
* @param paramList list of parameter strings
* @return list of parameter strings with "." replaced by "\."
*/
private static String[] convertVarsToRegex(String[] paramList) {
String[] regexes = new String[paramList.length];
for(int i=0; i<paramList.length; i++) {
regexes[i] = paramList[i].replace(".", "\\." );
}
return regexes;
}
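// For example (illustrative): "hive.exec.parallel" becomes "hive\\.exec\\.parallel", so the
// generated whitelist pattern matches the literal parameter name rather than any character.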
/**
* Default list of modifiable config parameters for sql standard authorization
* For internal use only.
*/
private static final String [] sqlStdAuthSafeVarNames = new String [] {
ConfVars.AGGR_JOIN_TRANSPOSE.varname,
ConfVars.BYTESPERREDUCER.varname,
ConfVars.CLIENT_STATS_COUNTERS.varname,
ConfVars.DEFAULTPARTITIONNAME.varname,
ConfVars.DROPIGNORESNONEXISTENT.varname,
ConfVars.HIVECOUNTERGROUP.varname,
ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname,
ConfVars.HIVEENFORCEBUCKETMAPJOIN.varname,
ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN.varname,
ConfVars.HIVEEXPREVALUATIONCACHE.varname,
ConfVars.HIVEQUERYRESULTFILEFORMAT.varname,
ConfVars.HIVEHASHTABLELOADFACTOR.varname,
ConfVars.HIVEHASHTABLETHRESHOLD.varname,
ConfVars.HIVEIGNOREMAPJOINHINT.varname,
ConfVars.HIVELIMITMAXROWSIZE.varname,
ConfVars.HIVEMAPREDMODE.varname,
ConfVars.HIVEMAPSIDEAGGREGATE.varname,
ConfVars.HIVEOPTIMIZEMETADATAQUERIES.varname,
ConfVars.HIVEROWOFFSET.varname,
ConfVars.HIVEVARIABLESUBSTITUTE.varname,
ConfVars.HIVEVARIABLESUBSTITUTEDEPTH.varname,
ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME.varname,
ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL.varname,
ConfVars.HIVE_CHECK_CROSS_PRODUCT.varname,
ConfVars.HIVE_CLI_TEZ_SESSION_ASYNC.varname,
ConfVars.HIVE_COMPAT.varname,
ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY.varname,
ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION.varname,
ConfVars.HIVE_EXECUTION_ENGINE.varname,
ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname,
ConfVars.HIVE_EXIM_URI_SCHEME_WL.varname,
ConfVars.HIVE_FILE_MAX_FOOTER.varname,
ConfVars.HIVE_INSERT_INTO_MULTILEVEL_DIRS.varname,
ConfVars.HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS.varname,
ConfVars.HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES.varname,
ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED.varname,
ConfVars.HIVE_QUERY_RESULTS_CACHE_WAIT_FOR_PENDING_RESULTS.varname,
ConfVars.HIVE_QUOTEDID_SUPPORT.varname,
ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES.varname,
ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS.varname,
ConfVars.HIVE_SCHEMA_EVOLUTION.varname,
ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname,
ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS.varname,
ConfVars.HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES.varname,
ConfVars.JOB_DEBUG_CAPTURE_STACKTRACES.varname,
ConfVars.JOB_DEBUG_TIMEOUT.varname,
ConfVars.LLAP_IO_ENABLED.varname,
ConfVars.LLAP_IO_USE_FILEID_PATH.varname,
ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname,
ConfVars.LLAP_EXECUTION_MODE.varname,
ConfVars.LLAP_AUTO_ALLOW_UBER.varname,
ConfVars.LLAP_AUTO_ENFORCE_TREE.varname,
ConfVars.LLAP_AUTO_ENFORCE_VECTORIZED.varname,
ConfVars.LLAP_AUTO_ENFORCE_STATS.varname,
ConfVars.LLAP_AUTO_MAX_INPUT.varname,
ConfVars.LLAP_AUTO_MAX_OUTPUT.varname,
ConfVars.LLAP_SKIP_COMPILE_UDF_CHECK.varname,
ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS.varname,
ConfVars.LLAP_ENABLE_GRACE_JOIN_IN_LLAP.varname,
ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname,
ConfVars.MAXCREATEDFILES.varname,
ConfVars.MAXREDUCERS.varname,
ConfVars.NWAYJOINREORDER.varname,
ConfVars.OUTPUT_FILE_EXTENSION.varname,
ConfVars.SHOW_JOB_FAIL_DEBUG_INFO.varname,
ConfVars.TASKLOG_DEBUG_TIMEOUT.varname,
ConfVars.HIVEQUERYID.varname,
ConfVars.HIVEQUERYTAG.varname,
};
/**
* Default list of regexes for config parameters that are modifiable with
* sql standard authorization enabled
*/
static final String [] sqlStdAuthSafeVarNameRegexes = new String [] {
"hive\\.auto\\..*",
"hive\\.cbo\\..*",
"hive\\.convert\\..*",
"hive\\.druid\\..*",
"hive\\.exec\\.dynamic\\.partition.*",
"hive\\.exec\\.max\\.dynamic\\.partitions.*",
"hive\\.exec\\.compress\\..*",
"hive\\.exec\\.infer\\..*",
"hive\\.exec\\.mode.local\\..*",
"hive\\.exec\\.orc\\..*",
"hive\\.exec\\.parallel.*",
"hive\\.explain\\..*",
"hive\\.fetch.task\\..*",
"hive\\.groupby\\..*",
"hive\\.hbase\\..*",
"hive\\.index\\..*",
"hive\\.index\\..*",
"hive\\.intermediate\\..*",
"hive\\.jdbc\\..*",
"hive\\.join\\..*",
"hive\\.limit\\..*",
"hive\\.log\\..*",
"hive\\.mapjoin\\..*",
"hive\\.merge\\..*",
"hive\\.optimize\\..*",
"hive\\.orc\\..*",
"hive\\.outerjoin\\..*",
"hive\\.parquet\\..*",
"hive\\.ppd\\..*",
"hive\\.prewarm\\..*",
"hive\\.server2\\.thrift\\.resultset\\.default\\.fetch\\.size",
"hive\\.server2\\.proxy\\.user",
"hive\\.skewjoin\\..*",
"hive\\.smbjoin\\..*",
"hive\\.stats\\..*",
"hive\\.strict\\..*",
"hive\\.tez\\..*",
"hive\\.vectorized\\..*",
"fs\\.defaultFS",
"ssl\\.client\\.truststore\\.location",
"distcp\\.atomic",
"distcp\\.ignore\\.failures",
"distcp\\.preserve\\.status",
"distcp\\.preserve\\.rawxattrs",
"distcp\\.sync\\.folders",
"distcp\\.delete\\.missing\\.source",
"distcp\\.keystore\\.resource",
"distcp\\.liststatus\\.threads",
"distcp\\.max\\.maps",
"distcp\\.copy\\.strategy",
"distcp\\.skip\\.crc",
"distcp\\.copy\\.overwrite",
"distcp\\.copy\\.append",
"distcp\\.map\\.bandwidth\\.mb",
"distcp\\.dynamic\\..*",
"distcp\\.meta\\.folder",
"distcp\\.copy\\.listing\\.class",
"distcp\\.filters\\.class",
"distcp\\.options\\.skipcrccheck",
"distcp\\.options\\.m",
"distcp\\.options\\.numListstatusThreads",
"distcp\\.options\\.mapredSslConf",
"distcp\\.options\\.bandwidth",
"distcp\\.options\\.overwrite",
"distcp\\.options\\.strategy",
"distcp\\.options\\.i",
"distcp\\.options\\.p.*",
"distcp\\.options\\.update",
"distcp\\.options\\.delete",
"mapred\\.map\\..*",
"mapred\\.reduce\\..*",
"mapred\\.output\\.compression\\.codec",
"mapred\\.job\\.queue\\.name",
"mapred\\.output\\.compression\\.type",
"mapred\\.min\\.split\\.size",
"mapreduce\\.job\\.reduce\\.slowstart\\.completedmaps",
"mapreduce\\.job\\.queuename",
"mapreduce\\.job\\.tags",
"mapreduce\\.input\\.fileinputformat\\.split\\.minsize",
"mapreduce\\.map\\..*",
"mapreduce\\.reduce\\..*",
"mapreduce\\.output\\.fileoutputformat\\.compress\\.codec",
"mapreduce\\.output\\.fileoutputformat\\.compress\\.type",
"oozie\\..*",
"tez\\.am\\..*",
"tez\\.task\\..*",
"tez\\.runtime\\..*",
"tez\\.queue\\.name",
};
/**
* Apply system properties to this object if the property name is defined in ConfVars
* and the value is non-null and not an empty string.
*/
private void applySystemProperties() {
Map<String, String> systemProperties = getConfSystemProperties();
for (Entry<String, String> systemProperty : systemProperties.entrySet()) {
this.set(systemProperty.getKey(), systemProperty.getValue());
}
}
/**
* This method returns a mapping from config variable name to its value for all config variables
* which have been set using System properties
*/
public static Map<String, String> getConfSystemProperties() {
Map<String, String> systemProperties = new HashMap<String, String>();
for (ConfVars oneVar : ConfVars.values()) {
if (System.getProperty(oneVar.varname) != null) {
if (System.getProperty(oneVar.varname).length() > 0) {
systemProperties.put(oneVar.varname, System.getProperty(oneVar.varname));
}
}
}
return systemProperties;
}
/**
* Overlays ConfVar properties with non-null values
*/
private static void applyDefaultNonNullConfVars(Configuration conf) {
for (ConfVars var : ConfVars.values()) {
String defaultValue = var.getDefaultValue();
if (defaultValue == null) {
// Don't override ConfVars with null values
continue;
}
conf.set(var.varname, defaultValue);
}
}
public Properties getChangedProperties() {
Properties ret = new Properties();
Properties newProp = getAllProperties();
for (Object one : newProp.keySet()) {
String oneProp = (String) one;
String oldValue = origProp.getProperty(oneProp);
if (!StringUtils.equals(oldValue, newProp.getProperty(oneProp))) {
ret.setProperty(oneProp, newProp.getProperty(oneProp));
}
}
return (ret);
}
public String getJar() {
return hiveJar;
}
/**
* @return the auxJars
*/
public String getAuxJars() {
return auxJars;
}
/**
* Set the auxiliary jars. Used for unit tests only.
* @param auxJars the auxJars to set.
*/
public void setAuxJars(String auxJars) {
this.auxJars = auxJars;
setVar(this, ConfVars.HIVEAUXJARS, auxJars);
}
public URL getHiveDefaultLocation() {
return hiveDefaultURL;
}
public static void setHiveSiteLocation(URL location) {
hiveSiteURL = location;
}
public static void setHivemetastoreSiteUrl(URL location) {
hivemetastoreSiteUrl = location;
}
public static URL getHiveSiteLocation() {
return hiveSiteURL;
}
public static URL getMetastoreSiteLocation() {
return hivemetastoreSiteUrl;
}
public static URL getHiveServer2SiteLocation() {
return hiveServer2SiteUrl;
}
/**
* @return the user name set in hadoop.job.ugi param or the current user from System
* @throws IOException
*/
public String getUser() throws IOException {
try {
UserGroupInformation ugi = Utils.getUGI();
return ugi.getUserName();
} catch (LoginException le) {
throw new IOException(le);
}
}
public static String getColumnInternalName(int pos) {
return "_col" + pos;
}
public static int getPositionFromInternalName(String internalName) {
Pattern internalPattern = Pattern.compile("_col([0-9]+)");
Matcher m = internalPattern.matcher(internalName);
if (!m.matches()){
return -1;
} else {
return Integer.parseInt(m.group(1));
}
}
/**
* Append a comma-separated list of config vars to the restricted list
* @param restrictListStr
*/
public void addToRestrictList(String restrictListStr) {
if (restrictListStr == null) {
return;
}
String oldList = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST);
if (oldList == null || oldList.isEmpty()) {
this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, restrictListStr);
} else {
this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, oldList + "," + restrictListStr);
}
setupRestrictList();
}
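// For example (illustrative, parameter names are arbitrary):
//   conf.addToRestrictList("hive.exec.scratchdir,hive.query.timeout.seconds");
// both entries then become unmodifiable at runtime via verifyAndSet().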
/**
* Set white list of parameters that are allowed to be modified
*
* @param paramNameRegex
*/
@LimitedPrivate(value = { "Currently only for use by HiveAuthorizer" })
public void setModifiableWhiteListRegex(String paramNameRegex) {
if (paramNameRegex == null) {
return;
}
modWhiteListPattern = Pattern.compile(paramNameRegex);
}
/**
* Add the HIVE_CONF_RESTRICTED_LIST values to restrictList,
* including HIVE_CONF_RESTRICTED_LIST itself
*/
private void setupRestrictList() {
String restrictListStr = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST);
restrictList.clear();
if (restrictListStr != null) {
for (String entry : restrictListStr.split(",")) {
restrictList.add(entry.trim());
}
}
String internalVariableListStr = this.getVar(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST);
if (internalVariableListStr != null) {
for (String entry : internalVariableListStr.split(",")) {
restrictList.add(entry.trim());
}
}
restrictList.add(ConfVars.HIVE_IN_TEST.varname);
restrictList.add(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname);
restrictList.add(ConfVars.HIVE_CONF_HIDDEN_LIST.varname);
restrictList.add(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST.varname);
restrictList.add(ConfVars.HIVE_SPARK_RSC_CONF_LIST.varname);
}
private void setupRSCList() {
rscList.clear();
String vars = this.getVar(ConfVars.HIVE_SPARK_RSC_CONF_LIST);
if (vars != null) {
for (String var : vars.split(",")) {
rscList.add(var.trim());
}
}
}
/**
* Strips hidden config entries from configuration
*/
public void stripHiddenConfigurations(Configuration conf) {
HiveConfUtil.stripConfigurations(conf, hiddenSet);
}
/**
* @return true if HS2 webui is enabled
*/
public boolean isWebUiEnabled() {
return this.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_PORT) != 0;
}
/**
* @return true if HS2 webui query-info cache is enabled
*/
public boolean isWebUiQueryInfoCacheEnabled() {
return isWebUiEnabled() && this.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES) > 0;
}
/* Dynamic partition pruning is enabled in some or all cases
*/
public boolean isSparkDPPAny() {
return isSparkDPPAny(this);
}
/* Dynamic partition pruning is enabled only for map join
* hive.spark.dynamic.partition.pruning is false and
* hive.spark.dynamic.partition.pruning.map.join.only is true
*/
public boolean isSparkDPPOnlyMapjoin() {
return (!this.getBoolVar(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING) &&
this.getBoolVar(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING_MAP_JOIN_ONLY));
}
public static boolean isLoadMetastoreConfig() {
return loadMetastoreConfig;
}
public static void setLoadMetastoreConfig(boolean loadMetastoreConfig) {
HiveConf.loadMetastoreConfig = loadMetastoreConfig;
}
public static boolean isLoadHiveServer2Config() {
return loadHiveServer2Config;
}
public static void setLoadHiveServer2Config(boolean loadHiveServer2Config) {
HiveConf.loadHiveServer2Config = loadHiveServer2Config;
}
public static class StrictChecks {
private static final String NO_LIMIT_MSG = makeMessage(
"Order by-s without limit", ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT);
public static final String NO_PARTITIONLESS_MSG = makeMessage(
"Queries against partitioned tables without a partition filter",
ConfVars.HIVE_STRICT_CHECKS_NO_PARTITION_FILTER);
private static final String NO_COMPARES_MSG = makeMessage(
"Unsafe compares between different types", ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY);
private static final String NO_CARTESIAN_MSG = makeMessage(
"Cartesian products", ConfVars.HIVE_STRICT_CHECKS_CARTESIAN);
private static final String NO_BUCKETING_MSG = makeMessage(
"Load into bucketed tables", ConfVars.HIVE_STRICT_CHECKS_BUCKETING);
private static String makeMessage(String what, ConfVars setting) {
return what + " are disabled for safety reasons. If you know what you are doing, please set "
+ setting.varname + " to false and make sure that " + ConfVars.HIVEMAPREDMODE.varname +
" is not set to 'strict' to proceed. Note that you may get errors or incorrect " +
"results if you make a mistake while using some of the unsafe features.";
}
public static String checkNoLimit(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_ORDERBY_NO_LIMIT) ? null : NO_LIMIT_MSG;
}
public static String checkNoPartitionFilter(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_NO_PARTITION_FILTER)
? null : NO_PARTITIONLESS_MSG;
}
public static String checkTypeSafety(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY) ? null : NO_COMPARES_MSG;
}
public static String checkCartesian(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_CARTESIAN) ? null : NO_CARTESIAN_MSG;
}
public static String checkBucketing(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_BUCKETING) ? null : NO_BUCKETING_MSG;
}
private static boolean isAllowed(Configuration conf, ConfVars setting) {
String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, (String)null);
return (mode != null) ? !"strict".equals(mode) : !HiveConf.getBoolVar(conf, setting);
}
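// Typical use (illustrative): callers treat a non-null return value as the error message, e.g.
//   String error = StrictChecks.checkCartesian(conf);
//   if (error != null) { /* reject the query with this message */ }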
}
public static String getNonMrEngines() {
String result = StringUtils.EMPTY;
for (String s : ConfVars.HIVE_EXECUTION_ENGINE.getValidStringValues()) {
if ("mr".equals(s)) {
continue;
}
if (!result.isEmpty()) {
result += ", ";
}
result += s;
}
return result;
}
public static String generateMrDeprecationWarning() {
return "Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. "
+ "Consider using a different execution engine (i.e. " + HiveConf.getNonMrEngines()
+ ") or using Hive 1.X releases.";
}
private static final Object reverseMapLock = new Object();
private static HashMap<String, ConfVars> reverseMap = null;
public static HashMap<String, ConfVars> getOrCreateReverseMap() {
// This should be called rarely enough; for now it's ok to just lock every time.
synchronized (reverseMapLock) {
if (reverseMap != null) {
return reverseMap;
}
}
HashMap<String, ConfVars> vars = new HashMap<>();
for (ConfVars val : ConfVars.values()) {
vars.put(val.varname.toLowerCase(), val);
if (val.altName != null && !val.altName.isEmpty()) {
vars.put(val.altName.toLowerCase(), val);
}
}
synchronized (reverseMapLock) {
if (reverseMap != null) {
return reverseMap;
}
reverseMap = vars;
return reverseMap;
}
}
public void verifyAndSetAll(Map<String, String> overlay) {
for (Entry<String, String> entry : overlay.entrySet()) {
verifyAndSet(entry.getKey(), entry.getValue());
}
}
public Map<String, String> subtree(String string) {
Map<String, String> ret = new HashMap<>();
for (Entry<Object, Object> entry : getProps().entrySet()) {
String key = (String) entry.getKey();
String value = (String) entry.getValue();
if (key.startsWith(string)) {
ret.put(key.substring(string.length() + 1), value);
}
}
return ret;
}
}
| [
"\"HIVE_CONF_DIR\"",
"\"HIVE_HOME\"",
"\"HADOOP_HOME\"",
"\"HADOOP_PREFIX\""
]
| []
| [
"HADOOP_PREFIX",
"HADOOP_HOME",
"HIVE_CONF_DIR",
"HIVE_HOME"
]
| [] | ["HADOOP_PREFIX", "HADOOP_HOME", "HIVE_CONF_DIR", "HIVE_HOME"] | java | 4 | 0 | |
pkg/db/interface.go | package db
import (
crand "crypto/rand"
"database/sql"
"errors"
"fmt"
"github.com/golang/protobuf/jsonpb"
"log"
"math/big"
"math/rand"
"os"
"strings"
"time"
api "github.com/kubeflow/katib/pkg/api"
_ "github.com/go-sql-driver/mysql"
)
const (
dbDriver = "mysql"
dbNameTmpl = "root:%s@tcp(vizier-db:3306)/vizier"
mysqlTimeFmt = "2006-01-02 15:04:05.999999"
)
type GetWorkerLogOpts struct {
Name string
SinceTime *time.Time
Descending bool
Limit int32
Objective bool
}
type WorkerLog struct {
Time time.Time
Name string
Value string
}
type VizierDBInterface interface {
DBInit()
GetStudyConfig(string) (*api.StudyConfig, error)
GetStudyList() ([]string, error)
CreateStudy(*api.StudyConfig) (string, error)
DeleteStudy(string) error
GetTrial(string) (*api.Trial, error)
GetTrialList(string) ([]*api.Trial, error)
CreateTrial(*api.Trial) error
DeleteTrial(string) error
GetWorker(string) (*api.Worker, error)
GetWorkerStatus(string) (*api.State, error)
GetWorkerList(string, string) ([]*api.Worker, error)
GetWorkerLogs(string, *GetWorkerLogOpts) ([]*WorkerLog, error)
GetWorkerTimestamp(string) (*time.Time, error)
StoreWorkerLogs(string, []*api.MetricsLog) error
CreateWorker(*api.Worker) (string, error)
UpdateWorker(string, api.State) error
DeleteWorker(string) error
GetWorkerFullInfo(string, string, string, bool) (*api.GetWorkerFullInfoReply, error)
SetSuggestionParam(string, string, []*api.SuggestionParameter) (string, error)
UpdateSuggestionParam(string, []*api.SuggestionParameter) error
GetSuggestionParam(string) ([]*api.SuggestionParameter, error)
GetSuggestionParamList(string) ([]*api.SuggestionParameterSet, error)
SetEarlyStopParam(string, string, []*api.EarlyStoppingParameter) (string, error)
UpdateEarlyStopParam(string, []*api.EarlyStoppingParameter) error
GetEarlyStopParam(string) ([]*api.EarlyStoppingParameter, error)
GetEarlyStopParamList(string) ([]*api.EarlyStoppingParameterSet, error)
}
type dbConn struct {
db *sql.DB
}
var rs1Letters = []rune("abcdefghijklmnopqrstuvwxyz")
func getDbName() string {
dbPass := os.Getenv("MYSQL_ROOT_PASSWORD")
if dbPass == "" {
log.Printf("WARN: Env var MYSQL_ROOT_PASSWORD is empty. Falling back to \"test\".")
// For backward compatibility, e.g. in case that all but vizier-core
// is older ones so we do not have Secret nor upgraded vizier-db.
dbPass = "test"
}
return fmt.Sprintf(dbNameTmpl, dbPass)
}
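// For example (illustrative): with MYSQL_ROOT_PASSWORD unset this yields the DSN
// "root:test@tcp(vizier-db:3306)/vizier", which sql.Open in New() consumes.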
func NewWithSQLConn(db *sql.DB) VizierDBInterface {
d := new(dbConn)
d.db = db
seed, err := crand.Int(crand.Reader, big.NewInt(1<<63-1))
if err != nil {
log.Fatalf("RNG initialization failed: %v", err)
}
// We can do the following instead, but it creates a locking issue
//d.rng = rand.New(rand.NewSource(seed.Int64()))
rand.Seed(seed.Int64())
return d
}
func New() VizierDBInterface {
db, err := sql.Open(dbDriver, getDbName())
if err != nil {
log.Fatalf("DB open failed: %v", err)
}
return NewWithSQLConn(db)
}
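// Typical use (illustrative sketch, error handling omitted; studyConfig is an
// *api.StudyConfig with ParameterConfigs populated):
//   dbIf := New()
//   dbIf.DBInit()
//   studyID, _ := dbIf.CreateStudy(studyConfig)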
func generateRandid() string {
// UUID isn't quite handy in the Go world
id := make([]byte, 8)
_, err := rand.Read(id)
if err != nil {
log.Printf("Error reading random: %v", err)
return ""
}
return string(rs1Letters[rand.Intn(len(rs1Letters))]) + fmt.Sprintf("%016x", id)[1:]
}
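// The result is a 16-character id: one random lowercase letter followed by 15 hex digits,
// e.g. "k3a94c1f20b55e7d" (illustrative value).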
func isDBDuplicateError(err error) bool {
errmsg := strings.ToLower(err.Error())
if strings.Contains(errmsg, "unique") || strings.Contains(errmsg, "duplicate") {
return true
}
return false
}
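// For example (illustrative): MySQL's duplicate-key error ("Error 1062: Duplicate entry ... for key ...")
// contains "duplicate" after lowercasing, so CreateStudy can retry with a fresh random id.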
func (d *dbConn) GetStudyConfig(id string) (*api.StudyConfig, error) {
row := d.db.QueryRow("SELECT * FROM studies WHERE id = ?", id)
study := new(api.StudyConfig)
var dummyID, configs, tags, metrics string
err := row.Scan(&dummyID,
&study.Name,
&study.Owner,
&study.OptimizationType,
&study.OptimizationGoal,
&configs,
&tags,
&study.ObjectiveValueName,
&metrics,
&study.JobId,
)
if err != nil {
return nil, err
}
study.ParameterConfigs = new(api.StudyConfig_ParameterConfigs)
err = jsonpb.UnmarshalString(configs, study.ParameterConfigs)
if err != nil {
return nil, err
}
var tagsArray []string
if len(tags) > 0 {
tagsArray = strings.Split(tags, ",\n")
}
study.Tags = make([]*api.Tag, len(tagsArray))
for i, j := range tagsArray {
tag := new(api.Tag)
err = jsonpb.UnmarshalString(j, tag)
if err != nil {
log.Printf("err unmarshal %s", j)
return nil, err
}
study.Tags[i] = tag
}
study.Metrics = strings.Split(metrics, ",\n")
return study, nil
}
func (d *dbConn) GetStudyList() ([]string, error) {
rows, err := d.db.Query("SELECT id FROM studies")
if err != nil {
return nil, err
}
defer rows.Close()
var result []string
for rows.Next() {
var id string
err = rows.Scan(&id)
if err != nil {
log.Printf("err scanning studies.id: %v", err)
continue
}
result = append(result, id)
}
return result, nil
}
func (d *dbConn) CreateStudy(in *api.StudyConfig) (string, error) {
if in.ParameterConfigs == nil {
return "", errors.New("ParameterConfigs must be set")
}
if in.JobId != "" {
row := d.db.QueryRow("SELECT * FROM studies WHERE job_id = ?", in.JobId)
dummyStudy := new(api.StudyConfig)
var dummyID, dummyConfigs, dummyTags, dummyMetrics, dummyJobID string
err := row.Scan(&dummyID,
&dummyStudy.Name,
&dummyStudy.Owner,
&dummyStudy.OptimizationType,
&dummyStudy.OptimizationGoal,
&dummyConfigs,
&dummyTags,
&dummyStudy.ObjectiveValueName,
&dummyMetrics,
&dummyJobID,
)
if err == nil {
return "", fmt.Errorf("Study %s in Job %s already exist.", in.Name, in.JobId)
}
}
configs, err := (&jsonpb.Marshaler{}).MarshalToString(in.ParameterConfigs)
if err != nil {
log.Fatalf("Error marshaling configs: %v", err)
}
tags := make([]string, len(in.Tags))
for i, elem := range in.Tags {
tags[i], err = (&jsonpb.Marshaler{}).MarshalToString(elem)
if err != nil {
log.Printf("Error marshalling %v: %v", elem, err)
continue
}
}
    isin := false
for _, m := range in.Metrics {
if m == in.ObjectiveValueName {
isin = true
}
}
if !isin {
in.Metrics = append(in.Metrics, in.ObjectiveValueName)
}
var studyID string
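    // Try the insert up to 3 times, regenerating the random ID whenever it
    // collides with an existing row.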
i := 3
    for {
studyID = generateRandid()
_, err := d.db.Exec(
"INSERT INTO studies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
studyID,
in.Name,
in.Owner,
in.OptimizationType,
in.OptimizationGoal,
configs,
strings.Join(tags, ",\n"),
in.ObjectiveValueName,
strings.Join(in.Metrics, ",\n"),
in.JobId,
)
if err == nil {
break
} else if isDBDuplicateError(err) {
i--
if i > 0 {
continue
}
}
return "", err
}
for _, perm := range in.AccessPermissions {
_, err := d.db.Exec(
"INSERT INTO study_permissions (study_id, access_permission) "+
"VALUES (?, ?)",
studyID, perm)
if err != nil {
log.Printf("Error storing permission (%s, %s): %v",
studyID, perm, err)
}
}
return studyID, nil
}
func (d *dbConn) DeleteStudy(id string) error {
_, err := d.db.Exec("DELETE FROM studies WHERE id = ?", id)
return err
}
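// getTrials fetches trials by trial ID or, when trialID is empty, by study
// ID; at least one of the two must be set.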
func (d *dbConn) getTrials(trialID string, studyID string) ([]*api.Trial, error) {
var rows *sql.Rows
var err error
if trialID != "" {
rows, err = d.db.Query("SELECT * FROM trials WHERE id = ?", trialID)
} else if studyID != "" {
rows, err = d.db.Query("SELECT * FROM trials WHERE study_id = ?", studyID)
} else {
return nil, errors.New("trial_id or study_id must be set")
}
if err != nil {
return nil, err
}
var result []*api.Trial
for rows.Next() {
trial := new(api.Trial)
var parameters, tags string
err := rows.Scan(&trial.TrialId,
&trial.StudyId,
¶meters,
&trial.ObjectiveValue,
&tags,
)
if err != nil {
return nil, err
}
params := strings.Split(parameters, ",\n")
p := make([]*api.Parameter, len(params))
for i, pstr := range params {
if pstr == "" {
continue
}
p[i] = &api.Parameter{}
err := jsonpb.UnmarshalString(pstr, p[i])
if err != nil {
return nil, err
}
}
trial.ParameterSet = p
taglist := strings.Split(tags, ",\n")
t := make([]*api.Tag, len(taglist))
for i, tstr := range taglist {
t[i] = &api.Tag{}
if tstr == "" {
continue
}
err := jsonpb.UnmarshalString(tstr, t[i])
if err != nil {
return nil, err
}
}
trial.Tags = t
result = append(result, trial)
}
return result, nil
}
func (d *dbConn) GetTrial(id string) (*api.Trial, error) {
trials, err := d.getTrials(id, "")
if err != nil {
return nil, err
}
if len(trials) > 1 {
return nil, errors.New("multiple trials found")
} else if len(trials) == 0 {
return nil, errors.New("trials not found")
}
return trials[0], nil
}
func (d *dbConn) GetTrialList(id string) ([]*api.Trial, error) {
trials, err := d.getTrials("", id)
return trials, err
}
func (d *dbConn) CreateTrial(trial *api.Trial) error {
    // This function sets trial.TrialId (unlike the old dbInsertTrials()),
    // so callers should not set it themselves.
var err, lastErr error
params := make([]string, len(trial.ParameterSet))
for i, elem := range trial.ParameterSet {
params[i], err = (&jsonpb.Marshaler{}).MarshalToString(elem)
if err != nil {
log.Printf("Error marshalling trial.ParameterSet %v: %v",
elem, err)
lastErr = err
}
}
tags := make([]string, len(trial.Tags))
for i := range tags {
tags[i], err = (&jsonpb.Marshaler{}).MarshalToString(trial.Tags[i])
if err != nil {
log.Printf("Error marshalling trial.Tags %v: %v",
trial.Tags[i], err)
lastErr = err
}
}
var trialID string
i := 3
    for {
trialID = generateRandid()
_, err = d.db.Exec("INSERT INTO trials VALUES (?, ?, ?, ?, ?)",
trialID, trial.StudyId, strings.Join(params, ",\n"),
trial.ObjectiveValue, strings.Join(tags, ",\n"))
if err == nil {
trial.TrialId = trialID
break
} else if isDBDuplicateError(err) {
i--
if i > 0 {
continue
}
}
return err
}
return lastErr
}
func (d *dbConn) DeleteTrial(id string) error {
_, err := d.db.Exec("DELETE FROM trials WHERE id = ?", id)
return err
}
func (d *dbConn) GetWorkerLogs(id string, opts *GetWorkerLogOpts) ([]*WorkerLog, error) {
qstr := ""
qfield := []interface{}{id}
order := ""
if opts != nil {
if opts.SinceTime != nil {
qstr += " AND time >= ?"
qfield = append(qfield, opts.SinceTime)
}
if opts.Name != "" {
qstr += " AND name = ?"
qfield = append(qfield, opts.Name)
}
if opts.Objective {
qstr += " AND is_objective = 1"
}
if opts.Descending {
order = " DESC"
}
if opts.Limit > 0 {
order += fmt.Sprintf(" LIMIT %d", opts.Limit)
}
}
rows, err := d.db.Query("SELECT time, name, value FROM worker_metrics WHERE worker_id = ?"+
qstr+" ORDER BY time"+order, qfield...)
if err != nil {
return nil, err
}
var result []*WorkerLog
for rows.Next() {
log1 := new(WorkerLog)
var timeStr string
err := rows.Scan(&timeStr, &((*log1).Name), &((*log1).Value))
if err != nil {
log.Printf("Error scanning log: %v", err)
continue
}
log1.Time, err = time.Parse(mysqlTimeFmt, timeStr)
if err != nil {
log.Printf("Error parsing time %s: %v", timeStr, err)
continue
}
result = append(result, log1)
}
return result, nil
}
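// getWorkerLastlogs returns the timestamp of the most recently stored log
// for the worker together with the metric rows sharing that timestamp.
// StoreWorkerLogs uses this to avoid inserting entries that are already in
// the DB.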
func (d *dbConn) getWorkerLastlogs(id string) (time.Time, []*WorkerLog, error) {
var timeStr string
var timeVal time.Time
var err error
    // Use LEFT JOIN to ensure a result even if there are no matching rows
    // in worker_metrics.
rows, err := d.db.Query(
`SELECT worker_lastlogs.time, name, value FROM worker_lastlogs
LEFT JOIN worker_metrics
ON (worker_lastlogs.worker_id = worker_metrics.worker_id AND worker_lastlogs.time = worker_metrics.time)
WHERE worker_lastlogs.worker_id = ?`, id)
if err != nil {
return timeVal, nil, err
}
var result []*WorkerLog
for rows.Next() {
log1 := new(WorkerLog)
var thisTime string
var name, value sql.NullString
err := rows.Scan(&thisTime, &name, &value)
if err != nil {
log.Printf("Error scanning log: %v", err)
continue
}
if timeStr == "" {
timeStr = thisTime
timeVal, err = time.Parse(mysqlTimeFmt, timeStr)
if err != nil {
log.Printf("Error parsing time %s: %v", timeStr, err)
return timeVal, nil, err
}
} else if timeStr != thisTime {
log.Printf("Unexpected query result %s != %s",
timeStr, thisTime)
}
log1.Time = timeVal
if !name.Valid {
continue
}
(*log1).Name = name.String
(*log1).Value = value.String
result = append(result, log1)
}
return timeVal, result, nil
}
func (d *dbConn) GetWorkerTimestamp(id string) (*time.Time, error) {
var lastTimestamp string
row := d.db.QueryRow("SELECT time FROM worker_lastlogs WHERE worker_id = ?", id)
err := row.Scan(&lastTimestamp)
switch {
case err == sql.ErrNoRows:
return nil, nil
case err != nil:
return nil, err
default:
mt, err := time.Parse(mysqlTimeFmt, lastTimestamp)
if err != nil {
log.Printf("Error parsing time in log %s: %v",
lastTimestamp, err)
return nil, err
}
return &mt, nil
}
}
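// storeWorkerLog inserts a single metric value, marking it as the objective
// metric when its name matches the study's objective_value_name.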
func (d *dbConn) storeWorkerLog(workerID string, time string, metricsName string, metricsValue string, objectiveValueName string) error {
isObjective := 0
if metricsName == objectiveValueName {
isObjective = 1
}
_, err := d.db.Exec("INSERT INTO worker_metrics (worker_id, time, name, value, is_objective) VALUES (?, ?, ?, ?, ?)",
workerID, time, metricsName, metricsValue, isObjective)
if err != nil {
return err
}
return nil
}
func (d *dbConn) StoreWorkerLogs(workerID string, logs []*api.MetricsLog) error {
var lasterr error
dbT, lastLogs, err := d.getWorkerLastlogs(workerID)
if err != nil {
log.Printf("Error getting last log timestamp: %v", err)
}
row := d.db.QueryRow("SELECT objective_value_name FROM workers "+
"JOIN (studies) ON (workers.study_id = studies.id) WHERE "+
"workers.id = ?", workerID)
var objectiveValueName string
err = row.Scan(&objectiveValueName)
if err != nil {
log.Printf("Cannot get objective_value_name or metrics: %v", err)
return err
}
    // Store a log entry when
    // 1. it is newer than dbT, or
    // 2. its timestamp equals dbT but it is not yet in the DB.
var formattedTime string
var lastTime time.Time
for _, mlog := range logs {
metricsName := mlog.Name
logLoop:
for _, mv := range mlog.Values {
t, err := time.Parse(time.RFC3339Nano, mv.Time)
if err != nil {
log.Printf("Error parsing time %s: %v", mv.Time, err)
lasterr = err
continue
}
if t.Before(dbT) {
                // dbT comes from MySQL and has microsecond precision.
                // This code assumes the nanosecond fraction is truncated
                // (rounded down).
continue
}
// use UTC as mysql DATETIME lacks timezone
formattedTime = t.UTC().Format(mysqlTimeFmt)
if !dbT.IsZero() {
                // Parse the formatted time again to apply the same rounding;
                // otherwise the next comparison would almost always be false.
reparsed_time, err := time.Parse(mysqlTimeFmt, formattedTime)
if err != nil {
log.Printf("Error parsing time %s: %v", formattedTime, err)
lasterr = err
continue
}
if reparsed_time == dbT {
for _, l := range lastLogs {
if l.Name == metricsName && l.Value == mv.Value {
continue logLoop
}
}
}
}
err = d.storeWorkerLog(workerID,
formattedTime,
metricsName, mv.Value,
objectiveValueName)
if err != nil {
log.Printf("Error storing log %s: %v", mv.Value, err)
lasterr = err
} else if t.After(lastTime) {
lastTime = t
}
}
}
if lasterr != nil {
        // Do not update worker_lastlogs yet; otherwise the logs that could
        // not be saved would never be retried and would be lost.
return lasterr
}
if !lastTime.IsZero() {
formattedTime = lastTime.UTC().Format(mysqlTimeFmt)
_, err = d.db.Exec("REPLACE INTO worker_lastlogs VALUES (?, ?)",
workerID, formattedTime)
}
return err
}
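// getWorkers fetches workers by worker ID, trial ID or study ID, checked in
// that order; at least one of the three must be set.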
func (d *dbConn) getWorkers(workerID string, trialID string, studyID string) ([]*api.Worker, error) {
var rows *sql.Rows
var err error
if workerID != "" {
rows, err = d.db.Query("SELECT * FROM workers WHERE id = ?", workerID)
} else if trialID != "" {
rows, err = d.db.Query("SELECT * FROM workers WHERE trial_id = ?", trialID)
} else if studyID != "" {
rows, err = d.db.Query("SELECT * FROM workers WHERE study_id = ?", studyID)
} else {
return nil, errors.New("worker_id, trial_id or study_id must be set")
}
if err != nil {
return nil, err
}
var result []*api.Worker
for rows.Next() {
worker := new(api.Worker)
var tags string
err := rows.Scan(
&worker.WorkerId,
&worker.StudyId,
&worker.TrialId,
&worker.Type,
&worker.Status,
&worker.TemplatePath,
&tags,
)
if err != nil {
return nil, err
}
taglist := strings.Split(tags, ",\n")
t := make([]*api.Tag, len(taglist))
for i, tstr := range taglist {
t[i] = &api.Tag{}
if tstr == "" {
continue
}
err := jsonpb.UnmarshalString(tstr, t[i])
if err != nil {
return nil, err
}
}
worker.Tags = t
result = append(result, worker)
}
return result, nil
}
func (d *dbConn) GetWorker(id string) (*api.Worker, error) {
workers, err := d.getWorkers(id, "", "")
if err != nil {
return nil, err
}
if len(workers) > 1 {
return nil, errors.New("multiple workers found")
} else if len(workers) == 0 {
return nil, errors.New("worker not found")
}
return workers[0], nil
}
func (d *dbConn) GetWorkerStatus(id string) (*api.State, error) {
status := api.State_ERROR
row := d.db.QueryRow("SELECT status FROM workers WHERE id = ?", id)
err := row.Scan(&status)
if err != nil {
return &status, err
}
return &status, nil
}
func (d *dbConn) GetWorkerList(sid string, tid string) ([]*api.Worker, error) {
workers, err := d.getWorkers("", tid, sid)
return workers, err
}
func (d *dbConn) CreateWorker(worker *api.Worker) (string, error) {
// Users should not overwrite worker.id
var err, lastErr error
tags := make([]string, len(worker.Tags))
for i := range tags {
tags[i], err = (&jsonpb.Marshaler{}).MarshalToString(worker.Tags[i])
if err != nil {
log.Printf("Error marshalling worker.Tags %v: %v",
worker.Tags[i], err)
lastErr = err
}
}
var workerID string
i := 3
    for {
workerID = generateRandid()
_, err = d.db.Exec("INSERT INTO workers VALUES (?, ?, ?, ?, ?, ?, ?)",
workerID, worker.StudyId, worker.TrialId, worker.Type,
api.State_PENDING, worker.TemplatePath, strings.Join(tags, ",\n"))
if err == nil {
worker.WorkerId = workerID
break
} else if isDBDuplicateError(err) {
i--
if i > 0 {
continue
}
}
return "", err
}
return worker.WorkerId, lastErr
}
func (d *dbConn) UpdateWorker(id string, newstatus api.State) error {
_, err := d.db.Exec("UPDATE workers SET status = ? WHERE id = ?", newstatus, id)
return err
}
func (d *dbConn) DeleteWorker(id string) error {
_, err := d.db.Exec("DELETE FROM workers WHERE id = ?", id)
return err
}
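// GetWorkerFullInfo aggregates workers with the parameters of their trials
// and their metrics logs. When OnlyLatestLog is true, only the most recent
// value of each metric per worker is returned.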
func (d *dbConn) GetWorkerFullInfo(studyId string, trialId string, workerId string, OnlyLatestLog bool) (*api.GetWorkerFullInfoReply, error) {
ret := &api.GetWorkerFullInfoReply{}
var err error
ws := []*api.Worker{}
if workerId != "" {
        w, err := d.GetWorker(workerId)
        if err != nil {
            return ret, err
        }
        ws = append(ws, w)
} else {
ws, err = d.GetWorkerList(studyId, trialId)
if err != nil {
return ret, err
}
}
ts, err := d.GetTrialList(studyId)
if err != nil {
return ret, err
}
sc, err := d.GetStudyConfig(studyId)
if err != nil {
return ret, err
}
plist := make(map[string][]*api.Parameter)
for _, t := range ts {
plist[t.TrialId] = t.ParameterSet
}
wfilist := make([]*api.WorkerFullInfo, len(ws))
var qstr, id string
if OnlyLatestLog {
qstr = `
SELECT
WM.worker_id, WM.time, WM.name, WM.value
FROM (
SELECT
Master.worker_id, Master.time, Master.name, Master.value
FROM (
SELECT
worker_id, name,
MAX(id) AS MaxID
FROM
worker_metrics
GROUP BY
worker_id, name
) AS LATEST
JOIN worker_metrics AS Master
ON Master.id = LATEST.MaxID
) AS WM
JOIN workers AS WS
ON WM.worker_id = WS.id
AND`
} else {
qstr = `
SELECT
WM.worker_id, WM.time, WM.name, WM.value
FROM
worker_metrics AS WM
JOIN workers AS WS
ON WM.worker_id = WS.id
AND`
}
if workerId != "" {
if OnlyLatestLog {
qstr = `
SELECT
WM.worker_id, WM.time, WM.name, WM.value
FROM (
SELECT
Master.worker_id, Master.time, Master.name, Master.value
FROM (
SELECT
worker_id, name,
MAX(id) AS MaxID
FROM
worker_metrics
GROUP BY
worker_id, name
) AS LATEST
JOIN worker_metrics AS Master
ON Master.id = LATEST.MaxID
AND Master.worker_id = ?
) AS WM`
} else {
qstr = "SELECT worker_id, time, name, value FROM worker_metrics WHERE worker_id = ?"
}
id = workerId
} else if trialId != "" {
qstr += " WS.trial_id = ? "
id = trialId
} else if studyId != "" {
qstr += " WS.study_id = ? "
id = studyId
}
rows, err := d.db.Query(qstr+" ORDER BY time", id)
if err != nil {
log.Printf("SQL query: %v", err)
return ret, err
}
metricslist := make(map[string]map[string][]*api.MetricsValueTime, len(ws))
for rows.Next() {
var name, value, timeStr, wid string
err := rows.Scan(&wid, &timeStr, &name, &value)
if err != nil {
log.Printf("Error scanning log: %v", err)
continue
}
ptime, err := time.Parse(mysqlTimeFmt, timeStr)
if err != nil {
log.Printf("Error parsing time %s: %v", timeStr, err)
continue
}
if _, ok := metricslist[wid]; ok {
metricslist[wid][name] = append(metricslist[wid][name], &api.MetricsValueTime{
Value: value,
Time: ptime.UTC().Format(time.RFC3339Nano),
})
} else {
metricslist[wid] = make(map[string][]*api.MetricsValueTime, len(sc.Metrics))
metricslist[wid][name] = append(metricslist[wid][name], &api.MetricsValueTime{
Value: value,
Time: ptime.UTC().Format(time.RFC3339Nano),
})
}
}
for i, w := range ws {
wfilist[i] = &api.WorkerFullInfo{
Worker: w,
ParameterSet: plist[w.TrialId],
}
for _, m := range sc.Metrics {
if v, ok := metricslist[w.WorkerId][m]; ok {
wfilist[i].MetricsLogs = append(wfilist[i].MetricsLogs, &api.MetricsLog{
Name: m,
Values: v,
},
)
}
}
}
ret.WorkerFullInfos = wfilist
return ret, nil
}
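// SetSuggestionParam stores the suggestion parameters of a study as a
// ",\n"-joined list of jsonpb-encoded strings and returns the generated
// parameter-set ID.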
func (d *dbConn) SetSuggestionParam(algorithm string, studyID string, params []*api.SuggestionParameter) (string, error) {
var err error
ps := make([]string, len(params))
for i, elem := range params {
ps[i], err = (&jsonpb.Marshaler{}).MarshalToString(elem)
if err != nil {
log.Printf("Error marshalling %v: %v", elem, err)
return "", err
}
}
var paramID string
    for {
paramID = generateRandid()
_, err = d.db.Exec("INSERT INTO suggestion_param VALUES (?, ?, ?, ?)",
paramID, algorithm, studyID, strings.Join(ps, ",\n"))
if err == nil {
break
} else if !isDBDuplicateError(err) {
return "", err
}
}
return paramID, err
}
func (d *dbConn) UpdateSuggestionParam(paramID string, params []*api.SuggestionParameter) error {
var err error
ps := make([]string, len(params))
for i, elem := range params {
ps[i], err = (&jsonpb.Marshaler{}).MarshalToString(elem)
if err != nil {
log.Printf("Error marshalling %v: %v", elem, err)
return err
}
}
_, err = d.db.Exec("UPDATE suggestion_param SET parameters = ? WHERE id = ?",
strings.Join(ps, ",\n"), paramID)
return err
}
func (d *dbConn) GetSuggestionParam(paramID string) ([]*api.SuggestionParameter, error) {
var params string
row := d.db.QueryRow("SELECT parameters FROM suggestion_param WHERE id = ?", paramID)
err := row.Scan(¶ms)
if err != nil {
return nil, err
}
var pArray []string
if len(params) > 0 {
pArray = strings.Split(params, ",\n")
} else {
return nil, nil
}
ret := make([]*api.SuggestionParameter, len(pArray))
for i, j := range pArray {
p := new(api.SuggestionParameter)
err = jsonpb.UnmarshalString(j, p)
if err != nil {
log.Printf("err unmarshal %s", j)
return nil, err
}
ret[i] = p
}
return ret, nil
}
func (d *dbConn) GetSuggestionParamList(studyID string) ([]*api.SuggestionParameterSet, error) {
var rows *sql.Rows
var err error
rows, err = d.db.Query("SELECT id, suggestion_algo, parameters FROM suggestion_param WHERE study_id = ?", studyID)
if err != nil {
return nil, err
}
var result []*api.SuggestionParameterSet
for rows.Next() {
var id string
var algorithm string
var params string
err := rows.Scan(&id, &algorithm, ¶ms)
if err != nil {
return nil, err
}
var pArray []string
if len(params) > 0 {
pArray = strings.Split(params, ",\n")
} else {
return nil, nil
}
suggestparams := make([]*api.SuggestionParameter, len(pArray))
for i, j := range pArray {
p := new(api.SuggestionParameter)
err = jsonpb.UnmarshalString(j, p)
if err != nil {
log.Printf("err unmarshal %s", j)
return nil, err
}
suggestparams[i] = p
}
result = append(result, &api.SuggestionParameterSet{
ParamId: id,
SuggestionAlgorithm: algorithm,
SuggestionParameters: suggestparams,
})
}
return result, nil
}
func (d *dbConn) SetEarlyStopParam(algorithm string, studyID string, params []*api.EarlyStoppingParameter) (string, error) {
ps := make([]string, len(params))
var err error
for i, elem := range params {
ps[i], err = (&jsonpb.Marshaler{}).MarshalToString(elem)
if err != nil {
log.Printf("Error marshalling %v: %v", elem, err)
return "", err
}
}
var paramID string
    for {
paramID = generateRandid()
_, err = d.db.Exec("INSERT INTO earlystopping_param VALUES (?,?, ?, ?)",
paramID, algorithm, studyID, strings.Join(ps, ",\n"))
if err == nil {
break
} else if !isDBDuplicateError(err) {
return "", err
}
}
return paramID, nil
}
func (d *dbConn) UpdateEarlyStopParam(paramID string, params []*api.EarlyStoppingParameter) error {
ps := make([]string, len(params))
var err error
for i, elem := range params {
ps[i], err = (&jsonpb.Marshaler{}).MarshalToString(elem)
if err != nil {
log.Printf("Error marshalling %v: %v", elem, err)
return err
}
}
_, err = d.db.Exec("UPDATE earlystopping_param SET parameters = ? WHERE id = ?",
strings.Join(ps, ",\n"), paramID)
return err
}
func (d *dbConn) GetEarlyStopParam(paramID string) ([]*api.EarlyStoppingParameter, error) {
var params string
row := d.db.QueryRow("SELECT parameters FROM earlystopping_param WHERE id = ?", paramID)
err := row.Scan(¶ms)
if err != nil {
return nil, err
}
var pArray []string
if len(params) > 0 {
pArray = strings.Split(params, ",\n")
} else {
return nil, nil
}
ret := make([]*api.EarlyStoppingParameter, len(pArray))
for i, j := range pArray {
p := new(api.EarlyStoppingParameter)
err = jsonpb.UnmarshalString(j, p)
if err != nil {
log.Printf("err unmarshal %s", j)
return nil, err
}
ret[i] = p
}
return ret, nil
}
func (d *dbConn) GetEarlyStopParamList(studyID string) ([]*api.EarlyStoppingParameterSet, error) {
var rows *sql.Rows
var err error
rows, err = d.db.Query("SELECT id, earlystop_algo, parameters FROM earlystopping_param WHERE study_id = ?", studyID)
if err != nil {
return nil, err
}
var result []*api.EarlyStoppingParameterSet
for rows.Next() {
var id string
var algorithm string
var params string
err := rows.Scan(&id, &algorithm, ¶ms)
if err != nil {
return nil, err
}
var pArray []string
if len(params) > 0 {
pArray = strings.Split(params, ",\n")
} else {
return nil, nil
}
esparams := make([]*api.EarlyStoppingParameter, len(pArray))
for i, j := range pArray {
p := new(api.EarlyStoppingParameter)
err = jsonpb.UnmarshalString(j, p)
if err != nil {
log.Printf("err unmarshal %s", j)
return nil, err
}
esparams[i] = p
}
result = append(result, &api.EarlyStoppingParameterSet{
ParamId: id,
EarlyStoppingAlgorithm: algorithm,
EarlyStoppingParameters: esparams,
})
}
return result, nil
}
| [
"\"MYSQL_ROOT_PASSWORD\""
]
| []
| [
"MYSQL_ROOT_PASSWORD"
]
| [] | ["MYSQL_ROOT_PASSWORD"] | go | 1 | 0 | |
cinderclient/tests/unit/v3/test_shell.py | # -*- coding: utf-8 -*-
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(geguileo): For v3 we cannot mock any of the following methods
# - utils.find_volume
# - shell_utils.find_backup
# - shell_utils.find_volume_snapshot
# - shell_utils.find_group
# - shell_utils.find_group_snapshot
# because we are caching them in cinderclient.v3.shell:RESET_STATE_RESOURCES
# which means that our tests could fail depending on the mocking and loading
# order.
#
# Alternatives are:
# - Mock utils.find_resource when we have only 1 call to that method
# - Use an auxiliary method that will call original method for irrelevant
# calls. Example from test_revert_to_snapshot:
# original = client_utils.find_resource
#
# def find_resource(manager, name_or_id, **kwargs):
# if isinstance(manager, volume_snapshots.SnapshotManager):
# return volume_snapshots.Snapshot(self,
# {'id': '5678',
# 'volume_id': '1234'})
# return original(manager, name_or_id, **kwargs)
from unittest import mock
import ddt
import fixtures
from requests_mock.contrib import fixture as requests_mock_fixture
import six
from six.moves.urllib import parse
import cinderclient
from cinderclient import api_versions
from cinderclient import base
from cinderclient import client
from cinderclient import exceptions
from cinderclient import shell
from cinderclient import utils as cinderclient_utils
from cinderclient.v3 import attachments
from cinderclient.v3 import volume_snapshots
from cinderclient.v3 import volumes
from cinderclient.tests.unit.fixture_data import keystone_client
from cinderclient.tests.unit import utils
from cinderclient.tests.unit.v3 import fakes
@ddt.ddt
@mock.patch.object(client, 'Client', fakes.FakeClient)
class ShellTest(utils.TestCase):
FAKE_ENV = {
'CINDER_USERNAME': 'username',
'CINDER_PASSWORD': 'password',
'CINDER_PROJECT_ID': 'project_id',
'OS_VOLUME_API_VERSION': '3',
'CINDER_URL': keystone_client.BASE_URL,
}
# Patch os.environ to avoid required auth info.
def setUp(self):
"""Run before each test."""
super(ShellTest, self).setUp()
for var in self.FAKE_ENV:
self.useFixture(fixtures.EnvironmentVariable(var,
self.FAKE_ENV[var]))
self.mock_completion()
self.shell = shell.OpenStackCinderShell()
self.requests = self.useFixture(requests_mock_fixture.Fixture())
self.requests.register_uri(
'GET', keystone_client.BASE_URL,
text=keystone_client.keystone_request_callback)
self.cs = mock.Mock()
def run_command(self, cmd):
# Ensure the version negotiation indicates that
# all versions are supported
with mock.patch('cinderclient.api_versions._get_server_version_range',
return_value=(api_versions.APIVersion('3.0'),
api_versions.APIVersion('3.99'))):
self.shell.main(cmd.split())
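    # The assert helpers below delegate to the fake client held in
    # self.shell.cs (a fakes.FakeClient), which records the API requests
    # issued while the command runs.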
def assert_called(self, method, url, body=None,
partial_body=None, **kwargs):
return self.shell.cs.assert_called(method, url, body,
partial_body, **kwargs)
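    # Several tests below call self.assert_called_anytime; this thin delegate
    # assumes the fake client exposes an assert_called_anytime helper with the
    # same shape as assert_called.
    def assert_called_anytime(self, method, url, body=None,
                              partial_body=None):
        return self.shell.cs.assert_called_anytime(
            method, url, body, partial_body)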
def assert_call_contained(self, url_part):
self.shell.cs.assert_in_call(url_part)
@ddt.data({'resource': None, 'query_url': None},
{'resource': 'volume', 'query_url': '?resource=volume'},
{'resource': 'group', 'query_url': '?resource=group'})
@ddt.unpack
def test_list_filters(self, resource, query_url):
url = '/resource_filters'
if resource is not None:
url += query_url
self.run_command('--os-volume-api-version 3.33 '
'list-filters --resource=%s' % resource)
else:
self.run_command('--os-volume-api-version 3.33 list-filters')
self.assert_called('GET', url)
@ddt.data(
# testcases for list volume
{'command':
'list --name=123 --filters name=456',
'expected':
'/volumes/detail?name=456'},
{'command':
'list --filters name=123',
'expected':
'/volumes/detail?name=123'},
{'command':
'list --filters metadata={key1:value1}',
'expected':
'/volumes/detail?metadata=%7B%27key1%27%3A+%27value1%27%7D'},
{'command':
'list --filters name~=456',
'expected':
'/volumes/detail?name~=456'},
{'command':
u'list --filters name~=ฮฃ',
'expected':
'/volumes/detail?name~=%CE%A3'},
{'command':
u'list --filters name=abc --filters size=1',
'expected':
'/volumes/detail?name=abc&size=1'},
{'command':
u'list --filters created_at=lt:2020-01-15T00:00:00',
'expected':
'/volumes/detail?created_at=lt%3A2020-01-15T00%3A00%3A00'},
{'command':
u'list --filters updated_at=gte:2020-02-01T00:00:00,'
u'lt:2020-03-01T00:00:00',
'expected':
'/volumes/detail?updated_at=gte%3A2020-02-01T00%3A00%3A00%2C'
'lt%3A2020-03-01T00%3A00%3A00'},
{'command':
u'list --filters updated_at=gte:2020-02-01T00:00:00,'
u'lt:2020-03-01T00:00:00 --filters created_at='
u'lt:2020-01-15T00:00:00',
'expected':
'/volumes/detail?created_at=lt%3A2020-01-15T00%3A00%3A00'
'&updated_at=gte%3A2020-02-01T00%3A00%3A00%2C'
'lt%3A2020-03-01T00%3A00%3A00'},
# testcases for list group
{'command':
'group-list --filters name=456',
'expected':
'/groups/detail?name=456'},
{'command':
'group-list --filters status=available',
'expected':
'/groups/detail?status=available'},
{'command':
'group-list --filters name~=456',
'expected':
'/groups/detail?name~=456'},
{'command':
'group-list --filters name=abc --filters status=available',
'expected':
'/groups/detail?name=abc&status=available'},
# testcases for list group-snapshot
{'command':
'group-snapshot-list --status=error --filters status=available',
'expected':
'/group_snapshots/detail?status=available'},
{'command':
'group-snapshot-list --filters availability_zone=123',
'expected':
'/group_snapshots/detail?availability_zone=123'},
{'command':
'group-snapshot-list --filters status~=available',
'expected':
'/group_snapshots/detail?status~=available'},
{'command':
'group-snapshot-list --filters status=available '
'--filters availability_zone=123',
'expected':
'/group_snapshots/detail?availability_zone=123&status=available'},
# testcases for list message
{'command':
'message-list --event_id=123 --filters event_id=456',
'expected':
'/messages?event_id=456'},
{'command':
'message-list --filters request_id=123',
'expected':
'/messages?request_id=123'},
{'command':
'message-list --filters request_id~=123',
'expected':
'/messages?request_id~=123'},
{'command':
'message-list --filters request_id=123 --filters event_id=456',
'expected':
'/messages?event_id=456&request_id=123'},
# testcases for list attachment
{'command':
'attachment-list --volume-id=123 --filters volume_id=456',
'expected':
'/attachments?volume_id=456'},
{'command':
'attachment-list --filters mountpoint=123',
'expected':
'/attachments?mountpoint=123'},
{'command':
'attachment-list --filters volume_id~=456',
'expected':
'/attachments?volume_id~=456'},
{'command':
'attachment-list --filters volume_id=123 '
'--filters mountpoint=456',
'expected':
'/attachments?mountpoint=456&volume_id=123'},
# testcases for list backup
{'command':
'backup-list --volume-id=123 --filters volume_id=456',
'expected':
'/backups/detail?volume_id=456'},
{'command':
'backup-list --filters name=123',
'expected':
'/backups/detail?name=123'},
{'command':
'backup-list --filters volume_id~=456',
'expected':
'/backups/detail?volume_id~=456'},
{'command':
'backup-list --filters volume_id=123 --filters name=456',
'expected':
'/backups/detail?name=456&volume_id=123'},
# testcases for list snapshot
{'command':
'snapshot-list --volume-id=123 --filters volume_id=456',
'expected':
'/snapshots/detail?volume_id=456'},
{'command':
'snapshot-list --filters name=123',
'expected':
'/snapshots/detail?name=123'},
{'command':
'snapshot-list --filters volume_id~=456',
'expected':
'/snapshots/detail?volume_id~=456'},
{'command':
'snapshot-list --filters volume_id=123 --filters name=456',
'expected':
'/snapshots/detail?name=456&volume_id=123'},
# testcases for get pools
{'command':
'get-pools --filters name=456 --detail',
'expected':
'/scheduler-stats/get_pools?detail=True&name=456'},
{'command':
'get-pools --filters name=456',
'expected':
'/scheduler-stats/get_pools?name=456'},
{'command':
'get-pools --filters name=456 --filters detail=True',
'expected':
'/scheduler-stats/get_pools?detail=True&name=456'}
)
@ddt.unpack
def test_list_with_filters_mixed(self, command, expected):
self.run_command('--os-volume-api-version 3.33 %s' % command)
self.assert_called('GET', expected)
def test_list(self):
self.run_command('list')
# NOTE(jdg): we default to detail currently
self.assert_called('GET', '/volumes/detail')
def test_list_with_with_count(self):
self.run_command('--os-volume-api-version 3.45 list --with-count')
self.assert_called('GET', '/volumes/detail?with_count=True')
def test_summary(self):
self.run_command('--os-volume-api-version 3.12 summary')
self.assert_called('GET', '/volumes/summary')
def test_list_with_group_id_before_3_10(self):
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'list --group_id fake_id')
def test_type_list_with_filters_invalid(self):
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'--os-volume-api-version 3.51 type-list '
'--filters key=value')
def test_type_list_with_filters(self):
self.run_command('--os-volume-api-version 3.52 type-list '
'--filters extra_specs={key:value}')
self.assert_called('GET', mock.ANY)
self.assert_call_contained(
parse.urlencode(
{'extra_specs':
{six.text_type('key'): six.text_type('value')}}))
self.assert_call_contained(parse.urlencode({'is_public': None}))
def test_type_list_public(self):
self.run_command('--os-volume-api-version 3.52 type-list '
'--filters is_public=True')
self.assert_called('GET', '/types?is_public=True')
def test_type_list_private(self):
self.run_command('--os-volume-api-version 3.52 type-list '
'--filters is_public=False')
self.assert_called('GET', '/types?is_public=False')
def test_type_list_public_private(self):
self.run_command('--os-volume-api-version 3.52 type-list')
self.assert_called('GET', '/types?is_public=None')
@ddt.data("3.10", "3.11")
def test_list_with_group_id_after_3_10(self, version):
command = ('--os-volume-api-version %s list --group_id fake_id' %
version)
self.run_command(command)
self.assert_called('GET', '/volumes/detail?group_id=fake_id')
@mock.patch("cinderclient.utils.print_list")
def test_list_duplicate_fields(self, mock_print):
self.run_command('list --field Status,id,Size,status')
self.assert_called('GET', '/volumes/detail')
key_list = ['ID', 'Status', 'Size']
mock_print.assert_called_once_with(mock.ANY, key_list,
exclude_unavailable=True, sortby_index=0)
@mock.patch("cinderclient.shell.OpenStackCinderShell.downgrade_warning")
def test_list_version_downgrade(self, mock_warning):
self.run_command('--os-volume-api-version 3.998 list')
mock_warning.assert_called_once_with(
api_versions.APIVersion('3.998'),
api_versions.APIVersion(api_versions.MAX_VERSION)
)
def test_list_availability_zone(self):
self.run_command('availability-zone-list')
self.assert_called('GET', '/os-availability-zone')
@ddt.data({'cmd': '1234 1233',
'body': {'instance_uuid': '1233',
'connector': {},
'volume_uuid': '1234'}},
{'cmd': '1234 1233 '
'--connect True '
'--ip 10.23.12.23 --host server01 '
'--platform x86_xx '
'--ostype 123 '
'--multipath true '
'--mountpoint /123 '
'--initiator aabbccdd',
'body': {'instance_uuid': '1233',
'connector': {'ip': '10.23.12.23',
'host': 'server01',
'os_type': '123',
'multipath': 'true',
'mountpoint': '/123',
'initiator': 'aabbccdd',
'platform': 'x86_xx'},
'volume_uuid': '1234'}},
{'cmd': 'abc 1233',
'body': {'instance_uuid': '1233',
'connector': {},
'volume_uuid': '1234'}})
@mock.patch('cinderclient.utils.find_resource')
@ddt.unpack
def test_attachment_create(self, mock_find_volume, cmd, body):
mock_find_volume.return_value = volumes.Volume(self,
{'id': '1234'},
loaded=True)
command = '--os-volume-api-version 3.27 attachment-create '
command += cmd
self.run_command(command)
expected = {'attachment': body}
self.assertTrue(mock_find_volume.called)
self.assert_called('POST', '/attachments', body=expected)
@ddt.data({'cmd': '1234 1233',
'body': {'instance_uuid': '1233',
'connector': {},
'volume_uuid': '1234',
'mode': 'ro'}},
{'cmd': '1234 1233 '
'--connect True '
'--ip 10.23.12.23 --host server01 '
'--platform x86_xx '
'--ostype 123 '
'--multipath true '
'--mountpoint /123 '
'--initiator aabbccdd',
'body': {'instance_uuid': '1233',
'connector': {'ip': '10.23.12.23',
'host': 'server01',
'os_type': '123',
'multipath': 'true',
'mountpoint': '/123',
'initiator': 'aabbccdd',
'platform': 'x86_xx'},
'volume_uuid': '1234',
'mode': 'ro'}},
{'cmd': 'abc 1233',
'body': {'instance_uuid': '1233',
'connector': {},
'volume_uuid': '1234',
'mode': 'ro'}})
@mock.patch('cinderclient.utils.find_resource')
@ddt.unpack
def test_attachment_create_with_mode(self, mock_find_volume, cmd, body):
mock_find_volume.return_value = volumes.Volume(self,
{'id': '1234'},
loaded=True)
command = ('--os-volume-api-version 3.54 '
'attachment-create '
'--mode ro ')
command += cmd
self.run_command(command)
expected = {'attachment': body}
self.assertTrue(mock_find_volume.called)
self.assert_called('POST', '/attachments', body=expected)
@mock.patch.object(volumes.VolumeManager, 'findall')
def test_attachment_create_duplicate_name_vol(self, mock_findall):
found = [volumes.Volume(self, {'id': '7654', 'name': 'abc'},
loaded=True),
volumes.Volume(self, {'id': '9876', 'name': 'abc'},
loaded=True)]
mock_findall.return_value = found
self.assertRaises(exceptions.CommandError,
self.run_command,
'--os-volume-api-version 3.27 '
'attachment-create abc 789')
@ddt.data({'cmd': '',
'expected': ''},
{'cmd': '--volume-id 1234',
'expected': '?volume_id=1234'},
{'cmd': '--status error',
'expected': '?status=error'},
{'cmd': '--all-tenants 1',
'expected': '?all_tenants=1'},
{'cmd': '--all-tenants 1 --volume-id 12345',
'expected': '?all_tenants=1&volume_id=12345'},
{'cmd': '--all-tenants 1 --tenant 12345',
'expected': '?all_tenants=1&project_id=12345'},
{'cmd': '--tenant 12345',
'expected': '?all_tenants=1&project_id=12345'}
)
@ddt.unpack
def test_attachment_list(self, cmd, expected):
command = '--os-volume-api-version 3.27 attachment-list '
command += cmd
self.run_command(command)
self.assert_called('GET', '/attachments%s' % expected)
@mock.patch('cinderclient.utils.print_list')
@mock.patch.object(cinderclient.v3.attachments.VolumeAttachmentManager,
'list')
def test_attachment_list_setattr(self, mock_list, mock_print):
command = '--os-volume-api-version 3.27 attachment-list '
fake_attachment = [attachments.VolumeAttachment(mock.ANY, attachment)
for attachment in fakes.fake_attachment_list['attachments']]
mock_list.return_value = fake_attachment
self.run_command(command)
for attach in fake_attachment:
setattr(attach, 'server_id', getattr(attach, 'instance'))
columns = ['ID', 'Volume ID', 'Status', 'Server ID']
mock_print.assert_called_once_with(fake_attachment, columns,
sortby_index=0)
def test_revert_to_snapshot(self):
original = cinderclient_utils.find_resource
def find_resource(manager, name_or_id, **kwargs):
if isinstance(manager, volume_snapshots.SnapshotManager):
return volume_snapshots.Snapshot(self,
{'id': '5678',
'volume_id': '1234'})
return original(manager, name_or_id, **kwargs)
with mock.patch('cinderclient.utils.find_resource',
side_effect=find_resource):
self.run_command(
'--os-volume-api-version 3.40 revert-to-snapshot 5678')
self.assert_called('POST', '/volumes/1234/action',
body={'revert': {'snapshot_id': '5678'}})
def test_attachment_show(self):
self.run_command('--os-volume-api-version 3.27 attachment-show 1234')
self.assert_called('GET', '/attachments/1234')
@ddt.data({'cmd': '1234 '
'--ip 10.23.12.23 --host server01 '
'--platform x86_xx '
'--ostype 123 '
'--multipath true '
'--mountpoint /123 '
'--initiator aabbccdd',
'body': {'connector': {'ip': '10.23.12.23',
'host': 'server01',
'os_type': '123',
'multipath': 'true',
'mountpoint': '/123',
'initiator': 'aabbccdd',
'platform': 'x86_xx'}}})
@ddt.unpack
def test_attachment_update(self, cmd, body):
command = '--os-volume-api-version 3.27 attachment-update '
command += cmd
self.run_command(command)
self.assert_called('PUT', '/attachments/1234', body={'attachment':
body})
def test_attachment_complete(self):
command = '--os-volume-api-version 3.44 attachment-complete 1234'
self.run_command(command)
self.assert_called('POST', '/attachments/1234/action', body=None)
def test_attachment_delete(self):
self.run_command('--os-volume-api-version 3.27 '
'attachment-delete 1234')
self.assert_called('DELETE', '/attachments/1234')
def test_upload_to_image(self):
expected = {'os-volume_upload_image': {'force': False,
'container_format': 'bare',
'disk_format': 'raw',
'image_name': 'test-image'}}
self.run_command('upload-to-image 1234 test-image')
self.assert_called_anytime('GET', '/volumes/1234')
self.assert_called_anytime('POST', '/volumes/1234/action',
body=expected)
def test_upload_to_image_private_not_protected(self):
expected = {'os-volume_upload_image': {'force': False,
'container_format': 'bare',
'disk_format': 'raw',
'image_name': 'test-image',
'protected': False,
'visibility': 'private'}}
self.run_command('--os-volume-api-version 3.1 '
'upload-to-image 1234 test-image')
self.assert_called_anytime('GET', '/volumes/1234')
self.assert_called_anytime('POST', '/volumes/1234/action',
body=expected)
def test_upload_to_image_public_protected(self):
expected = {'os-volume_upload_image': {'force': False,
'container_format': 'bare',
'disk_format': 'raw',
'image_name': 'test-image',
'protected': 'True',
'visibility': 'public'}}
self.run_command('--os-volume-api-version 3.1 '
'upload-to-image --visibility=public '
'--protected=True 1234 test-image')
self.assert_called_anytime('GET', '/volumes/1234')
self.assert_called_anytime('POST', '/volumes/1234/action',
body=expected)
def test_backup_update(self):
self.run_command('--os-volume-api-version 3.9 '
'backup-update --name new_name 1234')
expected = {'backup': {'name': 'new_name'}}
self.assert_called('PUT', '/backups/1234', body=expected)
def test_backup_list_with_with_count(self):
self.run_command(
'--os-volume-api-version 3.45 backup-list --with-count')
self.assert_called('GET', '/backups/detail?with_count=True')
def test_backup_update_with_description(self):
self.run_command('--os-volume-api-version 3.9 '
'backup-update 1234 --description=new-description')
expected = {'backup': {'description': 'new-description'}}
self.assert_called('PUT', '/backups/1234', body=expected)
def test_backup_update_with_metadata(self):
cmd = '--os-volume-api-version 3.43 '
cmd += 'backup-update '
cmd += '--metadata foo=bar '
cmd += '1234'
self.run_command(cmd)
expected = {'backup': {'metadata': {'foo': 'bar'}}}
self.assert_called('PUT', '/backups/1234', body=expected)
def test_backup_update_all(self):
# rename and change description
self.run_command('--os-volume-api-version 3.43 '
'backup-update --name new-name '
'--description=new-description '
'--metadata foo=bar 1234')
expected = {'backup': {
'name': 'new-name',
'description': 'new-description',
'metadata': {'foo': 'bar'}
}}
self.assert_called('PUT', '/backups/1234', body=expected)
def test_backup_update_without_arguments(self):
# Call rename with no arguments
self.assertRaises(SystemExit, self.run_command,
'--os-volume-api-version 3.9 backup-update')
def test_backup_update_bad_request(self):
self.assertRaises(exceptions.ClientException,
self.run_command,
'--os-volume-api-version 3.9 backup-update 1234')
def test_backup_update_wrong_version(self):
self.assertRaises(SystemExit,
self.run_command,
'--os-volume-api-version 3.8 '
'backup-update --name new-name 1234')
def test_group_type_list(self):
self.run_command('--os-volume-api-version 3.11 group-type-list')
self.assert_called_anytime('GET', '/group_types?is_public=None')
def test_group_type_list_public(self):
self.run_command('--os-volume-api-version 3.52 group-type-list '
'--filters is_public=True')
self.assert_called('GET', '/group_types?is_public=True')
def test_group_type_list_private(self):
self.run_command('--os-volume-api-version 3.52 group-type-list '
'--filters is_public=False')
self.assert_called('GET', '/group_types?is_public=False')
def test_group_type_list_public_private(self):
self.run_command('--os-volume-api-version 3.52 group-type-list')
self.assert_called('GET', '/group_types?is_public=None')
def test_group_type_show(self):
self.run_command('--os-volume-api-version 3.11 '
'group-type-show 1')
self.assert_called('GET', '/group_types/1')
def test_group_type_create(self):
self.run_command('--os-volume-api-version 3.11 '
'group-type-create test-type-1')
self.assert_called('POST', '/group_types')
def test_group_type_create_public(self):
expected = {'group_type': {'name': 'test-type-1',
'description': 'test_type-1-desc',
'is_public': True}}
self.run_command('--os-volume-api-version 3.11 '
'group-type-create test-type-1 '
'--description=test_type-1-desc '
'--is-public=True')
self.assert_called('POST', '/group_types', body=expected)
def test_group_type_create_private(self):
expected = {'group_type': {'name': 'test-type-3',
'description': 'test_type-3-desc',
'is_public': False}}
self.run_command('--os-volume-api-version 3.11 '
'group-type-create test-type-3 '
'--description=test_type-3-desc '
'--is-public=False')
self.assert_called('POST', '/group_types', body=expected)
def test_group_specs_list(self):
self.run_command('--os-volume-api-version 3.11 group-specs-list')
self.assert_called('GET', '/group_types?is_public=None')
def test_create_volume_with_group(self):
self.run_command('--os-volume-api-version 3.13 create --group-id 5678 '
'--volume-type 4321 1')
self.assert_called('GET', '/volumes/1234')
expected = {'volume': {'imageRef': None,
'size': 1,
'availability_zone': None,
'source_volid': None,
'consistencygroup_id': None,
'group_id': '5678',
'name': None,
'snapshot_id': None,
'metadata': {},
'volume_type': '4321',
'description': None,
'backup_id': None}}
self.assert_called_anytime('POST', '/volumes', expected)
@ddt.data({'cmd': '--os-volume-api-version 3.47 create --backup-id 1234',
'update': {'backup_id': '1234'}},
{'cmd': '--os-volume-api-version 3.47 create 2',
'update': {'size': 2}}
)
@ddt.unpack
def test_create_volume_with_backup(self, cmd, update):
self.run_command(cmd)
self.assert_called('GET', '/volumes/1234')
expected = {'volume': {'imageRef': None,
'size': None,
'availability_zone': None,
'source_volid': None,
'consistencygroup_id': None,
'name': None,
'snapshot_id': None,
'metadata': {},
'volume_type': None,
'description': None,
'backup_id': None}}
expected['volume'].update(update)
self.assert_called_anytime('POST', '/volumes', body=expected)
def test_group_list(self):
self.run_command('--os-volume-api-version 3.13 group-list')
self.assert_called_anytime('GET', '/groups/detail')
def test_group_list__with_all_tenant(self):
self.run_command(
'--os-volume-api-version 3.13 group-list --all-tenants')
self.assert_called_anytime('GET', '/groups/detail?all_tenants=1')
def test_group_show(self):
self.run_command('--os-volume-api-version 3.13 '
'group-show 1234')
self.assert_called('GET', '/groups/1234')
def test_group_show_with_list_volume(self):
self.run_command('--os-volume-api-version 3.25 '
'group-show 1234 --list-volume')
self.assert_called('GET', '/groups/1234?list_volume=True')
@ddt.data(True, False)
def test_group_delete(self, delete_vol):
cmd = '--os-volume-api-version 3.13 group-delete 1234'
if delete_vol:
cmd += ' --delete-volumes'
self.run_command(cmd)
expected = {'delete': {'delete-volumes': delete_vol}}
self.assert_called('POST', '/groups/1234/action', expected)
def test_group_create(self):
expected = {'group': {'name': 'test-1',
'description': 'test-1-desc',
'group_type': 'my_group_type',
'volume_types': ['type1', 'type2'],
'availability_zone': 'zone1'}}
self.run_command('--os-volume-api-version 3.13 '
'group-create --name test-1 '
'--description test-1-desc '
'--availability-zone zone1 '
'my_group_type type1,type2')
self.assert_called_anytime('POST', '/groups', body=expected)
def test_group_update(self):
self.run_command('--os-volume-api-version 3.13 group-update '
'--name group2 --description desc2 '
'--add-volumes uuid1,uuid2 '
'--remove-volumes uuid3,uuid4 '
'1234')
expected = {'group': {'name': 'group2',
'description': 'desc2',
'add_volumes': 'uuid1,uuid2',
'remove_volumes': 'uuid3,uuid4'}}
self.assert_called('PUT', '/groups/1234',
body=expected)
def test_group_update_invalid_args(self):
self.assertRaises(exceptions.ClientException,
self.run_command,
'--os-volume-api-version 3.13 group-update 1234')
def test_group_snapshot_list(self):
self.run_command('--os-volume-api-version 3.14 group-snapshot-list')
self.assert_called_anytime('GET',
'/group_snapshots/detail')
def test_group_snapshot_show(self):
self.run_command('--os-volume-api-version 3.14 '
'group-snapshot-show 1234')
self.assert_called('GET', '/group_snapshots/1234')
def test_group_snapshot_delete(self):
cmd = '--os-volume-api-version 3.14 group-snapshot-delete 1234'
self.run_command(cmd)
self.assert_called('DELETE', '/group_snapshots/1234')
def test_group_snapshot_create(self):
expected = {'group_snapshot': {'name': 'test-1',
'description': 'test-1-desc',
'group_id': '1234'}}
self.run_command('--os-volume-api-version 3.14 '
'group-snapshot-create --name test-1 '
'--description test-1-desc 1234')
self.assert_called_anytime('POST', '/group_snapshots', body=expected)
@ddt.data(
{'grp_snap_id': '1234', 'src_grp_id': None,
'src': '--group-snapshot 1234'},
{'grp_snap_id': None, 'src_grp_id': '1234',
'src': '--source-group 1234'},
)
@ddt.unpack
def test_group_create_from_src(self, grp_snap_id, src_grp_id, src):
expected = {'create-from-src': {'name': 'test-1',
'description': 'test-1-desc'}}
if grp_snap_id:
expected['create-from-src']['group_snapshot_id'] = grp_snap_id
elif src_grp_id:
expected['create-from-src']['source_group_id'] = src_grp_id
cmd = ('--os-volume-api-version 3.14 '
'group-create-from-src --name test-1 '
'--description test-1-desc ')
cmd += src
self.run_command(cmd)
self.assert_called_anytime('POST', '/groups/action', body=expected)
def test_volume_manageable_list(self):
self.run_command('--os-volume-api-version 3.8 '
'manageable-list fakehost')
self.assert_called('GET', '/manageable_volumes/detail?host=fakehost')
def test_volume_manageable_list_details(self):
self.run_command('--os-volume-api-version 3.8 '
'manageable-list fakehost --detailed True')
self.assert_called('GET', '/manageable_volumes/detail?host=fakehost')
def test_volume_manageable_list_no_details(self):
self.run_command('--os-volume-api-version 3.8 '
'manageable-list fakehost --detailed False')
self.assert_called('GET', '/manageable_volumes?host=fakehost')
def test_volume_manageable_list_cluster(self):
self.run_command('--os-volume-api-version 3.17 '
'manageable-list --cluster dest')
self.assert_called('GET', '/manageable_volumes/detail?cluster=dest')
def test_snapshot_manageable_list(self):
self.run_command('--os-volume-api-version 3.8 '
'snapshot-manageable-list fakehost')
self.assert_called('GET', '/manageable_snapshots/detail?host=fakehost')
def test_snapshot_manageable_list_details(self):
self.run_command('--os-volume-api-version 3.8 '
'snapshot-manageable-list fakehost --detailed True')
self.assert_called('GET', '/manageable_snapshots/detail?host=fakehost')
def test_snapshot_manageable_list_no_details(self):
self.run_command('--os-volume-api-version 3.8 '
'snapshot-manageable-list fakehost --detailed False')
self.assert_called('GET', '/manageable_snapshots?host=fakehost')
def test_snapshot_manageable_list_cluster(self):
self.run_command('--os-volume-api-version 3.17 '
'snapshot-manageable-list --cluster dest')
self.assert_called('GET', '/manageable_snapshots/detail?cluster=dest')
@ddt.data('', 'snapshot-')
def test_manageable_list_cluster_before_3_17(self, prefix):
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'--os-volume-api-version 3.16 '
'%smanageable-list --cluster dest' % prefix)
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
@ddt.data('', 'snapshot-')
def test_manageable_list_mutual_exclusion(self, prefix, error_mock):
error_mock.side_effect = SystemExit
self.assertRaises(SystemExit,
self.run_command,
'--os-volume-api-version 3.17 '
'%smanageable-list fakehost --cluster dest' % prefix)
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
@ddt.data('', 'snapshot-')
def test_manageable_list_missing_required(self, prefix, error_mock):
error_mock.side_effect = SystemExit
self.assertRaises(SystemExit,
self.run_command,
'--os-volume-api-version 3.17 '
'%smanageable-list' % prefix)
def test_list_messages(self):
self.run_command('--os-volume-api-version 3.3 message-list')
self.assert_called('GET', '/messages')
@ddt.data('volume', 'backup', 'snapshot', None)
def test_reset_state_entity_not_found(self, entity_type):
cmd = 'reset-state 999999'
if entity_type is not None:
cmd += ' --type %s' % entity_type
self.assertRaises(exceptions.CommandError, self.run_command, cmd)
@ddt.data({'entity_types': [{'name': 'volume', 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'backup', 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'snapshot', 'version': '3.0',
'command': 'os-reset_status'},
{'name': None, 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'group', 'version': '3.20',
'command': 'reset_status'},
{'name': 'group-snapshot', 'version': '3.19',
'command': 'reset_status'}],
'r_id': ['1234'],
'states': ['available', 'error', None]},
{'entity_types': [{'name': 'volume', 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'backup', 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'snapshot', 'version': '3.0',
'command': 'os-reset_status'},
{'name': None, 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'group', 'version': '3.20',
'command': 'reset_status'},
{'name': 'group-snapshot', 'version': '3.19',
'command': 'reset_status'}],
'r_id': ['1234', '5678'],
'states': ['available', 'error', None]})
@ddt.unpack
def test_reset_state_normal(self, entity_types, r_id, states):
for state in states:
for t in entity_types:
if state is None:
expected = {t['command']: {}}
cmd = ('--os-volume-api-version '
'%s reset-state %s') % (t['version'],
' '.join(r_id))
else:
expected = {t['command']: {'status': state}}
cmd = ('--os-volume-api-version '
'%s reset-state '
'--state %s %s') % (t['version'],
state, ' '.join(r_id))
if t['name'] is not None:
cmd += ' --type %s' % t['name']
self.run_command(cmd)
name = t['name'] if t['name'] else 'volume'
for re in r_id:
self.assert_called_anytime('POST', '/%ss/%s/action'
% (name.replace('-', '_'), re),
body=expected)
@ddt.data({'command': '--attach-status detached',
'expected': {'attach_status': 'detached'}},
{'command': '--state in-use --attach-status attached',
'expected': {'status': 'in-use',
'attach_status': 'attached'}},
{'command': '--reset-migration-status',
'expected': {'migration_status': 'none'}})
@ddt.unpack
def test_reset_state_volume_additional_status(self, command, expected):
self.run_command('reset-state %s 1234' % command)
expected = {'os-reset_status': expected}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_snapshot_list_with_with_count(self):
self.run_command(
'--os-volume-api-version 3.45 snapshot-list --with-count')
self.assert_called('GET', '/snapshots/detail?with_count=True')
def test_snapshot_list_with_metadata(self):
self.run_command('--os-volume-api-version 3.22 '
'snapshot-list --metadata key1=val1')
expected = ("/snapshots/detail?metadata=%s"
% parse.quote_plus("{'key1': 'val1'}"))
self.assert_called('GET', expected)
@ddt.data(('resource_type',), ('event_id',), ('resource_uuid',),
('level', 'message_level'), ('request_id',))
def test_list_messages_with_filters(self, filter):
self.run_command('--os-volume-api-version 3.5 message-list --%s=TEST'
% filter[0])
self.assert_called('GET', '/messages?%s=TEST' % filter[-1])
def test_list_messages_with_sort(self):
self.run_command('--os-volume-api-version 3.5 '
'message-list --sort=id:asc')
self.assert_called('GET', '/messages?sort=id%3Aasc')
def test_list_messages_with_limit(self):
self.run_command('--os-volume-api-version 3.5 message-list --limit=1')
self.assert_called('GET', '/messages?limit=1')
def test_list_messages_with_marker(self):
self.run_command('--os-volume-api-version 3.5 message-list --marker=1')
self.assert_called('GET', '/messages?marker=1')
def test_list_with_image_metadata_before_3_4(self):
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'list --image_metadata image_name=1234')
def test_list_filter_image_metadata(self):
self.run_command('--os-volume-api-version 3.4 '
'list --image_metadata image_name=1234')
url = ('/volumes/detail?%s' %
parse.urlencode([('glance_metadata', {"image_name": "1234"})]))
self.assert_called('GET', url)
def test_show_message(self):
self.run_command('--os-volume-api-version 3.5 message-show 1234')
self.assert_called('GET', '/messages/1234')
def test_delete_message(self):
self.run_command('--os-volume-api-version 3.5 message-delete 1234')
self.assert_called('DELETE', '/messages/1234')
def test_delete_messages(self):
self.run_command(
'--os-volume-api-version 3.3 message-delete 1234 12345')
self.assert_called_anytime('DELETE', '/messages/1234')
self.assert_called_anytime('DELETE', '/messages/12345')
@mock.patch('cinderclient.utils.find_resource')
def test_delete_metadata(self, mock_find_volume):
mock_find_volume.return_value = volumes.Volume(self,
{'id': '1234',
'metadata':
{'k1': 'v1',
'k2': 'v2',
'k3': 'v3'}},
loaded = True)
expected = {'metadata': {'k2': 'v2'}}
self.run_command('--os-volume-api-version 3.15 '
'metadata 1234 unset k1 k3')
self.assert_called('PUT', '/volumes/1234/metadata', body=expected)
@ddt.data(("3.0", None), ("3.6", None),
("3.7", True), ("3.7", False), ("3.7", ""))
@ddt.unpack
def test_service_list_withreplication(self, version, replication):
command = ('--os-volume-api-version %s service-list' %
version)
if replication is not None:
command += ' --withreplication %s' % replication
self.run_command(command)
self.assert_called('GET', '/os-services')
def test_group_enable_replication(self):
cmd = '--os-volume-api-version 3.38 group-enable-replication 1234'
self.run_command(cmd)
expected = {'enable_replication': {}}
self.assert_called('POST', '/groups/1234/action', body=expected)
def test_group_disable_replication(self):
cmd = '--os-volume-api-version 3.38 group-disable-replication 1234'
self.run_command(cmd)
expected = {'disable_replication': {}}
self.assert_called('POST', '/groups/1234/action', body=expected)
@ddt.data((False, None), (True, None),
(False, "backend1"), (True, "backend1"),
(False, "default"), (True, "default"))
@ddt.unpack
def test_group_failover_replication(self, attach_vol, backend):
attach = '--allow-attached-volume ' if attach_vol else ''
backend_id = ('--secondary-backend-id ' + backend) if backend else ''
cmd = ('--os-volume-api-version 3.38 '
'group-failover-replication 1234 ' + attach + backend_id)
self.run_command(cmd)
expected = {'failover_replication':
{'allow_attached_volume': attach_vol,
'secondary_backend_id': backend if backend else None}}
self.assert_called('POST', '/groups/1234/action', body=expected)
def test_group_list_replication_targets(self):
cmd = ('--os-volume-api-version 3.38 group-list-replication-targets'
' 1234')
self.run_command(cmd)
expected = {'list_replication_targets': {}}
self.assert_called('POST', '/groups/1234/action', body=expected)
@mock.patch('cinderclient.v3.services.ServiceManager.get_log_levels')
def test_service_get_log_before_3_32(self, get_levels_mock):
self.assertRaises(SystemExit,
self.run_command, '--os-volume-api-version 3.28 '
'service-get-log')
get_levels_mock.assert_not_called()
@mock.patch('cinderclient.v3.services.ServiceManager.get_log_levels')
@mock.patch('cinderclient.utils.print_list')
def test_service_get_log_no_params(self, print_mock, get_levels_mock):
self.run_command('--os-volume-api-version 3.32 service-get-log')
get_levels_mock.assert_called_once_with('', '', '')
print_mock.assert_called_once_with(get_levels_mock.return_value,
('Binary', 'Host', 'Prefix',
'Level'))
@ddt.data('*', 'cinder-api', 'cinder-volume', 'cinder-scheduler',
'cinder-backup')
@mock.patch('cinderclient.v3.services.ServiceManager.get_log_levels')
@mock.patch('cinderclient.utils.print_list')
def test_service_get_log(self, binary, print_mock, get_levels_mock):
server = 'host1'
prefix = 'sqlalchemy'
self.run_command('--os-volume-api-version 3.32 service-get-log '
'--binary %s --server %s --prefix %s' % (
binary, server, prefix))
get_levels_mock.assert_called_once_with(binary, server, prefix)
print_mock.assert_called_once_with(get_levels_mock.return_value,
('Binary', 'Host', 'Prefix',
'Level'))
@mock.patch('cinderclient.v3.services.ServiceManager.set_log_levels')
def test_service_set_log_before_3_32(self, set_levels_mock):
self.assertRaises(SystemExit,
self.run_command, '--os-volume-api-version 3.28 '
'service-set-log debug')
set_levels_mock.assert_not_called()
@mock.patch('cinderclient.v3.services.ServiceManager.set_log_levels')
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
def test_service_set_log_missing_required(self, error_mock,
set_levels_mock):
error_mock.side_effect = SystemExit
self.assertRaises(SystemExit,
self.run_command, '--os-volume-api-version 3.32 '
'service-set-log')
set_levels_mock.assert_not_called()
# Different error message from argparse library in Python 2 and 3
if six.PY3:
msg = 'the following arguments are required: <log-level>'
else:
msg = 'too few arguments'
error_mock.assert_called_once_with(msg)
@ddt.data('debug', 'DEBUG', 'info', 'INFO', 'warning', 'WARNING', 'error',
'ERROR')
@mock.patch('cinderclient.v3.services.ServiceManager.set_log_levels')
def test_service_set_log_min_params(self, level, set_levels_mock):
self.run_command('--os-volume-api-version 3.32 '
'service-set-log %s' % level)
set_levels_mock.assert_called_once_with(level, '', '', '')
@ddt.data('*', 'cinder-api', 'cinder-volume', 'cinder-scheduler',
'cinder-backup')
@mock.patch('cinderclient.v3.services.ServiceManager.set_log_levels')
def test_service_set_log_levels(self, binary, set_levels_mock):
level = 'debug'
server = 'host1'
prefix = 'sqlalchemy.'
self.run_command('--os-volume-api-version 3.32 '
'service-set-log %s --binary %s --server %s '
'--prefix %s' % (level, binary, server, prefix))
set_levels_mock.assert_called_once_with(level, binary, server, prefix)
@mock.patch('cinderclient.shell_utils._poll_for_status')
def test_create_with_poll(self, poll_method):
self.run_command('create --poll 1')
self.assert_called_anytime('GET', '/volumes/1234')
volume = self.shell.cs.volumes.get('1234')
info = dict()
info.update(volume._info)
self.assertEqual(1, poll_method.call_count)
timeout_period = 3600
poll_method.assert_has_calls([mock.call(self.shell.cs.volumes.get,
1234, info, 'creating', ['available'], timeout_period,
self.shell.cs.client.global_request_id,
self.shell.cs.messages)])
@mock.patch('cinderclient.shell_utils.time')
def test_poll_for_status(self, mock_time):
poll_period = 2
some_id = "some-id"
global_request_id = "req-someid"
action = "some"
updated_objects = (
base.Resource(None, info={"not_default_field": "creating"}),
base.Resource(None, info={"not_default_field": "available"}))
poll_fn = mock.MagicMock(side_effect=updated_objects)
cinderclient.shell_utils._poll_for_status(
poll_fn = poll_fn,
obj_id = some_id,
global_request_id = global_request_id,
messages = base.Resource(None, {}),
info = {},
action = action,
status_field = "not_default_field",
final_ok_states = ['available'],
timeout_period=3600)
self.assertEqual([mock.call(poll_period)] * 2,
mock_time.sleep.call_args_list)
self.assertEqual([mock.call(some_id)] * 2, poll_fn.call_args_list)
@mock.patch('cinderclient.v3.messages.MessageManager.list')
@mock.patch('cinderclient.shell_utils.time')
def test_poll_for_status_error(self, mock_time, mock_message_list):
poll_period = 2
some_id = "some_id"
global_request_id = "req-someid"
action = "some"
updated_objects = (
base.Resource(None, info={"not_default_field": "creating"}),
base.Resource(None, info={"not_default_field": "error"}))
poll_fn = mock.MagicMock(side_effect=updated_objects)
msg_object = base.Resource(cinderclient.v3.messages.MessageManager,
info = {"user_message": "ERROR!"})
mock_message_list.return_value = (msg_object,)
self.assertRaises(exceptions.ResourceInErrorState,
cinderclient.shell_utils._poll_for_status,
poll_fn=poll_fn,
obj_id=some_id,
global_request_id=global_request_id,
messages=cinderclient.v3.messages.MessageManager(api=3.34),
info=dict(),
action=action,
final_ok_states=['available'],
status_field="not_default_field",
timeout_period=3600)
self.assertEqual([mock.call(poll_period)] * 2,
mock_time.sleep.call_args_list)
self.assertEqual([mock.call(some_id)] * 2, poll_fn.call_args_list)
def test_backup(self):
self.run_command('--os-volume-api-version 3.42 backup-create '
'--name 1234 1234')
expected = {'backup': {'volume_id': 1234,
'container': None,
'name': '1234',
'description': None,
'incremental': False,
'force': False,
'snapshot_id': None,
}}
self.assert_called('POST', '/backups', body=expected)
def test_backup_with_metadata(self):
self.run_command('--os-volume-api-version 3.43 backup-create '
'--metadata foo=bar --name 1234 1234')
expected = {'backup': {'volume_id': 1234,
'container': None,
'name': '1234',
'description': None,
'incremental': False,
'force': False,
'snapshot_id': None,
'metadata': {'foo': 'bar'}, }}
self.assert_called('POST', '/backups', body=expected)
def test_backup_with_az(self):
self.run_command('--os-volume-api-version 3.51 backup-create '
'--availability-zone AZ2 --name 1234 1234')
expected = {'backup': {'volume_id': 1234,
'container': None,
'name': '1234',
'description': None,
'incremental': False,
'force': False,
'snapshot_id': None,
'availability_zone': 'AZ2'}}
self.assert_called('POST', '/backups', body=expected)
@mock.patch("cinderclient.utils.print_list")
def test_snapshot_list_with_userid(self, mock_print_list):
"""Ensure 3.41 provides User ID header."""
self.run_command('--os-volume-api-version 3.41 snapshot-list')
self.assert_called('GET', '/snapshots/detail')
columns = ['ID', 'Volume ID', 'Status', 'Name', 'Size', 'User ID']
mock_print_list.assert_called_once_with(mock.ANY, columns,
sortby_index=0)
@mock.patch('cinderclient.v3.volumes.Volume.migrate_volume')
def test_migrate_volume_before_3_16(self, v3_migrate_mock):
self.run_command('--os-volume-api-version 3.15 '
'migrate 1234 fakehost')
v3_migrate_mock.assert_called_once_with(
'fakehost', False, False, None)
@mock.patch('cinderclient.v3.volumes.Volume.migrate_volume')
def test_migrate_volume_3_16(self, v3_migrate_mock):
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 fakehost')
self.assertEqual(4, len(v3_migrate_mock.call_args[0]))
def test_migrate_volume_with_cluster_before_3_16(self):
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'--os-volume-api-version 3.15 '
'migrate 1234 fakehost --cluster fakecluster')
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
def test_migrate_volume_mutual_exclusion(self, error_mock):
error_mock.side_effect = SystemExit
self.assertRaises(SystemExit,
self.run_command,
'--os-volume-api-version 3.16 '
'migrate 1234 fakehost --cluster fakecluster')
msg = 'argument --cluster: not allowed with argument <host>'
error_mock.assert_called_once_with(msg)
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
def test_migrate_volume_missing_required(self, error_mock):
error_mock.side_effect = SystemExit
self.assertRaises(SystemExit,
self.run_command,
'--os-volume-api-version 3.16 '
'migrate 1234')
msg = 'one of the arguments <host> --cluster is required'
error_mock.assert_called_once_with(msg)
def test_migrate_volume_host(self):
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 fakehost')
expected = {'os-migrate_volume': {'force_host_copy': False,
'lock_volume': False,
'host': 'fakehost'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_migrate_volume_cluster(self):
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 --cluster mycluster')
expected = {'os-migrate_volume': {'force_host_copy': False,
'lock_volume': False,
'cluster': 'mycluster'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_migrate_volume_bool_force(self):
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 fakehost --force-host-copy '
'--lock-volume')
expected = {'os-migrate_volume': {'force_host_copy': True,
'lock_volume': True,
'host': 'fakehost'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_migrate_volume_bool_force_false(self):
# Set both --force-host-copy and --lock-volume to False.
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 fakehost --force-host-copy=False '
'--lock-volume=False')
expected = {'os-migrate_volume': {'force_host_copy': 'False',
'lock_volume': 'False',
'host': 'fakehost'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
# Do not set the values to --force-host-copy and --lock-volume.
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 fakehost')
expected = {'os-migrate_volume': {'force_host_copy': False,
'lock_volume': False,
'host': 'fakehost'}}
self.assert_called('POST', '/volumes/1234/action',
body=expected)
@ddt.data({'bootable': False, 'by_id': False, 'cluster': None},
{'bootable': True, 'by_id': False, 'cluster': None},
{'bootable': False, 'by_id': True, 'cluster': None},
{'bootable': True, 'by_id': True, 'cluster': None},
{'bootable': True, 'by_id': True, 'cluster': 'clustername'})
@ddt.unpack
def test_volume_manage(self, bootable, by_id, cluster):
cmd = ('--os-volume-api-version 3.16 '
'manage host1 some_fake_name --name foo --description bar '
'--volume-type baz --availability-zone az '
'--metadata k1=v1 k2=v2')
if by_id:
cmd += ' --id-type source-id'
if bootable:
cmd += ' --bootable'
if cluster:
cmd += ' --cluster ' + cluster
self.run_command(cmd)
ref = 'source-id' if by_id else 'source-name'
expected = {'volume': {'host': 'host1',
'ref': {ref: 'some_fake_name'},
'name': 'foo',
'description': 'bar',
'volume_type': 'baz',
'availability_zone': 'az',
'metadata': {'k1': 'v1', 'k2': 'v2'},
'bootable': bootable}}
if cluster:
expected['volume']['cluster'] = cluster
self.assert_called_anytime('POST', '/os-volume-manage', body=expected)
def test_volume_manage_before_3_16(self):
"""Cluster optional argument was not acceptable."""
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'manage host1 some_fake_name '
'--cluster clustername'
'--name foo --description bar --bootable '
'--volume-type baz --availability-zone az '
'--metadata k1=v1 k2=v2')
def test_worker_cleanup_before_3_24(self):
self.assertRaises(SystemExit,
self.run_command,
'work-cleanup fakehost')
def test_worker_cleanup(self):
self.run_command('--os-volume-api-version 3.24 '
'work-cleanup --cluster clustername --host hostname '
'--binary binaryname --is-up false --disabled true '
'--resource-id uuid --resource-type Volume '
'--service-id 1')
expected = {'cluster_name': 'clustername',
'host': 'hostname',
'binary': 'binaryname',
'is_up': 'false',
'disabled': 'true',
'resource_id': 'uuid',
'resource_type': 'Volume',
'service_id': 1}
self.assert_called('POST', '/workers/cleanup', body=expected)
def test_create_transfer(self):
self.run_command('transfer-create 1234')
expected = {'transfer': {'volume_id': 1234,
'name': None,
}}
self.assert_called('POST', '/os-volume-transfer', body=expected)
def test_create_transfer_no_snaps(self):
self.run_command('--os-volume-api-version 3.55 transfer-create '
'--no-snapshots 1234')
expected = {'transfer': {'volume_id': 1234,
'name': None,
'no_snapshots': True
}}
self.assert_called('POST', '/volume-transfers', body=expected)
def test_list_transfer_sorty_not_sorty(self):
self.run_command(
'--os-volume-api-version 3.59 transfer-list')
url = ('/volume-transfers/detail')
self.assert_called('GET', url)
def test_subcommand_parser(self):
"""Ensure that all the expected commands show up.
This test ensures that refactoring code does not somehow result in
a command accidentally ceasing to exist.
TODO: add a similar test for 3.59 or so
"""
p = self.shell.get_subcommand_parser(api_versions.APIVersion("3.0"),
input_args=['help'], do_help=True)
help_text = p.format_help()
# These are v3.0 commands only
expected_commands = ('absolute-limits',
'api-version',
'availability-zone-list',
'backup-create',
'backup-delete',
'backup-export',
'backup-import',
'backup-list',
'backup-reset-state',
'backup-restore',
'backup-show',
'cgsnapshot-create',
'cgsnapshot-delete',
'cgsnapshot-list',
'cgsnapshot-show',
'consisgroup-create',
'consisgroup-create-from-src',
'consisgroup-delete',
'consisgroup-list',
'consisgroup-show',
'consisgroup-update',
'create',
'delete',
'encryption-type-create',
'encryption-type-delete',
'encryption-type-list',
'encryption-type-show',
'encryption-type-update',
'extend',
'extra-specs-list',
'failover-host',
'force-delete',
'freeze-host',
'get-capabilities',
'get-pools',
'image-metadata',
'image-metadata-show',
'list',
'manage',
'metadata',
'metadata-show',
'metadata-update-all',
'migrate',
'qos-associate',
'qos-create',
'qos-delete',
'qos-disassociate',
'qos-disassociate-all',
'qos-get-association',
'qos-key',
'qos-list',
'qos-show',
'quota-class-show',
'quota-class-update',
'quota-defaults',
'quota-delete',
'quota-show',
'quota-update',
'quota-usage',
'rate-limits',
'readonly-mode-update',
'rename',
'reset-state',
'retype',
'service-disable',
'service-enable',
'service-list',
'set-bootable',
'show',
'snapshot-create',
'snapshot-delete',
'snapshot-list',
'snapshot-manage',
'snapshot-metadata',
'snapshot-metadata-show',
'snapshot-metadata-update-all',
'snapshot-rename',
'snapshot-reset-state',
'snapshot-show',
'snapshot-unmanage',
'thaw-host',
'transfer-accept',
'transfer-create',
'transfer-delete',
'transfer-list',
'transfer-show',
'type-access-add',
'type-access-list',
'type-access-remove',
'type-create',
'type-default',
'type-delete',
'type-key',
'type-list',
'type-show',
'type-update',
'unmanage',
'upload-to-image',
'version-list',
'bash-completion',
'help',)
for e in expected_commands:
self.assertIn(' ' + e, help_text)
@ddt.data(
# testcases for list transfers
{'command':
'transfer-list --filters volume_id=456',
'expected':
'/os-volume-transfer/detail?volume_id=456'},
{'command':
'transfer-list --filters id=123',
'expected':
'/os-volume-transfer/detail?id=123'},
{'command':
'transfer-list --filters name=abc',
'expected':
'/os-volume-transfer/detail?name=abc'},
{'command':
'transfer-list --filters name=abc --filters volume_id=456',
'expected':
'/os-volume-transfer/detail?name=abc&volume_id=456'},
{'command':
'transfer-list --filters id=123 --filters volume_id=456',
'expected':
'/os-volume-transfer/detail?id=123&volume_id=456'},
{'command':
'transfer-list --filters id=123 --filters name=abc',
'expected':
'/os-volume-transfer/detail?id=123&name=abc'},
)
@ddt.unpack
def test_transfer_list_with_filters(self, command, expected):
self.run_command('--os-volume-api-version 3.52 %s' % command)
self.assert_called('GET', expected)
def test_default_type_set(self):
self.run_command('--os-volume-api-version 3.62 default-type-set '
'4c298f16-e339-4c80-b934-6cbfcb7525a0 '
'629632e7-99d2-4c40-9ae3-106fa3b1c9b7')
body = {
'default_type':
{
'volume_type': '4c298f16-e339-4c80-b934-6cbfcb7525a0'
}
}
self.assert_called(
'PUT', 'v3/default-types/629632e7-99d2-4c40-9ae3-106fa3b1c9b7',
body=body)
def test_default_type_list_project(self):
self.run_command('--os-volume-api-version 3.62 default-type-list '
'--project-id 629632e7-99d2-4c40-9ae3-106fa3b1c9b7')
self.assert_called(
'GET', 'v3/default-types/629632e7-99d2-4c40-9ae3-106fa3b1c9b7')
def test_default_type_list(self):
self.run_command('--os-volume-api-version 3.62 default-type-list')
self.assert_called('GET', 'v3/default-types')
def test_default_type_delete(self):
self.run_command('--os-volume-api-version 3.62 default-type-unset '
'629632e7-99d2-4c40-9ae3-106fa3b1c9b7')
self.assert_called(
'DELETE', 'v3/default-types/629632e7-99d2-4c40-9ae3-106fa3b1c9b7')
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/conftest.py | """Test utilities."""
import os
from unittest import mock
from aioamqp.envelope import Envelope
from aioamqp.properties import Properties
from henson import Application
import pytest
from henson_amqp import AMQP
class Settings:
"""A container object for test settings."""
AMQP_HOST = os.environ.get('TEST_AMQP_HOST', 'localhost')
AMQP_INBOUND_QUEUE = 'test.in'
AMQP_INBOUND_QUEUE_DURABLE = True
AMQP_INBOUND_EXCHANGE = 'test.in'
AMQP_INBOUND_EXCHANGE_DURABLE = True
AMQP_OUTBOUND_EXCHANGE = 'test.in'
AMQP_OUTBOUND_EXCHANGE_DURABLE = True
AMQP_INBOUND_ROUTING_KEY = 'test.in'
AMQP_OUTBOUND_ROUTING_KEY = 'test.in'
@pytest.fixture
def test_app():
"""Return a test application."""
return Application('testing', Settings)
@pytest.fixture
def test_amqp(test_app):
"""Return an extension bound to the test app."""
return AMQP(test_app)
@pytest.fixture
def test_consumer(test_amqp):
"""Return a consumer created by the test AMQP instance."""
consumer = test_amqp.consumer()
return consumer
@pytest.fixture
def test_producer(test_amqp):
"""Return a producer created by the test AMQP instance."""
producer = test_amqp.producer()
return producer
@pytest.fixture
def test_envelope():
"""Return a mock aioamqp.envelope.Envelope."""
return mock.Mock(spec=Envelope)
@pytest.fixture
def test_properties():
"""Return a mock aioamqp.properties.Properties."""
return mock.Mock(spec=Properties)
| []
| []
| [
"TEST_AMQP_HOST"
]
| [] | ["TEST_AMQP_HOST"] | python | 1 | 0 | |
pkg/secrets/passphrase/manager_test.go | package passphrase
import (
"os"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
const (
state = `
{"salt": "v1:fozI5u6B030=:v1:F+6ZduKKd8G0/V7L:PGMFeIzwobWRKmEAzUdaQHqC5mMRIQ=="}
`
brokenState = `
{"salt": "fozI5u6B030=:v1:F+6ZduL:PGMFeIzwobWRKmEAzUdaQHqC5mMRIQ=="}
`
)
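// setIncorrectPassphraseTestEnvVars points PULUMI_CONFIG_PASSPHRASE at a wrong
// passphrase and returns a function that restores the previous environment.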
func setIncorrectPassphraseTestEnvVars() func() {
clearCachedSecretsManagers()
oldPassphrase := os.Getenv("PULUMI_CONFIG_PASSPHRASE")
oldPassphraseFile := os.Getenv("PULUMI_CONFIG_PASSPHRASE_FILE")
os.Setenv("PULUMI_CONFIG_PASSPHRASE", "password123")
os.Unsetenv("PULUMI_CONFIG_PASSPHRASE_FILE")
return func() {
os.Setenv("PULUMI_CONFIG_PASSPHRASE", oldPassphrase)
os.Setenv("PULUMI_CONFIG_PASSPHRASE_FILE", oldPassphraseFile)
}
}
func TestPassphraseManagerIncorrectPassphraseReturnsErrorCrypter(t *testing.T) {
setupEnv := setIncorrectPassphraseTestEnvVars()
defer setupEnv()
manager, err := NewPromptingPassphaseSecretsManagerFromState([]byte(state))
assert.NoError(t, err) // even if we pass the wrong provider, we should get a lockedPassphraseProvider
assert.Equal(t, manager, &localSecretsManager{
state: localSecretsManagerState{Salt: "v1:fozI5u6B030=:v1:F+6ZduKKd8G0/V7L:PGMFeIzwobWRKmEAzUdaQHqC5mMRIQ=="},
crypter: &errorCrypter{},
})
}
func setCorrectPassphraseTestEnvVars() func() {
clearCachedSecretsManagers()
oldPassphrase := os.Getenv("PULUMI_CONFIG_PASSPHRASE")
oldPassphraseFile := os.Getenv("PULUMI_CONFIG_PASSPHRASE_FILE")
os.Setenv("PULUMI_CONFIG_PASSPHRASE", "password")
os.Unsetenv("PULUMI_CONFIG_PASSPHRASE_FILE")
return func() {
os.Setenv("PULUMI_CONFIG_PASSPHRASE", oldPassphrase)
os.Setenv("PULUMI_CONFIG_PASSPHRASE_FILE", oldPassphraseFile)
}
}
func TestPassphraseManagerIncorrectStateReturnsError(t *testing.T) {
setupEnv := setCorrectPassphraseTestEnvVars()
defer setupEnv()
_, err := NewPromptingPassphaseSecretsManagerFromState([]byte(brokenState))
assert.Error(t, err)
}
func TestPassphraseManagerCorrectPassphraseReturnsSecretsManager(t *testing.T) {
setupEnv := setCorrectPassphraseTestEnvVars()
defer setupEnv()
sm, _ := NewPromptingPassphaseSecretsManagerFromState([]byte(state))
assert.NotNil(t, sm)
}
func unsetAllPassphraseEnvVars() func() {
clearCachedSecretsManagers()
oldPassphrase := os.Getenv("PULUMI_CONFIG_PASSPHRASE")
oldPassphraseFile := os.Getenv("PULUMI_CONFIG_PASSPHRASE_FILE")
os.Unsetenv("PULUMI_CONFIG_PASSPHRASE")
os.Unsetenv("PULUMI_CONFIG_PASSPHRASE_FILE")
return func() {
os.Setenv("PULUMI_CONFIG_PASSPHRASE", oldPassphrase)
os.Setenv("PULUMI_CONFIG_PASSPHRASE_FILE", oldPassphraseFile)
}
}
func TestPassphraseManagerNoEnvironmentVariablesReturnsError(t *testing.T) {
setupEnv := unsetAllPassphraseEnvVars()
defer setupEnv()
_, err := NewPromptingPassphaseSecretsManagerFromState([]byte(state))
assert.NotNil(t, err, strings.Contains(err.Error(), "unable to find either `PULUMI_CONFIG_PASSPHRASE` nor "+
"`PULUMI_CONFIG_PASSPHRASE_FILE`"))
}
| [
"\"PULUMI_CONFIG_PASSPHRASE\"",
"\"PULUMI_CONFIG_PASSPHRASE_FILE\"",
"\"PULUMI_CONFIG_PASSPHRASE\"",
"\"PULUMI_CONFIG_PASSPHRASE_FILE\"",
"\"PULUMI_CONFIG_PASSPHRASE\"",
"\"PULUMI_CONFIG_PASSPHRASE_FILE\""
]
| []
| [
"PULUMI_CONFIG_PASSPHRASE_FILE",
"PULUMI_CONFIG_PASSPHRASE"
]
| [] | ["PULUMI_CONFIG_PASSPHRASE_FILE", "PULUMI_CONFIG_PASSPHRASE"] | go | 2 | 0 | |
main.go | package main
import (
"context"
"log"
"math"
"os"
"strconv"
"github.com/bluemediaapp/models"
"github.com/gofiber/fiber/v2"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
var (
app = fiber.New()
client *mongo.Client
config *Config
mctx = context.Background()
videosCollection *mongo.Collection
likedVideosCollection *mongo.Collection
usersCollection *mongo.Collection
watchedVideosCollection *mongo.Collection
)
type VideoUpload struct {
Description string `form:"description"`
Series string `form:"series"`
}
func main() {
config = &Config{
port: os.Getenv("port"),
mongoUri: os.Getenv("mongo_uri"),
}
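	// GET /like/:video_id/:user_id — records a like for the user on the video,
	// rejecting duplicates, and boosts the user's interest in the video's tags.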
app.Get("/like/:video_id/:user_id", func(ctx *fiber.Ctx) error {
userId, err := strconv.ParseInt(ctx.Params("user_id"), 10, 64)
if err != nil {
return err
}
videoId, err := strconv.ParseInt(ctx.Params("video_id"), 10, 64)
if err != nil {
return err
}
if hasLiked(userId, videoId) {
_ = ctx.SendStatus(412)
_ = ctx.SendString("User has already liked this post")
return nil
}
user, err := getUser(userId)
if err != nil {
return err
}
video, err := getVideo(videoId)
if err != nil {
return err
}
err = likeVideo(user, video)
return err
})
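	// GET /watch/:video_id/:user_id — records a watch event, rejecting
	// duplicates, and slightly decays the user's interest in the video's tags.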
app.Get("/watch/:video_id/:user_id", func(ctx *fiber.Ctx) error {
userId, err := strconv.ParseInt(ctx.Params("user_id"), 10, 64)
if err != nil {
return err
}
videoId, err := strconv.ParseInt(ctx.Params("video_id"), 10, 64)
if err != nil {
return err
}
if hasWatched(userId, videoId) {
_ = ctx.SendStatus(412)
_ = ctx.SendString("User has already watched this post")
return nil
}
user, err := getUser(userId)
if err != nil {
return err
}
video, err := getVideo(videoId)
if err != nil {
return err
}
err = watchVideo(user, video)
if err != nil {
return err
}
return nil
})
initDb()
log.Fatal(app.Listen(config.port))
}
func initDb() {
// Connect mongo
var err error
client, err = mongo.NewClient(options.Client().ApplyURI(config.mongoUri))
if err != nil {
log.Fatal(err)
}
err = client.Connect(mctx)
if err != nil {
log.Fatal(err)
}
// Setup tables
db := client.Database("blue")
videosCollection = db.Collection("video_metadata")
likedVideosCollection = db.Collection("liked_videos")
watchedVideosCollection = db.Collection("watched_videos")
usersCollection = db.Collection("users")
}
// Liking
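// hasLiked reports whether the user already liked the video; lookup errors are
// treated as "already liked" so the handler fails closed.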
func hasLiked(userId int64, videoId int64) bool {
filter := bson.D{{"user_id", userId}, {"video_id", videoId}}
var limit int64 = 1
documentCount, err := likedVideosCollection.CountDocuments(mctx, filter, &options.CountOptions{
Limit: &limit,
})
if err != nil {
return true
}
return documentCount == int64(1)
}
func likeVideo(user models.DatabaseUser, video models.DatabaseVideo) error {
	// Record the like event (the handler has already rejected duplicates via hasLiked).
likeEvent := models.DatabaseLikeEvent{
VideoId: video.Id,
UserId: user.Id,
}
_, err := likedVideosCollection.InsertOne(mctx, likeEvent)
if err != nil {
return err
}
	// Interests: modifyInterests adds these values to the user's stored scores,
	// so only the per-tag delta is passed here (not the absolute value).
	interests := make(map[string]int64)
	for _, tag := range video.Tags {
		interests[tag] += 11
	}
	modifyInterests(user, interests)
// Like count
if video.Likes >= math.MaxInt64-1 {
log.Printf("Max likes on video %d", video.Id)
		return nil
}
_, err = videosCollection.UpdateOne(mctx, bson.D{{"_id", video.Id}}, bson.D{{"$inc", bson.D{{"likes", 1}}}})
if err != nil {
return err
}
return nil
}
// Watching
func watchVideo(user models.DatabaseUser, video models.DatabaseVideo) error {
watchEvent := models.DatabaseWatchEvent{
VideoId: video.Id,
UserId: user.Id,
}
_, err := watchedVideosCollection.InsertOne(mctx, watchEvent)
if err != nil {
return err
}
	// Watching slightly decays interest; pass only the per-tag delta, since
	// modifyInterests adds it to the stored score.
	interests := make(map[string]int64)
	for _, tag := range video.Tags {
		interests[tag] -= 1
	}
	modifyInterests(user, interests)
return nil
}
func hasWatched(userId int64, videoId int64) bool {
filter := bson.D{{"user_id", userId}, {"video_id", videoId}}
var limit int64 = 1
documentCount, err := watchedVideosCollection.CountDocuments(mctx, filter, &options.CountOptions{
Limit: &limit,
})
if err != nil {
return true
}
return documentCount == int64(1)
}
// Utils
func getUser(userId int64) (models.DatabaseUser, error) {
query := bson.D{{"_id", userId}}
rawUser := usersCollection.FindOne(mctx, query)
var user models.DatabaseUser
err := rawUser.Decode(&user)
if err != nil {
return models.DatabaseUser{}, err
}
return user, nil
}
func getVideo(videoId int64) (models.DatabaseVideo, error) {
query := bson.D{{"_id", videoId}}
rawVideo := videosCollection.FindOne(mctx, query)
var video models.DatabaseVideo
err := rawVideo.Decode(&video)
if err != nil {
return models.DatabaseVideo{}, err
}
return video, nil
}
func uploadVideo(video models.DatabaseVideo) error {
_, err := videosCollection.InsertOne(mctx, video)
if err != nil {
return err
}
return nil
}
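// modifyInterests applies the given per-tag deltas to the user's interest map
// and persists the updated map back to the users collection.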
func modifyInterests(user models.DatabaseUser, interests map[string]int64) {
// Interests
for name, value := range interests {
currentInterestValue, exists := user.Interests[name]
if !exists {
currentInterestValue = 0
}
currentInterestValue += value
user.Interests[name] = currentInterestValue
}
update := bson.D{{"$set", bson.D{{"interests", user.Interests}}}}
filter := bson.D{{"_id", user.Id}}
_, err := usersCollection.UpdateOne(mctx, filter, update)
if err != nil {
log.Print(err)
return
}
}
| [
"\"port\"",
"\"mongo_uri\""
]
| []
| [
"mongo_uri",
"port"
]
| [] | ["mongo_uri", "port"] | go | 2 | 0 | |
version.go | package tddbc
import (
"fmt"
)
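// Version represents a semantic version made up of major, minor and patch numbers.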
type Version struct {
Major int
Minor int
Patch int
}
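// NewVersion validates that every component is non-negative and returns the
// corresponding Version, or an error describing the offending component.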
func NewVersion(major int, minor int, patch int) (*Version, error) {
if major < 0 {
return nil, fmt.Errorf("Expected majar >= 0, but major is %d", major)
}
if minor < 0 {
return nil, fmt.Errorf("Expected minor >= 0, but minor is %d", minor)
}
if patch < 0 {
return nil, fmt.Errorf("Expected patch >= 0, but patch is %d", patch)
}
return &Version{Major: major, Minor: minor, Patch: patch}, nil
}
func (v *Version) String() string {
return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
}
func (v *Version) Equal(other *Version) bool {
return v.Major == other.Major && v.Minor == other.Minor && v.Patch == other.Patch
}
func (v *Version) BumpPatchVersion() {
v.Patch++
}
func (v *Version) BumpMinorVersion() {
v.Patch = 0
v.Minor++
}
func (v *Version) BumpMajorVersion() {
v.Patch = 0
v.Minor = 0
v.Major++
}
| []
| []
| []
| [] | [] | go | null | null | null |