filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 distinct value) | constargjson (string, 2-3.9k chars) | lang (string, 3 distinct values) | constargcount (float64, 0-129) | variableargcount (float64, 0) | sentence (string, 1 distinct value) |
---|---|---|---|---|---|---|---|---|---|---|
gradle/wrapper/dists/gradle-1.12-all/4ff8jj5a73a7zgj5nnzv1ubq0/gradle-1.12/src/internal-integ-testing/org/gradle/integtests/fixtures/AvailableJavaHomes.java
|
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.integtests.fixtures;
import org.gradle.internal.jvm.Jre;
import org.gradle.internal.jvm.Jvm;
import org.gradle.internal.os.OperatingSystem;
import org.gradle.util.GFileUtils;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Allows the tests to get hold of an alternative Java installation when needed.
*/
abstract public class AvailableJavaHomes {
private static File getJavaHome(String label) {
String value = System.getenv().get(String.format("JDK_%s", label));
return value == null ? null : GFileUtils.canonicalise(new File(value));
}
/**
* Locates a JVM installation that is different to the current JVM.
*/
public static File getBestAlternative() {
Jvm jvm = Jvm.current();
// Use environment variables
File javaHome = null;
if (jvm.getJavaVersion().isJava6Compatible()) {
javaHome = firstAvailable("15", "17");
} else if (jvm.getJavaVersion().isJava5Compatible()) {
javaHome = firstAvailable("16", "17");
}
if (javaHome != null) {
return javaHome;
}
if (OperatingSystem.current().isMacOsX()) {
// Search in the install dir used by the Apple jvms, followed by the install dir used by the OpenJDK jvms
List<File> installDirs = Arrays.asList(new File("/System/Library/Java/JavaVirtualMachines"), new File("/Library/Java/JavaVirtualMachines"));
for (File installDir : installDirs) {
if (installDir.isDirectory()) {
for (File candidate : installDir.listFiles()) {
javaHome = GFileUtils.canonicalise(new File(candidate, "Contents/Home"));
if (!javaHome.equals(jvm.getJavaHome()) && javaHome.isDirectory() && new File(javaHome, "bin/java").isFile()) {
return javaHome;
}
}
}
}
} else if (OperatingSystem.current().isLinux()) {
// Ubuntu specific
File installedJvms = new File("/usr/lib/jvm");
if (installedJvms.isDirectory()) {
for (File candidate : installedJvms.listFiles()) {
javaHome = GFileUtils.canonicalise(candidate);
if (!javaHome.equals(jvm.getJavaHome()) && javaHome.isDirectory() && new File(javaHome, "bin/java").isFile()) {
return javaHome;
}
}
}
} else if (OperatingSystem.current().isWindows()) {
//very simple algorithm trying to find java on windows
List<File> installDirs = new ArrayList<File>();
File candidate = new File("c:/Program Files/Java");
if (candidate.isDirectory()) {
installDirs.add(candidate);
}
// Attempt to look for 32-bit version under 64-bit OS
candidate = new File("c:/Program Files (x86)/Java");
if (candidate.isDirectory()) {
installDirs.add(candidate);
}
for (File installDir : installDirs) {
for (File file : installDir.listFiles()) {
if (file.getName().startsWith("jdk")) {
javaHome = GFileUtils.canonicalise(file);
if (!javaHome.equals(jvm.getJavaHome()) && javaHome.isDirectory() && new File(javaHome, "bin/java.exe").isFile()) {
return javaHome;
}
}
}
}
}
return null;
}
/**
* Locates a JRE installation for the current JVM. Prefers a stand-alone JRE installation over one that is part of a JDK install.
*
* @return The JRE home directory, or null if not found
*/
public static File getBestJre() {
Jvm jvm = Jvm.current();
Jre jre = jvm.getStandaloneJre();
if (jre != null) {
return jre.getHomeDir();
}
jre = jvm.getJre();
if (jre != null) {
return jre.getHomeDir();
}
return null;
}
public static File firstAvailable(String... labels) {
for (String label : labels) {
File found = getJavaHome(label);
if (found != null) {
return found;
}
}
return null;
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
setup.py
|
#!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
from setuptools import setup
import os
import sys
import platform
import imp
import argparse
with open('contrib/requirements/requirements.txt') as f:
requirements = f.read().splitlines()
with open('contrib/requirements/requirements-hw.txt') as f:
requirements_hw = f.read().splitlines()
version = imp.load_source('version', 'lib/version.py')
if sys.version_info[:3] < (3, 4, 0):
sys.exit("Error: Electrum requires Python version >= 3.4.0...")
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
parser = argparse.ArgumentParser()
parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
opts, _ = parser.parse_known_args(sys.argv[1:])
usr_share = os.path.join(sys.prefix, "share")
icons_dirname = 'pixmaps'
if not os.access(opts.root_path + usr_share, os.W_OK) and \
not os.access(opts.root_path, os.W_OK):
icons_dirname = 'icons'
if 'XDG_DATA_HOME' in os.environ.keys():
usr_share = os.environ['XDG_DATA_HOME']
else:
usr_share = os.path.expanduser('~/.local/share')
data_files += [
(os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
(os.path.join(usr_share, icons_dirname), ['icons/electrum.png'])
]
setup(
name="Electrum",
version=version.ELECTRUM_VERSION,
install_requires=requirements,
extras_require={
'full': requirements_hw + ['pycryptodomex'],
},
packages=[
'electrum',
'electrum_gui',
'electrum_gui.qt',
'electrum_plugins',
'electrum_plugins.audio_modem',
'electrum_plugins.cosigner_pool',
'electrum_plugins.email_requests',
'electrum_plugins.greenaddress_instant',
'electrum_plugins.hw_wallet',
'electrum_plugins.keepkey',
'electrum_plugins.labels',
'electrum_plugins.ledger',
'electrum_plugins.trezor',
'electrum_plugins.digitalbitbox',
'electrum_plugins.trustedcoin',
'electrum_plugins.virtualkeyboard',
],
package_dir={
'electrum': 'lib',
'electrum_gui': 'gui',
'electrum_plugins': 'plugins',
},
package_data={
'electrum': [
'servers.json',
'servers_testnet.json',
'servers_regtest.json',
'currencies.json',
'checkpoints.json',
'checkpoints_testnet.json',
'www/index.html',
'wordlist/*.txt',
'locale/*/LC_MESSAGES/electrum.mo',
]
},
scripts=['electrum'],
data_files=data_files,
description="Lightweight Deimos Wallet",
author="Thomas Voegtlin",
author_email="[email protected]",
license="MIT Licence",
url="https://deimoscoin.org",
long_description="""Lightweight Deimos Wallet"""
)
| [] | [] | ["XDG_DATA_HOME"] | [] | ["XDG_DATA_HOME"] | python | 1 | 0 | |
abciapp/service/deliver/delivertx_test.go
|
package deliver
import (
"container/list"
"fmt"
tx2 "github.com/bcbchain/bclib/tx/v2"
"github.com/bcbchain/bclib/algorithm"
"github.com/bcbchain/bclib/types"
"github.com/bcbchain/sdk/sdk/bn"
"github.com/bcbchain/sdk/sdk/jsoniter"
"github.com/bcbchain/sdk/sdk/std"
"os"
"path/filepath"
"strconv"
"testing"
"github.com/bcbchain/bclib/tendermint/tmlibs/common"
types2 "github.com/bcbchain/bclib/tendermint/abci/types"
"github.com/bcbchain/bclib/tendermint/tmlibs/log"
)
func TestAppDeliver_DeliverTx(t *testing.T) {
app := createAppDeliver()
//req := types2.RequestBeginBlock{}
//app.BeginBlock(req)
tx := txWrapper()
app.deliverBCTx([]byte(tx))
}
func TestAppDeliver_emitFeeReceipts(t *testing.T) {
app := createAppDeliver()
gasLimit := int64(50000)
response := types.Response{
Code: 200,
Log: "DeliverTx success",
Data: "",
Info: "",
GasLimit: gasLimit,
GasUsed: 20000,
Fee: 20000 * 2500,
Tags: fakeTags(),
TxHash: nil,
Height: 0,
}
tags,_ := app.emitFeeReceipts(types.Transaction{},&response, true)
//verify
if len(tags) != num+q {
t.Error("number of total fee receipt is wrong", "got: ", len(tags), "exp: ", num+q)
}
for _, tag := range tags {
rcpt := types.Receipt{}
jsoniter.Unmarshal(tag.Value, &rcpt)
fee := std.Fee{}
jsoniter.Unmarshal(rcpt.ReceiptBytes, &fee)
fmt.Println(rcpt)
fmt.Println(fee)
}
fmt.Println("test emitFeeReceipts(false)")
//failure
tags,_ = app.emitFeeReceipts(types.Transaction{},&response, false)
//verify
if len(tags) != q {
t.Error("number of total fee receipt is wrong", "got: ", len(tags), "exp: ", q)
}
for _, tag := range tags {
rcpt := types.Receipt{}
jsoniter.Unmarshal(tag.Value, &rcpt)
fee := std.Fee{}
jsoniter.Unmarshal(rcpt.ReceiptBytes, &fee)
fmt.Println(rcpt)
fmt.Println(fee)
}
}
const (
num = 14
q = 3
)
func fakeTags() []common.KVPair {
tags := make([]common.KVPair, 0)
for i := 0; i < num; i++ {
fee := std.Fee{
Token: "0123456789",
From: "addddddddddddddddddddddddd" + strconv.Itoa(i%q),
Value: 10000,
}
bf, _ := jsoniter.Marshal(fee)
receipt := types.Receipt{
Name: "std.fee",
ContractAddress: "",
ReceiptBytes: bf,
ReceiptHash: nil,
}
b, _ := jsoniter.Marshal(receipt)
kv := common.KVPair{
Key: []byte(fmt.Sprintf("/%d/%s", len(tags), "std.fee")),
Value: b,
}
tags = append(tags, kv)
}
return tags
}
func createAppDeliver() AppDeliver {
app := AppDeliver{
logger: nil,
txID: 0,
blockHash: nil,
blockHeader: types2.Header{},
appState: nil,
hashList: list.New().Init(),
chainID: "bcb",
sponser: "",
rewarder: "",
udValidator: false,
validators: nil,
fee: 0,
rewards: nil,
}
app.logger = createLogger()
return app
}
func createLogger() log.Logger {
home := os.Getenv("HOME")
fmt.Println(home)
logger := log.NewTMLogger(filepath.Join(home, "log"), "bcchain")
logger.AllowLevel("debug")
logger.SetOutputAsync(false)
logger.SetOutputToFile(false)
logger.SetOutputToScreen(true)
// logger.SetOutputFileSize(common.GlobalConfig.Log_size)
return logger
}
func txWrapper() string {
tx2.Init("bcb")
methodID1 := algorithm.BytesToUint32(algorithm.CalcMethodId("Transfer(types.Address,bn.Number)"))
toContract1 := "bcbMWedWqzzW8jkt5tntTomQQEN7fSwWFhw6"
toAccount := "bcbCpeczqoSoxLxx1x3UyuKsaS4J8yamzWzz"
value := bn.N(1000000000)
itemInfo1 := tx2.WrapInvokeParams(toAccount, value)
message1 := types.Message{
Contract: toContract1,
MethodID: methodID1,
Items: itemInfo1,
}
nonce := uint64(1)
gasLimit := int64(500)
note := "Example for cascade invoke smart contract."
txPayloadBytesRlp := tx2.WrapPayload(nonce, gasLimit, note, message1)
privKeyStr := "0x4a2c14697282e658b3ed7dd5324de1a102d216d6fa50d5937ffe89f35cbc12aa68eb9a09813bdf7c0869bf34a244cc545711509fe70f978d121afd3a4ae610e6"
finalTx := tx2.WrapTx(txPayloadBytesRlp, privKeyStr)
return finalTx
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
demo/demo.py
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import argparse
import glob
import os
import sys
import torch.nn.functional as F
import cv2
import numpy as np
from tqdm import tqdm
from torch.backends import cudnn
import pickle
import os.path as osp
sys.path.append('.')
from fastreid.config import get_cfg
from fastreid.utils.logger import setup_logger
from fastreid.utils.file_io import PathManager
from predictor import FeatureExtractionDemo
os.environ['CUDA_VISIBLE_DEVICES']='1'
# import some modules added in project like this below
# sys.path.append("projects/PartialReID")
# from partialreid import *
cudnn.benchmark = True
setup_logger(name="fastreid")
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
# add_partialreid_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Feature extraction with reid models")
parser.add_argument(
"--config-file",
metavar="FILE",
help="path to config file",
# default="./configs/MSMT17/bagtricks_R101-ibn.yml"
default="./configs/MSMT17/bagtricks_R50.yml"
)
parser.add_argument(
"--parallel",
action='store_true',
help='If use multiprocess for feature extraction.'
)
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
default="/home/huangju/dataset/msmt17/test/*.jpg"
)
parser.add_argument(
"--output",
default='demo_output',
help='path to save features'
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
#default="MODEL.WEIGHTS ./checkpoints/msmt_bot_R101-ibn.pth",
#default="MODEL.WEIGHTS ./checkpoints/msmt_bot_R50.pth",
nargs=argparse.REMAINDER,
)
return parser
def postprocess(features):
# Normalize feature to compute cosine distance
features = F.normalize(features)
features = features.cpu().data.numpy()
return features
if __name__ == '__main__':
args = get_parser().parse_args()
cfg = setup_cfg(args)
demo = FeatureExtractionDemo(cfg, parallel=args.parallel)
# pathss=[]
# dir_path="/home/huangju/dataset/dml_ped25-v2/dml_ped25-v2/"
# ids=["F2", "S2", 'R2', 'T2']
# for term in ids:
# new_root=dir_path+term
# img_paths = glob.glob(osp.join(new_root, '*/*.jpg'))
# for img_path in img_paths:
# if img_path.split('/')[-1].split('.')[0].split("_")[-1]!="face":
# pathss.append(img_path)
pathss=[]
dir_path="/home/huangju/dataset/msmt17/test"
img_paths = glob.glob(osp.join(dir_path, '*/*.jpg'))
for img_path in img_paths:
pathss.append(img_path)
msmt_dict={}
for path in tqdm(pathss):
img = cv2.imread(path)
# print("img!!")
# print(img)
feat = demo.run_on_image(img)
feat = postprocess(feat)
key="test/"+path.split("/")[-1]
# print(key)
# print(feat)
msmt_dict[key]=feat[0]
# 32621 person images in the training set
with open("msmt17-test.pkl","wb") as f:
pickle.dump(msmt_dict,f)
# with open("msmt17-train.pkl","rb") as f:
# msmt_dict=pickle.load(f)
# print(len(msmt_dict))
# Run command: python demo/demo.py --opts MODEL.WEIGHTS checkpoints/msmt_bot_R50.pth
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
src/cmd/dist/buildruntime.go
|
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"fmt"
"os"
"strings"
)
/*
* Helpers for building runtime.
*/
// mkzversion writes zversion.go:
//
// package sys
//
// const TheVersion = <version>
// const Goexperiment = <goexperiment>
// const StackGuardMultiplier = <multiplier value>
//
func mkzversion(dir, file string) {
var buf bytes.Buffer
fmt.Fprintf(&buf, "// Code generated by go tool dist; DO NOT EDIT.\n")
fmt.Fprintln(&buf)
fmt.Fprintf(&buf, "package sys\n")
fmt.Fprintln(&buf)
fmt.Fprintf(&buf, "const TheVersion = `%s`\n", findgoversion())
fmt.Fprintf(&buf, "const Goexperiment = `%s`\n", os.Getenv("GOEXPERIMENT"))
fmt.Fprintf(&buf, "const StackGuardMultiplierDefault = %d\n", stackGuardMultiplierDefault())
writefile(buf.String(), file, writeSkipSame)
}
// mkzbootstrap writes cmd/internal/objabi/zbootstrap.go:
//
// package objabi
//
// const defaultGOROOT = <goroot>
// const defaultGO386 = <go386>
// const defaultGOARM = <goarm>
// const defaultGOMIPS = <gomips>
// const defaultGOMIPS64 = <gomips64>
// const defaultGOOS = runtime.GOOS
// const defaultGOARCH = runtime.GOARCH
// const defaultGO_EXTLINK_ENABLED = <goextlinkenabled>
// const version = <version>
// const stackGuardMultiplierDefault = <multiplier value>
// const goexperiment = <goexperiment>
//
// The use of runtime.GOOS and runtime.GOARCH makes sure that
// a cross-compiled compiler expects to compile for its own target
// system. That is, if on a Mac you do:
//
// GOOS=linux GOARCH=ppc64 go build cmd/compile
//
// the resulting compiler will default to generating linux/ppc64 object files.
// This is more useful than having it default to generating objects for the
// original target (in this example, a Mac).
func mkzbootstrap(file string) {
var buf bytes.Buffer
fmt.Fprintf(&buf, "// Code generated by go tool dist; DO NOT EDIT.\n")
fmt.Fprintln(&buf)
fmt.Fprintf(&buf, "package objabi\n")
fmt.Fprintln(&buf)
fmt.Fprintf(&buf, "import \"runtime\"\n")
fmt.Fprintln(&buf)
fmt.Fprintf(&buf, "const defaultGO386 = `%s`\n", go386)
fmt.Fprintf(&buf, "const defaultGOARM = `%s`\n", goarm)
fmt.Fprintf(&buf, "const defaultGOMIPS = `%s`\n", gomips)
fmt.Fprintf(&buf, "const defaultGOMIPS64 = `%s`\n", gomips64)
fmt.Fprintf(&buf, "const defaultGOOS = runtime.GOOS\n")
fmt.Fprintf(&buf, "const defaultGOARCH = runtime.GOARCH\n")
fmt.Fprintf(&buf, "const defaultGO_EXTLINK_ENABLED = `%s`\n", goextlinkenabled)
fmt.Fprintf(&buf, "const defaultGO_LDSO = `%s`\n", defaultldso)
fmt.Fprintf(&buf, "const version = `%s`\n", findgoversion())
fmt.Fprintf(&buf, "const stackGuardMultiplierDefault = %d\n", stackGuardMultiplierDefault())
fmt.Fprintf(&buf, "const goexperiment = `%s`\n", os.Getenv("GOEXPERIMENT"))
writefile(buf.String(), file, writeSkipSame)
}
// stackGuardMultiplierDefault returns a multiplier to apply to the default
// stack guard size. Larger multipliers are used for non-optimized
// builds that have larger stack frames.
func stackGuardMultiplierDefault() int {
for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") {
if s == "-N" {
return 2
}
}
return 1
}
| ["\"GOEXPERIMENT\"", "\"GOEXPERIMENT\"", "\"GO_GCFLAGS\""] | [] | ["GO_GCFLAGS", "GOEXPERIMENT"] | [] | ["GO_GCFLAGS", "GOEXPERIMENT"] | go | 2 | 0 | |
server/oauth/src/main/java/com/springsource/oauthservice/Bootstrap.java
|
/*
* Copyright 2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.springsource.oauthservice;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.cloudfoundry.org.codehaus.jackson.JsonFactory;
import org.cloudfoundry.org.codehaus.jackson.JsonParser;
import org.cloudfoundry.org.codehaus.jackson.map.ObjectMapper;
/**
* TEMPORARY: Bootstrap to log the value of VCAP_SERVICES so that I can create a DB URL to the bound database service.
*
* @author wallsc
*/
public class Bootstrap {
private static final Log LOG = LogFactory.getLog(Bootstrap.class);
public void go() throws Exception {
String vcapServices = System.getenv("VCAP_SERVICES");
LOG.debug("VCAP_SERVICES: " + vcapServices);
// jdbc:postgresql://172.30.48.126:5432/d6f69ba9c3c6349ac830af2973e31b779
// pull values out and construct JDBC URL
Map credentials = getCredentialsMap(vcapServices);
String dbName = (String) credentials.get("name");
String host = (String) credentials.get("host");
Integer port = (Integer) credentials.get("port");
String username = (String) credentials.get("username");
String password = (String) credentials.get("password");
LOG.debug(" JDBC URL: jdbc:postgresql://" + host + ":" + port + "/" + dbName);
LOG.debug(" DB USERNAME: " + username);
LOG.debug(" DB PASSWORD: " + password);
}
public Map getCredentialsMap(String vcapServices) throws Exception {
ObjectMapper mapper = new ObjectMapper();
JsonFactory jsonFactory = mapper.getJsonFactory();
JsonParser jsonParser = jsonFactory.createJsonParser(vcapServices);
Map map = jsonParser.readValueAs(Map.class);
List pgMap = (List) map.get("postgresql-9.0");
Map dbMap = (Map) pgMap.get(0);
Map credentialsMap = (Map) dbMap.get("credentials");
return credentialsMap;
}
}
| ["\"VCAP_SERVICES\""] | [] | ["VCAP_SERVICES"] | [] | ["VCAP_SERVICES"] | java | 1 | 0 | |
cxdb/cxdbsql/dbconfig.go
|
package cxdbsql
import (
"bufio"
"os"
"path/filepath"
"github.com/jessevdk/go-flags"
"github.com/mit-dci/opencx/logging"
)
type dbsqlConfig struct {
// Filename of config file where this stuff can be set as well
ConfigFile string
// database home dir
DBHomeDir string `long:"dir" description:"Location of the root directory for the sql db info and config"`
// database info required to establish connection
DBUsername string `long:"dbuser" description:"database username"`
DBPassword string `long:"dbpassword" description:"database password"`
DBHost string `long:"dbhost" description:"Host for the database connection"`
DBPort uint16 `long:"dbport" description:"Port for the database connection"`
// database schema names
ReadOnlyOrderSchemaName string `long:"readonlyorderschema" description:"Name of read-only orderbook schema"`
ReadOnlyAuctionSchemaName string `long:"readonlyauctionschema" description:"Name of read-only auction schema"`
ReadOnlyBalanceSchemaName string `long:"readonlybalanceschema" description:"Name of read-only balance schema"`
BalanceSchemaName string `long:"balanceschema" description:"Name of balance schema"`
DepositSchemaName string `long:"depositschema" description:"Name of deposit schema"`
PendingDepositSchemaName string `long:"penddepschema" description:"Name of pending deposit schema"`
PuzzleSchemaName string `long:"puzzleschema" description:"Name of schema for puzzle orderbooks"`
AuctionSchemaName string `long:"auctionschema" description:"Name of schema for auction ID"`
AuctionOrderSchemaName string `long:"auctionorderschema" description:"Name of schema for auction orderbook"`
OrderSchemaName string `long:"orderschema" description:"Name of schema for limit orderbook"`
PeerSchemaName string `long:"peerschema" description:"Name of schema for peer storage"`
// database table names
PuzzleTableName string `long:"puzzletable" description:"Name of table for puzzle orderbooks"`
AuctionOrderTableName string `long:"auctionordertable" description:"Name of table for auction orders"`
PeerTableName string `long:"peertable" description:"Name of table for peer storage"`
}
// Let these be turned into config things at some point
var (
defaultConfigFilename = "sqldb.conf"
defaultHomeDir = os.Getenv("HOME")
defaultDBHomeDirName = defaultHomeDir + "/.opencx/db/"
defaultDBPort = uint16(3306)
defaultDBHost = "localhost"
defaultDBUser = "opencx"
defaultDBPass = "testpass"
// definitely move this to a config file
defaultReadOnlyOrderSchema = "orders_readonly"
defaultReadOnlyAuctionSchema = "auctionorders_readonly"
defaultReadOnlyBalanceSchema = "balances_readonly"
defaultBalanceSchema = "balances"
defaultDepositSchema = "deposit"
defaultPendingDepositSchema = "pending_deposits"
defaultPuzzleSchema = "puzzle"
defaultAuctionSchema = "auctions"
defaultAuctionOrderSchema = "auctionorder"
defaultOrderSchema = "orders"
defaultPeerSchema = "peers"
// tables
defaultAuctionOrderTable = "auctionorders"
defaultPuzzleTable = "puzzles"
defaultPeerTable = "opencxpeers"
// Set defaults
defaultConf = &dbsqlConfig{
// home dir
DBHomeDir: defaultDBHomeDirName,
// user / pass / net stuff
DBUsername: defaultDBUser,
DBPassword: defaultDBPass,
DBHost: defaultDBHost,
DBPort: defaultDBPort,
// schemas
ReadOnlyAuctionSchemaName: defaultReadOnlyAuctionSchema,
ReadOnlyOrderSchemaName: defaultReadOnlyOrderSchema,
ReadOnlyBalanceSchemaName: defaultReadOnlyBalanceSchema,
BalanceSchemaName: defaultBalanceSchema,
DepositSchemaName: defaultDepositSchema,
PendingDepositSchemaName: defaultPendingDepositSchema,
PuzzleSchemaName: defaultPuzzleSchema,
AuctionSchemaName: defaultAuctionSchema,
AuctionOrderSchemaName: defaultAuctionOrderSchema,
OrderSchemaName: defaultOrderSchema,
PeerSchemaName: defaultPeerSchema,
// tables
PuzzleTableName: defaultPuzzleTable,
AuctionOrderTableName: defaultAuctionOrderTable,
PeerTableName: defaultPeerTable,
}
)
// newConfigParser returns a new command line flags parser.
func newConfigParser(conf *dbsqlConfig, options flags.Options) *flags.Parser {
parser := flags.NewParser(conf, options)
return parser
}
// createDefaultConfigFile creates a config file -- only call this if the
// config file isn't already there
func createDefaultConfigFile(destinationPath string) error {
dest, err := os.OpenFile(filepath.Join(destinationPath, defaultConfigFilename),
os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer dest.Close()
writer := bufio.NewWriter(dest)
defaultArgs := []byte("dbuser=opencx\ndbpassword=testpass\n")
_, err = writer.Write(defaultArgs)
if err != nil {
return err
}
writer.Flush()
return nil
}
func dbConfigSetup(conf *dbsqlConfig) {
// Pre-parse the command line options to see if an alternative config
// file or the version flag was specified. Config file will be read later
// and cli options would be parsed again below
parser := newConfigParser(conf, flags.Default)
// create home directory
_, err := os.Stat(conf.DBHomeDir)
if os.IsNotExist(err) {
if err = os.MkdirAll(conf.DBHomeDir, 0700); err != nil {
logging.Fatalf("Could not make dirs needed for home dir %s", conf.DBHomeDir)
}
logging.Infof("Creating a new db home directory at %s", conf.DBHomeDir)
if err = createDefaultConfigFile(conf.DBHomeDir); err != nil {
logging.Fatalf("Error creating a default config file: %s", conf.DBHomeDir)
}
} else if err != nil {
logging.Fatalf("Error while creating a directory: %s", err)
}
if _, err := os.Stat(filepath.Join(conf.DBHomeDir, defaultConfigFilename)); os.IsNotExist(err) {
// if there is no config file found over at the directory, create one
logging.Infof("Creating a new default db config file at %s", conf.DBHomeDir)
// source of error
if err = createDefaultConfigFile(filepath.Join(conf.DBHomeDir)); err != nil {
logging.Fatal(err)
}
}
conf.ConfigFile = filepath.Join(conf.DBHomeDir, defaultConfigFilename)
// lets parse the config file provided, if any
if err = flags.NewIniParser(parser).ParseFile(conf.ConfigFile); err != nil {
// If the error isn't a path error then we care about it
if _, ok := err.(*os.PathError); !ok {
logging.Fatalf("Non-path error encountered when parsing config file: %s", err)
}
}
return
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
vendor/github.com/whosonfirst/go-whosonfirst-static/vendor/github.com/whosonfirst/go-whosonfirst-image/vendor/golang.org/x/net/http2/server_test.go
|
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"bytes"
"crypto/tls"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"golang.org/x/net/http2/hpack"
)
var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered")
func stderrv() io.Writer {
if *stderrVerbose {
return os.Stderr
}
return ioutil.Discard
}
type serverTester struct {
cc net.Conn // client conn
t testing.TB
ts *httptest.Server
fr *Framer
serverLogBuf bytes.Buffer // logger for httptest.Server
logFilter []string // substrings to filter out
scMu sync.Mutex // guards sc
sc *serverConn
hpackDec *hpack.Decoder
decodedHeaders [][2]string
// If http2debug!=2, then we capture Frame debug logs that will be written
// to t.Log after a test fails. The read and write logs use separate locks
// and buffers so we don't accidentally introduce synchronization between
// the read and write goroutines, which may hide data races.
frameReadLogMu sync.Mutex
frameReadLogBuf bytes.Buffer
frameWriteLogMu sync.Mutex
frameWriteLogBuf bytes.Buffer
// writing headers:
headerBuf bytes.Buffer
hpackEnc *hpack.Encoder
}
func init() {
testHookOnPanicMu = new(sync.Mutex)
goAwayTimeout = 25 * time.Millisecond
}
func resetHooks() {
testHookOnPanicMu.Lock()
testHookOnPanic = nil
testHookOnPanicMu.Unlock()
}
type serverTesterOpt string
var optOnlyServer = serverTesterOpt("only_server")
var optQuiet = serverTesterOpt("quiet_logging")
var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames")
func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester {
resetHooks()
ts := httptest.NewUnstartedServer(handler)
tlsConfig := &tls.Config{
InsecureSkipVerify: true,
NextProtos: []string{NextProtoTLS},
}
var onlyServer, quiet, framerReuseFrames bool
h2server := new(Server)
for _, opt := range opts {
switch v := opt.(type) {
case func(*tls.Config):
v(tlsConfig)
case func(*httptest.Server):
v(ts)
case func(*Server):
v(h2server)
case serverTesterOpt:
switch v {
case optOnlyServer:
onlyServer = true
case optQuiet:
quiet = true
case optFramerReuseFrames:
framerReuseFrames = true
}
case func(net.Conn, http.ConnState):
ts.Config.ConnState = v
default:
t.Fatalf("unknown newServerTester option type %T", v)
}
}
ConfigureServer(ts.Config, h2server)
st := &serverTester{
t: t,
ts: ts,
}
st.hpackEnc = hpack.NewEncoder(&st.headerBuf)
st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField)
ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
if quiet {
ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
} else {
ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags)
}
ts.StartTLS()
if VerboseLogs {
t.Logf("Running test server at: %s", ts.URL)
}
testHookGetServerConn = func(v *serverConn) {
st.scMu.Lock()
defer st.scMu.Unlock()
st.sc = v
}
log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st}))
if !onlyServer {
cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig)
if err != nil {
t.Fatal(err)
}
st.cc = cc
st.fr = NewFramer(cc, cc)
if framerReuseFrames {
st.fr.SetReuseFrames()
}
if !logFrameReads && !logFrameWrites {
st.fr.debugReadLoggerf = func(m string, v ...interface{}) {
m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n"
st.frameReadLogMu.Lock()
fmt.Fprintf(&st.frameReadLogBuf, m, v...)
st.frameReadLogMu.Unlock()
}
st.fr.debugWriteLoggerf = func(m string, v ...interface{}) {
m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n"
st.frameWriteLogMu.Lock()
fmt.Fprintf(&st.frameWriteLogBuf, m, v...)
st.frameWriteLogMu.Unlock()
}
st.fr.logReads = true
st.fr.logWrites = true
}
}
return st
}
func (st *serverTester) closeConn() {
st.scMu.Lock()
defer st.scMu.Unlock()
st.sc.conn.Close()
}
func (st *serverTester) addLogFilter(phrase string) {
st.logFilter = append(st.logFilter, phrase)
}
func (st *serverTester) stream(id uint32) *stream {
ch := make(chan *stream, 1)
st.sc.serveMsgCh <- func(int) {
ch <- st.sc.streams[id]
}
return <-ch
}
func (st *serverTester) streamState(id uint32) streamState {
ch := make(chan streamState, 1)
st.sc.serveMsgCh <- func(int) {
state, _ := st.sc.state(id)
ch <- state
}
return <-ch
}
// loopNum reports how many times this conn's select loop has gone around.
func (st *serverTester) loopNum() int {
lastc := make(chan int, 1)
st.sc.serveMsgCh <- func(loopNum int) {
lastc <- loopNum
}
return <-lastc
}
// awaitIdle heuristically awaits for the server conn's select loop to be idle.
// The heuristic is that the server connection's serve loop must schedule
// 50 times in a row without any channel sends or receives occurring.
func (st *serverTester) awaitIdle() {
remain := 50
last := st.loopNum()
for remain > 0 {
n := st.loopNum()
if n == last+1 {
remain--
} else {
remain = 50
}
last = n
}
}
func (st *serverTester) Close() {
if st.t.Failed() {
st.frameReadLogMu.Lock()
if st.frameReadLogBuf.Len() > 0 {
st.t.Logf("Framer read log:\n%s", st.frameReadLogBuf.String())
}
st.frameReadLogMu.Unlock()
st.frameWriteLogMu.Lock()
if st.frameWriteLogBuf.Len() > 0 {
st.t.Logf("Framer write log:\n%s", st.frameWriteLogBuf.String())
}
st.frameWriteLogMu.Unlock()
// If we failed already (and are likely in a Fatal,
// unwinding), force close the connection, so the
// httptest.Server doesn't wait forever for the conn
// to close.
if st.cc != nil {
st.cc.Close()
}
}
st.ts.Close()
if st.cc != nil {
st.cc.Close()
}
log.SetOutput(os.Stderr)
}
// greet initiates the client's HTTP/2 connection into a state where
// frames may be sent.
func (st *serverTester) greet() {
st.greetAndCheckSettings(func(Setting) error { return nil })
}
func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) {
st.writePreface()
st.writeInitialSettings()
st.wantSettings().ForeachSetting(checkSetting)
st.writeSettingsAck()
// The initial WINDOW_UPDATE and SETTINGS ACK can come in any order.
var gotSettingsAck bool
var gotWindowUpdate bool
for i := 0; i < 2; i++ {
f, err := st.readFrame()
if err != nil {
st.t.Fatal(err)
}
switch f := f.(type) {
case *SettingsFrame:
if !f.Header().Flags.Has(FlagSettingsAck) {
st.t.Fatal("Settings Frame didn't have ACK set")
}
gotSettingsAck = true
case *WindowUpdateFrame:
if f.FrameHeader.StreamID != 0 {
st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID)
}
incr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize)
if f.Increment != incr {
st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr)
}
gotWindowUpdate = true
default:
st.t.Fatalf("Wanting a settings ACK or window update, received a %T", f)
}
}
if !gotSettingsAck {
st.t.Fatalf("Didn't get a settings ACK")
}
if !gotWindowUpdate {
st.t.Fatalf("Didn't get a window update")
}
}
func (st *serverTester) writePreface() {
n, err := st.cc.Write(clientPreface)
if err != nil {
st.t.Fatalf("Error writing client preface: %v", err)
}
if n != len(clientPreface) {
st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface))
}
}
func (st *serverTester) writeInitialSettings() {
if err := st.fr.WriteSettings(); err != nil {
st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err)
}
}
func (st *serverTester) writeSettingsAck() {
if err := st.fr.WriteSettingsAck(); err != nil {
st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err)
}
}
func (st *serverTester) writeHeaders(p HeadersFrameParam) {
if err := st.fr.WriteHeaders(p); err != nil {
st.t.Fatalf("Error writing HEADERS: %v", err)
}
}
func (st *serverTester) writePriority(id uint32, p PriorityParam) {
if err := st.fr.WritePriority(id, p); err != nil {
st.t.Fatalf("Error writing PRIORITY: %v", err)
}
}
func (st *serverTester) encodeHeaderField(k, v string) {
err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
if err != nil {
st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
}
}
// encodeHeaderRaw is the magic-free version of encodeHeader.
// It takes 0 or more (k, v) pairs and encodes them.
func (st *serverTester) encodeHeaderRaw(headers ...string) []byte {
if len(headers)%2 == 1 {
panic("odd number of kv args")
}
st.headerBuf.Reset()
for len(headers) > 0 {
k, v := headers[0], headers[1]
st.encodeHeaderField(k, v)
headers = headers[2:]
}
return st.headerBuf.Bytes()
}
// encodeHeader encodes headers and returns their HPACK bytes. headers
// must contain an even number of key/value pairs. There may be
// multiple pairs for keys (e.g. "cookie"). The :method, :path, and
// :scheme headers default to GET, / and https. The :authority header
// defaults to st.ts.Listener.Addr().
func (st *serverTester) encodeHeader(headers ...string) []byte {
if len(headers)%2 == 1 {
panic("odd number of kv args")
}
st.headerBuf.Reset()
defaultAuthority := st.ts.Listener.Addr().String()
if len(headers) == 0 {
// Fast path, mostly for benchmarks, so test code doesn't pollute
// profiles when we're looking to improve server allocations.
st.encodeHeaderField(":method", "GET")
st.encodeHeaderField(":scheme", "https")
st.encodeHeaderField(":authority", defaultAuthority)
st.encodeHeaderField(":path", "/")
return st.headerBuf.Bytes()
}
if len(headers) == 2 && headers[0] == ":method" {
// Another fast path for benchmarks.
st.encodeHeaderField(":method", headers[1])
st.encodeHeaderField(":scheme", "https")
st.encodeHeaderField(":authority", defaultAuthority)
st.encodeHeaderField(":path", "/")
return st.headerBuf.Bytes()
}
pseudoCount := map[string]int{}
keys := []string{":method", ":scheme", ":authority", ":path"}
vals := map[string][]string{
":method": {"GET"},
":scheme": {"https"},
":authority": {defaultAuthority},
":path": {"/"},
}
for len(headers) > 0 {
k, v := headers[0], headers[1]
headers = headers[2:]
if _, ok := vals[k]; !ok {
keys = append(keys, k)
}
if strings.HasPrefix(k, ":") {
pseudoCount[k]++
if pseudoCount[k] == 1 {
vals[k] = []string{v}
} else {
// Allows testing of invalid headers w/ dup pseudo fields.
vals[k] = append(vals[k], v)
}
} else {
vals[k] = append(vals[k], v)
}
}
for _, k := range keys {
for _, v := range vals[k] {
st.encodeHeaderField(k, v)
}
}
return st.headerBuf.Bytes()
}
// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream and EndHeaders set.
func (st *serverTester) bodylessReq1(headers ...string) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(headers...),
EndStream: true,
EndHeaders: true,
})
}
func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) {
if err := st.fr.WriteData(streamID, endStream, data); err != nil {
st.t.Fatalf("Error writing DATA: %v", err)
}
}
func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) {
if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil {
st.t.Fatalf("Error writing DATA: %v", err)
}
}
func readFrameTimeout(fr *Framer, wait time.Duration) (Frame, error) {
ch := make(chan interface{}, 1)
go func() {
fr, err := fr.ReadFrame()
if err != nil {
ch <- err
} else {
ch <- fr
}
}()
t := time.NewTimer(wait)
select {
case v := <-ch:
t.Stop()
if fr, ok := v.(Frame); ok {
return fr, nil
}
return nil, v.(error)
case <-t.C:
return nil, errors.New("timeout waiting for frame")
}
}
func (st *serverTester) readFrame() (Frame, error) {
return readFrameTimeout(st.fr, 2*time.Second)
}
func (st *serverTester) wantHeaders() *HeadersFrame {
f, err := st.readFrame()
if err != nil {
st.t.Fatalf("Error while expecting a HEADERS frame: %v", err)
}
hf, ok := f.(*HeadersFrame)
if !ok {
st.t.Fatalf("got a %T; want *HeadersFrame", f)
}
return hf
}
func (st *serverTester) wantContinuation() *ContinuationFrame {
f, err := st.readFrame()
if err != nil {
st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err)
}
cf, ok := f.(*ContinuationFrame)
if !ok {
st.t.Fatalf("got a %T; want *ContinuationFrame", f)
}
return cf
}
func (st *serverTester) wantData() *DataFrame {
f, err := st.readFrame()
if err != nil {
st.t.Fatalf("Error while expecting a DATA frame: %v", err)
}
df, ok := f.(*DataFrame)
if !ok {
st.t.Fatalf("got a %T; want *DataFrame", f)
}
return df
}
func (st *serverTester) wantSettings() *SettingsFrame {
f, err := st.readFrame()
if err != nil {
st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err)
}
sf, ok := f.(*SettingsFrame)
if !ok {
st.t.Fatalf("got a %T; want *SettingsFrame", f)
}
return sf
}
func (st *serverTester) wantPing() *PingFrame {
f, err := st.readFrame()
if err != nil {
st.t.Fatalf("Error while expecting a PING frame: %v", err)
}
pf, ok := f.(*PingFrame)
if !ok {
st.t.Fatalf("got a %T; want *PingFrame", f)
}
return pf
}
func (st *serverTester) wantGoAway() *GoAwayFrame {
f, err := st.readFrame()
if err != nil {
st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err)
}
gf, ok := f.(*GoAwayFrame)
if !ok {
st.t.Fatalf("got a %T; want *GoAwayFrame", f)
}
return gf
}
func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) {
f, err := st.readFrame()
if err != nil {
st.t.Fatalf("Error while expecting an RSTStream frame: %v", err)
}
rs, ok := f.(*RSTStreamFrame)
if !ok {
st.t.Fatalf("got a %T; want *RSTStreamFrame", f)
}
if rs.FrameHeader.StreamID != streamID {
st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID)
}
if rs.ErrCode != errCode {
st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode)
}
}
func (st *serverTester) wantWindowUpdate(streamID, incr uint32) {
f, err := st.readFrame()
if err != nil {
st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err)
}
wu, ok := f.(*WindowUpdateFrame)
if !ok {
st.t.Fatalf("got a %T; want *WindowUpdateFrame", f)
}
if wu.FrameHeader.StreamID != streamID {
st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID)
}
if wu.Increment != incr {
st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr)
}
}
func (st *serverTester) wantSettingsAck() {
f, err := st.readFrame()
if err != nil {
st.t.Fatal(err)
}
sf, ok := f.(*SettingsFrame)
if !ok {
st.t.Fatalf("Wanting a settings ACK, received a %T", f)
}
if !sf.Header().Flags.Has(FlagSettingsAck) {
st.t.Fatal("Settings Frame didn't have ACK set")
}
}
func (st *serverTester) wantPushPromise() *PushPromiseFrame {
f, err := st.readFrame()
if err != nil {
st.t.Fatal(err)
}
ppf, ok := f.(*PushPromiseFrame)
if !ok {
st.t.Fatalf("Wanted PushPromise, received %T", ppf)
}
return ppf
}
func TestServer(t *testing.T) {
gotReq := make(chan bool, 1)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Foo", "Bar")
gotReq <- true
})
defer st.Close()
covers("3.5", `
The server connection preface consists of a potentially empty
SETTINGS frame ([SETTINGS]) that MUST be the first frame the
server sends in the HTTP/2 connection.
`)
st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(),
EndStream: true, // no DATA frames
EndHeaders: true,
})
select {
case <-gotReq:
case <-time.After(2 * time.Second):
t.Error("timeout waiting for request")
}
}
func TestServer_Request_Get(t *testing.T) {
testServerRequest(t, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader("foo-bar", "some-value"),
EndStream: true, // no DATA frames
EndHeaders: true,
})
}, func(r *http.Request) {
if r.Method != "GET" {
t.Errorf("Method = %q; want GET", r.Method)
}
if r.URL.Path != "/" {
t.Errorf("URL.Path = %q; want /", r.URL.Path)
}
if r.ContentLength != 0 {
t.Errorf("ContentLength = %v; want 0", r.ContentLength)
}
if r.Close {
t.Error("Close = true; want false")
}
if !strings.Contains(r.RemoteAddr, ":") {
t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr)
}
if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 {
t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor)
}
wantHeader := http.Header{
"Foo-Bar": []string{"some-value"},
}
if !reflect.DeepEqual(r.Header, wantHeader) {
t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
}
if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
t.Errorf("Read = %d, %v; want 0, EOF", n, err)
}
})
}
func TestServer_Request_Get_PathSlashes(t *testing.T) {
testServerRequest(t, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":path", "/%2f/"),
EndStream: true, // no DATA frames
EndHeaders: true,
})
}, func(r *http.Request) {
if r.RequestURI != "/%2f/" {
t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI)
}
if r.URL.Path != "///" {
t.Errorf("URL.Path = %q; want ///", r.URL.Path)
}
})
}
// TODO: add a test with EndStream=true on the HEADERS but setting a
// Content-Length anyway. Should we just omit it and force it to
// zero?
func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) {
testServerRequest(t, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: true,
EndHeaders: true,
})
}, func(r *http.Request) {
if r.Method != "POST" {
t.Errorf("Method = %q; want POST", r.Method)
}
if r.ContentLength != 0 {
t.Errorf("ContentLength = %v; want 0", r.ContentLength)
}
if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 {
t.Errorf("Read = %d, %v; want 0, EOF", n, err)
}
})
}
func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) {
testBodyContents(t, -1, "", func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: false, // to say DATA frames are coming
EndHeaders: true,
})
st.writeData(1, true, nil) // just kidding. empty body.
})
}
func TestServer_Request_Post_Body_OneData(t *testing.T) {
const content = "Some content"
testBodyContents(t, -1, content, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: false, // to say DATA frames are coming
EndHeaders: true,
})
st.writeData(1, true, []byte(content))
})
}
func TestServer_Request_Post_Body_TwoData(t *testing.T) {
const content = "Some content"
testBodyContents(t, -1, content, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: false, // to say DATA frames are coming
EndHeaders: true,
})
st.writeData(1, false, []byte(content[:5]))
st.writeData(1, true, []byte(content[5:]))
})
}
func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) {
const content = "Some content"
testBodyContents(t, int64(len(content)), content, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(
":method", "POST",
"content-length", strconv.Itoa(len(content)),
),
EndStream: false, // to say DATA frames are coming
EndHeaders: true,
})
st.writeData(1, true, []byte(content))
})
}
func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) {
testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes",
func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(
":method", "POST",
"content-length", "3",
),
EndStream: false, // to say DATA frames are coming
EndHeaders: true,
})
st.writeData(1, true, []byte("12"))
})
}
func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) {
testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes",
func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(
":method", "POST",
"content-length", "4",
),
EndStream: false, // to say DATA frames are coming
EndHeaders: true,
})
st.writeData(1, true, []byte("12345"))
})
}
func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) {
testServerRequest(t, write, func(r *http.Request) {
if r.Method != "POST" {
t.Errorf("Method = %q; want POST", r.Method)
}
if r.ContentLength != wantContentLength {
t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
}
all, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
if string(all) != wantBody {
t.Errorf("Read = %q; want %q", all, wantBody)
}
if err := r.Body.Close(); err != nil {
t.Fatalf("Close: %v", err)
}
})
}
func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) {
testServerRequest(t, write, func(r *http.Request) {
if r.Method != "POST" {
t.Errorf("Method = %q; want POST", r.Method)
}
if r.ContentLength != wantContentLength {
t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength)
}
all, err := ioutil.ReadAll(r.Body)
if err == nil {
t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.",
wantReadError, all)
}
if !strings.Contains(err.Error(), wantReadError) {
t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError)
}
if err := r.Body.Close(); err != nil {
t.Fatalf("Close: %v", err)
}
})
}
// Using a Host header, instead of :authority
func TestServer_Request_Get_Host(t *testing.T) {
const host = "example.com"
testServerRequest(t, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":authority", "", "host", host),
EndStream: true,
EndHeaders: true,
})
}, func(r *http.Request) {
if r.Host != host {
t.Errorf("Host = %q; want %q", r.Host, host)
}
})
}
// Using an :authority pseudo-header, instead of Host
func TestServer_Request_Get_Authority(t *testing.T) {
const host = "example.com"
testServerRequest(t, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":authority", host),
EndStream: true,
EndHeaders: true,
})
}, func(r *http.Request) {
if r.Host != host {
t.Errorf("Host = %q; want %q", r.Host, host)
}
})
}
func TestServer_Request_WithContinuation(t *testing.T) {
wantHeader := http.Header{
"Foo-One": []string{"value-one"},
"Foo-Two": []string{"value-two"},
"Foo-Three": []string{"value-three"},
}
testServerRequest(t, func(st *serverTester) {
fullHeaders := st.encodeHeader(
"foo-one", "value-one",
"foo-two", "value-two",
"foo-three", "value-three",
)
remain := fullHeaders
chunks := 0
for len(remain) > 0 {
const maxChunkSize = 5
chunk := remain
if len(chunk) > maxChunkSize {
chunk = chunk[:maxChunkSize]
}
remain = remain[len(chunk):]
if chunks == 0 {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: chunk,
EndStream: true, // no DATA frames
EndHeaders: false, // we'll have continuation frames
})
} else {
err := st.fr.WriteContinuation(1, len(remain) == 0, chunk)
if err != nil {
t.Fatal(err)
}
}
chunks++
}
if chunks < 2 {
t.Fatal("too few chunks")
}
}, func(r *http.Request) {
if !reflect.DeepEqual(r.Header, wantHeader) {
t.Errorf("Header = %#v; want %#v", r.Header, wantHeader)
}
})
}
// Concatenated cookie headers. ("8.1.2.5 Compressing the Cookie Header Field")
func TestServer_Request_CookieConcat(t *testing.T) {
const host = "example.com"
testServerRequest(t, func(st *serverTester) {
st.bodylessReq1(
":authority", host,
"cookie", "a=b",
"cookie", "c=d",
"cookie", "e=f",
)
}, func(r *http.Request) {
const want = "a=b; c=d; e=f"
if got := r.Header.Get("Cookie"); got != want {
t.Errorf("Cookie = %q; want %q", got, want)
}
})
}
func TestServer_Request_Reject_CapitalHeader(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") })
}
func TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has:colon", "v") })
}
func TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has\x00null", "v") })
}
func TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("", "v") })
}
func TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\nnewline") })
}
func TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\rcarriage") })
}
func TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\x7fdel") })
}
func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") })
}
func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) {
// 8.1.2.3 Request Pseudo-Header Fields
// "All HTTP/2 requests MUST include exactly one valid value" ...
testRejectRequest(t, func(st *serverTester) {
st.addLogFilter("duplicate pseudo-header")
st.bodylessReq1(":method", "GET", ":method", "POST")
})
}
func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) {
// 8.1.2.3 Request Pseudo-Header Fields
// "All pseudo-header fields MUST appear in the header block
// before regular header fields. Any request or response that
// contains a pseudo-header field that appears in a header
// block after a regular header field MUST be treated as
// malformed (Section 8.1.2.6)."
testRejectRequest(t, func(st *serverTester) {
st.addLogFilter("pseudo-header after regular header")
var buf bytes.Buffer
enc := hpack.NewEncoder(&buf)
enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"})
enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: buf.Bytes(),
EndStream: true,
EndHeaders: true,
})
})
}
func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") })
}
func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") })
}
func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) {
testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") })
}
func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) {
testRejectRequest(t, func(st *serverTester) {
st.addLogFilter(`invalid pseudo-header ":unknown_thing"`)
st.bodylessReq1(":unknown_thing", "")
})
}
func testRejectRequest(t *testing.T, send func(*serverTester)) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
t.Error("server request made it to handler; should've been rejected")
})
defer st.Close()
st.greet()
send(st)
st.wantRSTStream(1, ErrCodeProtocol)
}
func testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
t.Error("server request made it to handler; should've been rejected")
}, optQuiet)
defer st.Close()
st.greet()
send(st)
gf := st.wantGoAway()
if gf.ErrCode != ErrCodeProtocol {
t.Errorf("err code = %v; want %v", gf.ErrCode, ErrCodeProtocol)
}
}
// Section 5.1, on idle connections: "Receiving any frame other than
// HEADERS or PRIORITY on a stream in this state MUST be treated as a
// connection error (Section 5.4.1) of type PROTOCOL_ERROR."
func TestRejectFrameOnIdle_WindowUpdate(t *testing.T) {
testRejectRequestWithProtocolError(t, func(st *serverTester) {
st.fr.WriteWindowUpdate(123, 456)
})
}
func TestRejectFrameOnIdle_Data(t *testing.T) {
testRejectRequestWithProtocolError(t, func(st *serverTester) {
st.fr.WriteData(123, true, nil)
})
}
func TestRejectFrameOnIdle_RSTStream(t *testing.T) {
testRejectRequestWithProtocolError(t, func(st *serverTester) {
st.fr.WriteRSTStream(123, ErrCodeCancel)
})
}
func TestServer_Request_Connect(t *testing.T) {
testServerRequest(t, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeaderRaw(
":method", "CONNECT",
":authority", "example.com:123",
),
EndStream: true,
EndHeaders: true,
})
}, func(r *http.Request) {
if g, w := r.Method, "CONNECT"; g != w {
t.Errorf("Method = %q; want %q", g, w)
}
if g, w := r.RequestURI, "example.com:123"; g != w {
t.Errorf("RequestURI = %q; want %q", g, w)
}
if g, w := r.URL.Host, "example.com:123"; g != w {
t.Errorf("URL.Host = %q; want %q", g, w)
}
})
}
func TestServer_Request_Connect_InvalidPath(t *testing.T) {
testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeaderRaw(
":method", "CONNECT",
":authority", "example.com:123",
":path", "/bogus",
),
EndStream: true,
EndHeaders: true,
})
})
}
func TestServer_Request_Connect_InvalidScheme(t *testing.T) {
testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeaderRaw(
":method", "CONNECT",
":authority", "example.com:123",
":scheme", "https",
),
EndStream: true,
EndHeaders: true,
})
})
}
func TestServer_Ping(t *testing.T) {
st := newServerTester(t, nil)
defer st.Close()
st.greet()
// Server should ignore this one, since it has ACK set.
ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
if err := st.fr.WritePing(true, ackPingData); err != nil {
t.Fatal(err)
}
// But the server should reply to this one, since ACK is false.
pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
if err := st.fr.WritePing(false, pingData); err != nil {
t.Fatal(err)
}
pf := st.wantPing()
if !pf.Flags.Has(FlagPingAck) {
t.Error("response ping doesn't have ACK set")
}
if pf.Data != pingData {
t.Errorf("response ping has data %q; want %q", pf.Data, pingData)
}
}
func TestServer_RejectsLargeFrames(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("see golang.org/issue/13434")
}
st := newServerTester(t, nil)
defer st.Close()
st.greet()
	// Write a frame that's too large by one byte.
	// We ignore the return value because it's expected that the server
	// will only read the first 9 bytes (the header) and then disconnect.
st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
gf := st.wantGoAway()
if gf.ErrCode != ErrCodeFrameSize {
t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
}
if st.serverLogBuf.Len() != 0 {
// Previously we spun here for a bit until the GOAWAY disconnect
		// timer fired, logging while we waited.
t.Errorf("unexpected server output: %.500s\n", st.serverLogBuf.Bytes())
}
}
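// TestServer_Handler_Sends_WindowUpdate verifies that as the handler consumes
// request body bytes, the server sends matching connection-level and
// stream-level WINDOW_UPDATE frames back to the client.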
func TestServer_Handler_Sends_WindowUpdate(t *testing.T) {
puppet := newHandlerPuppet()
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
puppet.act(w, r)
})
defer st.Close()
defer puppet.done()
st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: false, // data coming
EndHeaders: true,
})
st.writeData(1, false, []byte("abcdef"))
puppet.do(readBodyHandler(t, "abc"))
st.wantWindowUpdate(0, 3)
st.wantWindowUpdate(1, 3)
puppet.do(readBodyHandler(t, "def"))
st.wantWindowUpdate(0, 3)
st.wantWindowUpdate(1, 3)
st.writeData(1, true, []byte("ghijkl")) // END_STREAM here
puppet.do(readBodyHandler(t, "ghi"))
puppet.do(readBodyHandler(t, "jkl"))
st.wantWindowUpdate(0, 3)
st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM
}
// The padded version of TestServer_Handler_Sends_WindowUpdate.
// See golang.org/issue/16556
func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) {
puppet := newHandlerPuppet()
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
puppet.act(w, r)
})
defer st.Close()
defer puppet.done()
st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: false,
EndHeaders: true,
})
st.writeDataPadded(1, false, []byte("abcdef"), []byte{0, 0, 0, 0})
// Expect to immediately get our 5 bytes of padding back for
// both the connection and stream (4 bytes of padding + 1 byte of length)
st.wantWindowUpdate(0, 5)
st.wantWindowUpdate(1, 5)
puppet.do(readBodyHandler(t, "abc"))
st.wantWindowUpdate(0, 3)
st.wantWindowUpdate(1, 3)
puppet.do(readBodyHandler(t, "def"))
st.wantWindowUpdate(0, 3)
st.wantWindowUpdate(1, 3)
}
func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) {
st := newServerTester(t, nil)
defer st.Close()
st.greet()
if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil {
t.Fatal(err)
}
gf := st.wantGoAway()
if gf.ErrCode != ErrCodeFlowControl {
t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl)
}
if gf.LastStreamID != 0 {
t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0)
}
}
func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) {
inHandler := make(chan bool)
blockHandler := make(chan bool)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
inHandler <- true
<-blockHandler
})
defer st.Close()
defer close(blockHandler)
st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: false, // keep it open
EndHeaders: true,
})
<-inHandler
// Send a bogus window update:
if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil {
t.Fatal(err)
}
st.wantRSTStream(1, ErrCodeFlowControl)
}
// testServerPostUnblock sends a hanging POST with unsent data to the handler,
// then runs fn once the handler has started, and verifies that the error
// returned from the handler is acceptable. It fails if it takes over 5
// seconds for the handler to exit.
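//
// A typical invocation looks roughly like TestServer_RSTStream_Unblocks_Read
// below: the handler blocks in r.Body.Read, fn pokes the connection (for
// example by writing a RST_STREAM or closing st.cc), and checkErr inspects
// the error that Read returned.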
func testServerPostUnblock(t *testing.T,
handler func(http.ResponseWriter, *http.Request) error,
fn func(*serverTester),
checkErr func(error),
otherHeaders ...string) {
inHandler := make(chan bool)
errc := make(chan error, 1)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
inHandler <- true
errc <- handler(w, r)
})
defer st.Close()
st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...),
EndStream: false, // keep it open
EndHeaders: true,
})
<-inHandler
fn(st)
select {
case err := <-errc:
if checkErr != nil {
checkErr(err)
}
case <-time.After(5 * time.Second):
t.Fatal("timeout waiting for Handler to return")
}
}
func TestServer_RSTStream_Unblocks_Read(t *testing.T) {
testServerPostUnblock(t,
func(w http.ResponseWriter, r *http.Request) (err error) {
_, err = r.Body.Read(make([]byte, 1))
return
},
func(st *serverTester) {
if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
t.Fatal(err)
}
},
func(err error) {
want := StreamError{StreamID: 0x1, Code: 0x8}
if !reflect.DeepEqual(err, want) {
t.Errorf("Read error = %v; want %v", err, want)
}
},
)
}
func TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) {
	// Run this test a bunch of times, because the deadlock it guards
	// against doesn't reproduce on every run.
n := 50
if testing.Short() {
n = 5
}
for i := 0; i < n; i++ {
testServer_RSTStream_Unblocks_Header_Write(t)
}
}
func testServer_RSTStream_Unblocks_Header_Write(t *testing.T) {
inHandler := make(chan bool, 1)
unblockHandler := make(chan bool, 1)
headerWritten := make(chan bool, 1)
wroteRST := make(chan bool, 1)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
inHandler <- true
<-wroteRST
w.Header().Set("foo", "bar")
w.WriteHeader(200)
w.(http.Flusher).Flush()
headerWritten <- true
<-unblockHandler
})
defer st.Close()
st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: false, // keep it open
EndHeaders: true,
})
<-inHandler
if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
t.Fatal(err)
}
wroteRST <- true
st.awaitIdle()
select {
case <-headerWritten:
case <-time.After(2 * time.Second):
t.Error("timeout waiting for header write")
}
unblockHandler <- true
}
func TestServer_DeadConn_Unblocks_Read(t *testing.T) {
testServerPostUnblock(t,
func(w http.ResponseWriter, r *http.Request) (err error) {
_, err = r.Body.Read(make([]byte, 1))
return
},
func(st *serverTester) { st.cc.Close() },
func(err error) {
if err == nil {
t.Error("unexpected nil error from Request.Body.Read")
}
},
)
}
var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error {
<-w.(http.CloseNotifier).CloseNotify()
return nil
}
func TestServer_CloseNotify_After_RSTStream(t *testing.T) {
testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
t.Fatal(err)
}
}, nil)
}
func TestServer_CloseNotify_After_ConnClose(t *testing.T) {
testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil)
}
// Verify that CloseNotify unblocks after a stream error caused by the
// client's own mistake, as opposed to the client explicitly canceling the
// stream (which TestServer_CloseNotify_After_RSTStream above covers).
func TestServer_CloseNotify_After_StreamError(t *testing.T) {
testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {
// data longer than declared Content-Length => stream error
st.writeData(1, true, []byte("1234"))
}, nil, "content-length", "3")
}
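// TestServer_StateTransitions walks stream 1 through the idle, open,
// half-closed (remote), and closed states, checking the server's view of the
// stream at each step.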
func TestServer_StateTransitions(t *testing.T) {
var st *serverTester
inHandler := make(chan bool)
writeData := make(chan bool)
leaveHandler := make(chan bool)
st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
inHandler <- true
if st.stream(1) == nil {
t.Errorf("nil stream 1 in handler")
}
if got, want := st.streamState(1), stateOpen; got != want {
t.Errorf("in handler, state is %v; want %v", got, want)
}
writeData <- true
if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF {
t.Errorf("body read = %d, %v; want 0, EOF", n, err)
}
if got, want := st.streamState(1), stateHalfClosedRemote; got != want {
t.Errorf("in handler, state is %v; want %v", got, want)
}
<-leaveHandler
})
st.greet()
if st.stream(1) != nil {
t.Fatal("stream 1 should be empty")
}
if got := st.streamState(1); got != stateIdle {
t.Fatalf("stream 1 should be idle; got %v", got)
}
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: false, // keep it open
EndHeaders: true,
})
<-inHandler
<-writeData
st.writeData(1, true, nil)
leaveHandler <- true
hf := st.wantHeaders()
if !hf.StreamEnded() {
t.Fatal("expected END_STREAM flag")
}
if got, want := st.streamState(1), stateClosed; got != want {
t.Errorf("at end, state is %v; want %v", got, want)
}
if st.stream(1) != nil {
t.Fatal("at end, stream 1 should be gone")
}
}
// test HEADERS w/o EndHeaders + another HEADERS (should get rejected)
func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) {
testServerRejectsConn(t, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(),
EndStream: true,
EndHeaders: false,
})
st.writeHeaders(HeadersFrameParam{ // Not a continuation.
StreamID: 3, // different stream.
BlockFragment: st.encodeHeader(),
EndStream: true,
EndHeaders: true,
})
})
}
// test HEADERS w/o EndHeaders + PING (should get rejected)
func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) {
testServerRejectsConn(t, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(),
EndStream: true,
EndHeaders: false,
})
if err := st.fr.WritePing(false, [8]byte{}); err != nil {
t.Fatal(err)
}
})
}
// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected)
func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) {
testServerRejectsConn(t, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(),
EndStream: true,
EndHeaders: true,
})
st.wantHeaders()
if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
t.Fatal(err)
}
})
}
// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID
func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) {
testServerRejectsConn(t, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(),
EndStream: true,
EndHeaders: false,
})
if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
t.Fatal(err)
}
})
}
// No HEADERS on stream 0.
func TestServer_Rejects_Headers0(t *testing.T) {
testServerRejectsConn(t, func(st *serverTester) {
st.fr.AllowIllegalWrites = true
st.writeHeaders(HeadersFrameParam{
StreamID: 0,
BlockFragment: st.encodeHeader(),
EndStream: true,
EndHeaders: true,
})
})
}
// No CONTINUATION on stream 0.
func TestServer_Rejects_Continuation0(t *testing.T) {
testServerRejectsConn(t, func(st *serverTester) {
st.fr.AllowIllegalWrites = true
if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil {
t.Fatal(err)
}
})
}
// No PRIORITY on stream 0.
func TestServer_Rejects_Priority0(t *testing.T) {
testServerRejectsConn(t, func(st *serverTester) {
st.fr.AllowIllegalWrites = true
st.writePriority(0, PriorityParam{StreamDep: 1})
})
}
// No HEADERS frame with a self-dependence.
func TestServer_Rejects_HeadersSelfDependence(t *testing.T) {
testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
st.fr.AllowIllegalWrites = true
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(),
EndStream: true,
EndHeaders: true,
Priority: PriorityParam{StreamDep: 1},
})
})
}
// No PRIORITY frame with a self-dependence.
func TestServer_Rejects_PrioritySelfDependence(t *testing.T) {
testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
st.fr.AllowIllegalWrites = true
st.writePriority(1, PriorityParam{StreamDep: 1})
})
}
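// Clients may not send PUSH_PROMISE; receiving one is treated as a
// connection error.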
func TestServer_Rejects_PushPromise(t *testing.T) {
testServerRejectsConn(t, func(st *serverTester) {
pp := PushPromiseParam{
StreamID: 1,
PromiseID: 3,
}
if err := st.fr.WritePushPromise(pp); err != nil {
t.Fatal(err)
}
})
}
// testServerRejectsConn tests that the server hangs up with a GOAWAY
// frame and a server close after the client does something
// deserving a CONNECTION_ERROR.
func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
st.addLogFilter("connection error: PROTOCOL_ERROR")
defer st.Close()
st.greet()
writeReq(st)
st.wantGoAway()
errc := make(chan error, 1)
go func() {
fr, err := st.fr.ReadFrame()
if err == nil {
err = fmt.Errorf("got frame of type %T", fr)
}
errc <- err
}()
select {
case err := <-errc:
if err != io.EOF {
t.Errorf("ReadFrame = %v; want io.EOF", err)
}
case <-time.After(2 * time.Second):
t.Error("timeout waiting for disconnect")
}
}
// testServerRejectsStream tests that the server sends a RST_STREAM with the provided
// error code after a client sends a bogus request.
func testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
defer st.Close()
st.greet()
writeReq(st)
st.wantRSTStream(1, code)
}
// testServerRequest sets up an idle HTTP/2 connection and lets you
// write a single request with writeReq, and then verify that the
// *http.Request is built correctly in checkReq.
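//
// A sketch of a typical call (mirroring TestServer_Request_Connect above):
//
//	testServerRequest(t, func(st *serverTester) {
//		st.writeHeaders(HeadersFrameParam{
//			StreamID:      1,
//			BlockFragment: st.encodeHeader(),
//			EndStream:     true,
//			EndHeaders:    true,
//		})
//	}, func(r *http.Request) {
//		// assertions against the decoded *http.Request
//	})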
func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) {
gotReq := make(chan bool, 1)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
if r.Body == nil {
t.Fatal("nil Body")
}
checkReq(r)
gotReq <- true
})
defer st.Close()
st.greet()
writeReq(st)
select {
case <-gotReq:
case <-time.After(2 * time.Second):
t.Error("timeout waiting for request")
}
}
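// getSlash sends a simple bodyless GET request for "/" on stream 1.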
func getSlash(st *serverTester) { st.bodylessReq1() }
func TestServer_Response_NoData(t *testing.T) {
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
// Nothing.
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if !hf.StreamEnded() {
t.Fatal("want END_STREAM flag")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
})
}
func TestServer_Response_NoData_Header_FooBar(t *testing.T) {
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
w.Header().Set("Foo-Bar", "some-value")
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if !hf.StreamEnded() {
t.Fatal("want END_STREAM flag")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"foo-bar", "some-value"},
{"content-length", "0"},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
})
}
func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) {
const msg = "<html>this is HTML."
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
w.Header().Set("Content-Type", "foo/bar")
io.WriteString(w, msg)
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("don't want END_STREAM, expecting data")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"content-type", "foo/bar"},
{"content-length", strconv.Itoa(len(msg))},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
df := st.wantData()
if !df.StreamEnded() {
t.Error("expected DATA to have END_STREAM flag")
}
if got := string(df.Data()); got != msg {
t.Errorf("got DATA %q; want %q", got, msg)
}
})
}
func TestServer_Response_Nosniff_WithoutContentType(t *testing.T) {
const msg = "<html>this is HTML."
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(200)
io.WriteString(w, msg)
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("don't want END_STREAM, expecting data")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"x-content-type-options", "nosniff"},
{"content-type", "application/octet-stream"},
{"content-length", strconv.Itoa(len(msg))},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
df := st.wantData()
if !df.StreamEnded() {
t.Error("expected DATA to have END_STREAM flag")
}
if got := string(df.Data()); got != msg {
t.Errorf("got DATA %q; want %q", got, msg)
}
})
}
func TestServer_Response_TransferEncoding_chunked(t *testing.T) {
const msg = "hi"
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
w.Header().Set("Transfer-Encoding", "chunked") // should be stripped
io.WriteString(w, msg)
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"content-type", "text/plain; charset=utf-8"},
{"content-length", strconv.Itoa(len(msg))},
{"x-content-type-options", "nosniff"},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
})
}
// Header accessed only after the initial write.
func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) {
const msg = "<html>this is HTML."
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
io.WriteString(w, msg)
w.Header().Set("foo", "should be ignored")
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("unexpected END_STREAM")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"content-type", "text/html; charset=utf-8"},
{"content-length", strconv.Itoa(len(msg))},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
})
}
// Header accessed before the initial write and later mutated.
func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) {
const msg = "<html>this is HTML."
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
w.Header().Set("foo", "proper value")
io.WriteString(w, msg)
w.Header().Set("foo", "should be ignored")
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("unexpected END_STREAM")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"foo", "proper value"},
{"content-type", "text/html; charset=utf-8"},
{"content-length", strconv.Itoa(len(msg))},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
})
}
func TestServer_Response_Data_SniffLenType(t *testing.T) {
const msg = "<html>this is HTML."
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
io.WriteString(w, msg)
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("don't want END_STREAM, expecting data")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"content-type", "text/html; charset=utf-8"},
{"content-length", strconv.Itoa(len(msg))},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
df := st.wantData()
if !df.StreamEnded() {
t.Error("expected DATA to have END_STREAM flag")
}
if got := string(df.Data()); got != msg {
t.Errorf("got DATA %q; want %q", got, msg)
}
})
}
func TestServer_Response_Header_Flush_MidWrite(t *testing.T) {
const msg = "<html>this is HTML"
const msg2 = ", and this is the next chunk"
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
io.WriteString(w, msg)
w.(http.Flusher).Flush()
io.WriteString(w, msg2)
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("unexpected END_STREAM flag")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"content-type", "text/html; charset=utf-8"}, // sniffed
// and no content-length
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
{
df := st.wantData()
if df.StreamEnded() {
t.Error("unexpected END_STREAM flag")
}
if got := string(df.Data()); got != msg {
t.Errorf("got DATA %q; want %q", got, msg)
}
}
{
df := st.wantData()
if !df.StreamEnded() {
t.Error("wanted END_STREAM flag on last data chunk")
}
if got := string(df.Data()); got != msg2 {
t.Errorf("got DATA %q; want %q", got, msg2)
}
}
})
}
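// TestServer_Response_LargeWrite has the handler write 1MB and verifies that
// the server splits it into DATA frames no larger than the client's
// advertised SETTINGS_MAX_FRAME_SIZE.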
func TestServer_Response_LargeWrite(t *testing.T) {
const size = 1 << 20
const maxFrameSize = 16 << 10
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
n, err := w.Write(bytes.Repeat([]byte("a"), size))
if err != nil {
return fmt.Errorf("Write error: %v", err)
}
if n != size {
return fmt.Errorf("wrong size %d from Write", n)
}
return nil
}, func(st *serverTester) {
if err := st.fr.WriteSettings(
Setting{SettingInitialWindowSize, 0},
Setting{SettingMaxFrameSize, maxFrameSize},
); err != nil {
t.Fatal(err)
}
st.wantSettingsAck()
getSlash(st) // make the single request
// Give the handler quota to write:
if err := st.fr.WriteWindowUpdate(1, size); err != nil {
t.Fatal(err)
}
// Give the handler quota to write to connection-level
// window as well
if err := st.fr.WriteWindowUpdate(0, size); err != nil {
t.Fatal(err)
}
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("unexpected END_STREAM flag")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"content-type", "text/plain; charset=utf-8"}, // sniffed
{"x-content-type-options", "nosniff"},
// and no content-length
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
var bytes, frames int
for {
df := st.wantData()
bytes += len(df.Data())
frames++
for _, b := range df.Data() {
if b != 'a' {
t.Fatal("non-'a' byte seen in DATA")
}
}
if df.StreamEnded() {
break
}
}
if bytes != size {
t.Errorf("Got %d bytes; want %d", bytes, size)
}
if want := int(size / maxFrameSize); frames < want || frames > want*2 {
t.Errorf("Got %d frames; want %d", frames, size)
}
})
}
// Test that the handler can't write more than the client allows
func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) {
	// The client reads DATA in chunks of these sizes. Before each read, it
	// grants exactly enough flow-control quota to satisfy the read. Numbers
	// chosen arbitrarily.
reads := []int{123, 1, 13, 127}
size := 0
for _, n := range reads {
size += n
}
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
w.(http.Flusher).Flush()
n, err := w.Write(bytes.Repeat([]byte("a"), size))
if err != nil {
return fmt.Errorf("Write error: %v", err)
}
if n != size {
return fmt.Errorf("wrong size %d from Write", n)
}
return nil
}, func(st *serverTester) {
// Set the window size to something explicit for this test.
// It's also how much initial data we expect.
if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, uint32(reads[0])}); err != nil {
t.Fatal(err)
}
st.wantSettingsAck()
getSlash(st) // make the single request
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("unexpected END_STREAM flag")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
df := st.wantData()
if got := len(df.Data()); got != reads[0] {
t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got)
}
for _, quota := range reads[1:] {
if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil {
t.Fatal(err)
}
df := st.wantData()
if int(quota) != len(df.Data()) {
t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota)
}
}
})
}
// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM.
func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) {
const size = 1 << 20
const maxFrameSize = 16 << 10
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
w.(http.Flusher).Flush()
errc := make(chan error, 1)
go func() {
_, err := w.Write(bytes.Repeat([]byte("a"), size))
errc <- err
}()
select {
case err := <-errc:
if err == nil {
return errors.New("unexpected nil error from Write in handler")
}
return nil
case <-time.After(2 * time.Second):
return errors.New("timeout waiting for Write in handler")
}
}, func(st *serverTester) {
if err := st.fr.WriteSettings(
Setting{SettingInitialWindowSize, 0},
Setting{SettingMaxFrameSize, maxFrameSize},
); err != nil {
t.Fatal(err)
}
st.wantSettingsAck()
getSlash(st) // make the single request
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("unexpected END_STREAM flag")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {
t.Fatal(err)
}
})
}
func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) {
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
w.(http.Flusher).Flush()
// Nothing; send empty DATA
return nil
}, func(st *serverTester) {
// Handler gets no data quota:
if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil {
t.Fatal(err)
}
st.wantSettingsAck()
getSlash(st) // make the single request
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("unexpected END_STREAM flag")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
df := st.wantData()
if got := len(df.Data()); got != 0 {
t.Fatalf("unexpected %d DATA bytes; want 0", got)
}
if !df.StreamEnded() {
t.Fatal("DATA didn't have END_STREAM")
}
})
}
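// TestServer_Response_Automatic100Continue verifies that a request carrying
// "Expect: 100-continue" receives an automatic 100 response once the handler
// starts reading the body, and that the Expect header is hidden from the
// handler.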
func TestServer_Response_Automatic100Continue(t *testing.T) {
const msg = "foo"
const reply = "bar"
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
if v := r.Header.Get("Expect"); v != "" {
t.Errorf("Expect header = %q; want empty", v)
}
buf := make([]byte, len(msg))
// This read should trigger the 100-continue being sent.
if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg {
return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg)
}
_, err := io.WriteString(w, reply)
return err
}, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"),
EndStream: false,
EndHeaders: true,
})
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("unexpected END_STREAM flag")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "100"},
}
if !reflect.DeepEqual(goth, wanth) {
t.Fatalf("Got headers %v; want %v", goth, wanth)
}
// Okay, they sent status 100, so we can send our
// gigantic and/or sensitive "foo" payload now.
st.writeData(1, true, []byte(msg))
st.wantWindowUpdate(0, uint32(len(msg)))
hf = st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("expected data to follow")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
goth = st.decodeHeader(hf.HeaderBlockFragment())
wanth = [][2]string{
{":status", "200"},
{"content-type", "text/plain; charset=utf-8"},
{"content-length", strconv.Itoa(len(reply))},
{"x-content-type-options", "nosniff"},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
df := st.wantData()
if string(df.Data()) != reply {
t.Errorf("Client read %q; want %q", df.Data(), reply)
}
if !df.StreamEnded() {
t.Errorf("expect data stream end")
}
})
}
func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) {
errc := make(chan error, 1)
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
p := []byte("some data.\n")
for {
_, err := w.Write(p)
if err != nil {
errc <- err
return nil
}
}
}, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(),
EndStream: false,
EndHeaders: true,
})
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("unexpected END_STREAM flag")
}
if !hf.HeadersEnded() {
t.Fatal("want END_HEADERS flag")
}
// Close the connection and wait for the handler to (hopefully) notice.
st.cc.Close()
select {
case <-errc:
case <-time.After(5 * time.Second):
t.Error("timeout")
}
})
}
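// TestServer_Rejects_Too_Many_Streams opens the maximum number of concurrent
// streams, verifies that the next stream is reset, and checks that the HPACK
// decoding context survives the rejection so a later stream still decodes
// correctly.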
func TestServer_Rejects_Too_Many_Streams(t *testing.T) {
const testPath = "/some/path"
inHandler := make(chan uint32)
leaveHandler := make(chan bool)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
id := w.(*responseWriter).rws.stream.id
inHandler <- id
if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath {
t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath)
}
<-leaveHandler
})
defer st.Close()
st.greet()
nextStreamID := uint32(1)
streamID := func() uint32 {
defer func() { nextStreamID += 2 }()
return nextStreamID
}
sendReq := func(id uint32, headers ...string) {
st.writeHeaders(HeadersFrameParam{
StreamID: id,
BlockFragment: st.encodeHeader(headers...),
EndStream: true,
EndHeaders: true,
})
}
for i := 0; i < defaultMaxStreams; i++ {
sendReq(streamID())
<-inHandler
}
defer func() {
for i := 0; i < defaultMaxStreams; i++ {
leaveHandler <- true
}
}()
// And this one should cross the limit:
// (It's also sent as a CONTINUATION, to verify we still track the decoder context,
// even if we're rejecting it)
rejectID := streamID()
headerBlock := st.encodeHeader(":path", testPath)
frag1, frag2 := headerBlock[:3], headerBlock[3:]
st.writeHeaders(HeadersFrameParam{
StreamID: rejectID,
BlockFragment: frag1,
EndStream: true,
EndHeaders: false, // CONTINUATION coming
})
if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil {
t.Fatal(err)
}
st.wantRSTStream(rejectID, ErrCodeProtocol)
// But let a handler finish:
leaveHandler <- true
st.wantHeaders()
// And now another stream should be able to start:
goodID := streamID()
sendReq(goodID, ":path", testPath)
select {
case got := <-inHandler:
if got != goodID {
t.Errorf("Got stream %d; want %d", got, goodID)
}
case <-time.After(3 * time.Second):
t.Error("timeout waiting for handler")
}
}
// So many response headers that the server needs to use CONTINUATION frames:
func TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) {
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
h := w.Header()
for i := 0; i < 5000; i++ {
h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i))
}
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if hf.HeadersEnded() {
t.Fatal("got unwanted END_HEADERS flag")
}
n := 0
for {
n++
cf := st.wantContinuation()
if cf.HeadersEnded() {
break
}
}
if n < 5 {
t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n)
}
})
}
// This previously crashed (reported by Mathieu Lonjaret as observed
// while using Camlistore) because we got a DATA frame from the client
// after the handler exited and our logic at the time was wrong,
// keeping a stream in the map in stateClosed, which tickled an
// invariant check later when we tried to remove that stream (via
// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop
// ended.
func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
// nothing
return nil
}, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(),
EndStream: false, // DATA is coming
EndHeaders: true,
})
hf := st.wantHeaders()
if !hf.HeadersEnded() || !hf.StreamEnded() {
t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf)
}
		// Sent when a Handler closes while a client has
// indicated it's still sending DATA:
st.wantRSTStream(1, ErrCodeNo)
// Now the handler has ended, so it's ended its
// stream, but the client hasn't closed its side
// (stateClosedLocal). So send more data and verify
// it doesn't crash with an internal invariant panic, like
// it did before.
st.writeData(1, true, []byte("foo"))
// Get our flow control bytes back, since the handler didn't get them.
st.wantWindowUpdate(0, uint32(len("foo")))
// Sent after a peer sends data anyway (admittedly the
// previous RST_STREAM might've still been in-flight),
// but they'll get the more friendly 'cancel' code
// first.
st.wantRSTStream(1, ErrCodeStreamClosed)
// Set up a bunch of machinery to record the panic we saw
// previously.
var (
panMu sync.Mutex
panicVal interface{}
)
testHookOnPanicMu.Lock()
testHookOnPanic = func(sc *serverConn, pv interface{}) bool {
panMu.Lock()
panicVal = pv
panMu.Unlock()
return true
}
testHookOnPanicMu.Unlock()
// Now force the serve loop to end, via closing the connection.
st.cc.Close()
select {
case <-st.sc.doneServing:
// Loop has exited.
panMu.Lock()
got := panicVal
panMu.Unlock()
if got != nil {
t.Errorf("Got panic: %v", got)
}
case <-time.After(5 * time.Second):
t.Error("timeout")
}
})
}
func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) }
func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) }
func testRejectTLS(t *testing.T, max uint16) {
st := newServerTester(t, nil, func(c *tls.Config) {
c.MaxVersion = max
})
defer st.Close()
gf := st.wantGoAway()
if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
t.Errorf("Got error code %v; want %v", got, want)
}
}
func TestServer_Rejects_TLSBadCipher(t *testing.T) {
st := newServerTester(t, nil, func(c *tls.Config) {
// Only list bad ones:
c.CipherSuites = []uint16{
tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
}
})
defer st.Close()
gf := st.wantGoAway()
if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {
t.Errorf("Got error code %v; want %v", got, want)
}
}
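// TestServer_Advertises_Common_Cipher verifies that Go's default TLS cipher
// suites include the HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
// so a client supporting only that suite can still connect.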
func TestServer_Advertises_Common_Cipher(t *testing.T) {
const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
st := newServerTester(t, nil, func(c *tls.Config) {
// Have the client only support the one required by the spec.
c.CipherSuites = []uint16{requiredSuite}
}, func(ts *httptest.Server) {
var srv *http.Server = ts.Config
// Have the server configured with no specific cipher suites.
// This tests that Go's defaults include the required one.
srv.TLSConfig = nil
})
defer st.Close()
st.greet()
}
func (st *serverTester) onHeaderField(f hpack.HeaderField) {
if f.Name == "date" {
return
}
st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value})
}
func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) {
st.decodedHeaders = nil
if _, err := st.hpackDec.Write(headerBlock); err != nil {
st.t.Fatalf("hpack decoding error: %v", err)
}
if err := st.hpackDec.Close(); err != nil {
st.t.Fatalf("hpack decoding error: %v", err)
}
return st.decodedHeaders
}
// testServerResponse sets up an idle HTTP/2 connection. The client function
// should write a single request that must be handled by the handler. This
// waits up to 5s for the client to return, then up to an additional 2s for
// the handler to return.
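//
// A sketch of a typical call (mirroring TestServer_Response_NoData above):
//
//	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
//		return nil // handler side
//	}, func(st *serverTester) {
//		getSlash(st)
//		st.wantHeaders()
//	})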
func testServerResponse(t testing.TB,
handler func(http.ResponseWriter, *http.Request) error,
client func(*serverTester),
) {
errc := make(chan error, 1)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
if r.Body == nil {
t.Fatal("nil Body")
}
errc <- handler(w, r)
})
defer st.Close()
donec := make(chan bool)
go func() {
defer close(donec)
st.greet()
client(st)
}()
select {
case <-donec:
case <-time.After(5 * time.Second):
t.Fatal("timeout in client")
}
select {
case err := <-errc:
if err != nil {
t.Fatalf("Error in handler: %v", err)
}
case <-time.After(2 * time.Second):
t.Fatal("timeout in handler")
}
}
// readBodyHandler returns an http Handler func that reads len(want)
// bytes from r.Body and fails t if the contents read were not
// the value of want.
func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
buf := make([]byte, len(want))
_, err := io.ReadFull(r.Body, buf)
if err != nil {
t.Error(err)
return
}
if string(buf) != want {
t.Errorf("read %q; want %q", buf, want)
}
}
}
// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See:
// https://github.com/tatsuhiro-t/nghttp2/issues/140 &
// http://sourceforge.net/p/curl/bugs/1472/
func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) }
func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) }
func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) {
if runtime.GOOS != "linux" {
t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
}
if testing.Short() {
t.Skip("skipping curl test in short mode")
}
requireCurl(t)
var gotConn int32
testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) }
const msg = "Hello from curl!\n"
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Foo", "Bar")
w.Header().Set("Client-Proto", r.Proto)
io.WriteString(w, msg)
}))
ConfigureServer(ts.Config, &Server{
PermitProhibitedCipherSuites: permitProhibitedCipherSuites,
})
ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
ts.StartTLS()
defer ts.Close()
t.Logf("Running test server for curl to hit at: %s", ts.URL)
container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL)
defer kill(container)
resc := make(chan interface{}, 1)
go func() {
res, err := dockerLogs(container)
if err != nil {
resc <- err
} else {
resc <- res
}
}()
select {
case res := <-resc:
if err, ok := res.(error); ok {
t.Fatal(err)
}
body := string(res.([]byte))
		// Search for both "key: value" and "key:value", since curl changed its output format.
// Our Dockerfile contains the latest version (no space), but just in case people
// didn't rebuild, check both.
if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") {
t.Errorf("didn't see foo: Bar header")
t.Logf("Got: %s", body)
}
if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") {
t.Errorf("didn't see client-proto: HTTP/2 header")
t.Logf("Got: %s", res)
}
if !strings.Contains(string(res.([]byte)), msg) {
t.Errorf("didn't see %q content", msg)
t.Logf("Got: %s", res)
}
case <-time.After(3 * time.Second):
t.Errorf("timeout waiting for curl")
}
if atomic.LoadInt32(&gotConn) == 0 {
t.Error("never saw an http2 connection")
}
}
var doh2load = flag.Bool("h2load", false, "Run h2load test")
func TestServerWithH2Load(t *testing.T) {
if !*doh2load {
t.Skip("Skipping without --h2load flag.")
}
if runtime.GOOS != "linux" {
t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
}
requireH2load(t)
msg := strings.Repeat("Hello, h2load!\n", 5000)
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, msg)
w.(http.Flusher).Flush()
io.WriteString(w, msg)
}))
ts.StartTLS()
defer ts.Close()
cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl",
"-n100000", "-c100", "-m100", ts.URL)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
t.Fatal(err)
}
}
// Issue 12843
func TestServerDoS_MaxHeaderListSize(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
defer st.Close()
// shake hands
frameSize := defaultMaxReadFrameSize
var advHeaderListSize *uint32
st.greetAndCheckSettings(func(s Setting) error {
switch s.ID {
case SettingMaxFrameSize:
if s.Val < minMaxFrameSize {
frameSize = minMaxFrameSize
} else if s.Val > maxFrameSize {
frameSize = maxFrameSize
} else {
frameSize = int(s.Val)
}
case SettingMaxHeaderListSize:
advHeaderListSize = &s.Val
}
return nil
})
if advHeaderListSize == nil {
t.Errorf("server didn't advertise a max header list size")
} else if *advHeaderListSize == 0 {
t.Errorf("server advertised a max header list size of 0")
}
st.encodeHeaderField(":method", "GET")
st.encodeHeaderField(":path", "/")
st.encodeHeaderField(":scheme", "https")
cookie := strings.Repeat("*", 4058)
st.encodeHeaderField("cookie", cookie)
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.headerBuf.Bytes(),
EndStream: true,
EndHeaders: false,
})
// Capture the short encoding of a duplicate ~4K cookie, now
// that we've already sent it once.
st.headerBuf.Reset()
st.encodeHeaderField("cookie", cookie)
// Now send 1MB of it.
const size = 1 << 20
b := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len())
for len(b) > 0 {
chunk := b
if len(chunk) > frameSize {
chunk = chunk[:frameSize]
}
b = b[len(chunk):]
st.fr.WriteContinuation(1, len(b) == 0, chunk)
}
h := st.wantHeaders()
if !h.HeadersEnded() {
t.Fatalf("Got HEADERS without END_HEADERS set: %v", h)
}
headers := st.decodeHeader(h.HeaderBlockFragment())
want := [][2]string{
{":status", "431"},
{"content-type", "text/html; charset=utf-8"},
{"content-length", "63"},
}
if !reflect.DeepEqual(headers, want) {
t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
}
}
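// TestCompressionErrorOnWrite verifies that a header field value exceeding
// the hpack decoder's max string length triggers a COMPRESSION_ERROR GOAWAY,
// while one exactly at the limit only yields a 431 response.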
func TestCompressionErrorOnWrite(t *testing.T) {
const maxStrLen = 8 << 10
var serverConfig *http.Server
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
// No response body.
}, func(ts *httptest.Server) {
serverConfig = ts.Config
serverConfig.MaxHeaderBytes = maxStrLen
})
st.addLogFilter("connection error: COMPRESSION_ERROR")
defer st.Close()
st.greet()
maxAllowed := st.sc.framer.maxHeaderStringLen()
	// Crank this up now that we have a conn whose hpack.Decoder max
	// string length has already been initialized from the earlier low
	// ~8K value. We want this higher so we don't hit the max header
	// list size; we only want to test hitting the max string size.
serverConfig.MaxHeaderBytes = 1 << 20
// First a request with a header that's exactly the max allowed size
// for the hpack compression. It's still too long for the header list
// size, so we'll get the 431 error, but that keeps the compression
// context still valid.
hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed))
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: hbf,
EndStream: true,
EndHeaders: true,
})
h := st.wantHeaders()
if !h.HeadersEnded() {
t.Fatalf("Got HEADERS without END_HEADERS set: %v", h)
}
headers := st.decodeHeader(h.HeaderBlockFragment())
want := [][2]string{
{":status", "431"},
{"content-type", "text/html; charset=utf-8"},
{"content-length", "63"},
}
if !reflect.DeepEqual(headers, want) {
t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
}
df := st.wantData()
if !strings.Contains(string(df.Data()), "HTTP Error 431") {
t.Errorf("Unexpected data body: %q", df.Data())
}
if !df.StreamEnded() {
t.Fatalf("expect data stream end")
}
// And now send one that's just one byte too big.
hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1))
st.writeHeaders(HeadersFrameParam{
StreamID: 3,
BlockFragment: hbf,
EndStream: true,
EndHeaders: true,
})
ga := st.wantGoAway()
if ga.ErrCode != ErrCodeCompression {
t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
}
}
func TestCompressionErrorOnClose(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
// No response body.
})
st.addLogFilter("connection error: COMPRESSION_ERROR")
defer st.Close()
st.greet()
hbf := st.encodeHeader("foo", "bar")
hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails.
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: hbf,
EndStream: true,
EndHeaders: true,
})
ga := st.wantGoAway()
if ga.ErrCode != ErrCodeCompression {
t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
}
}
// test that a server handler can read trailers from a client
func TestServerReadsTrailers(t *testing.T) {
const testBody = "some test body"
writeReq := func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader("trailer", "Foo, Bar", "trailer", "Baz"),
EndStream: false,
EndHeaders: true,
})
st.writeData(1, false, []byte(testBody))
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeaderRaw(
"foo", "foov",
"bar", "barv",
"baz", "bazv",
"surprise", "wasn't declared; shouldn't show up",
),
EndStream: true,
EndHeaders: true,
})
}
checkReq := func(r *http.Request) {
wantTrailer := http.Header{
"Foo": nil,
"Bar": nil,
"Baz": nil,
}
if !reflect.DeepEqual(r.Trailer, wantTrailer) {
t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer)
}
slurp, err := ioutil.ReadAll(r.Body)
if string(slurp) != testBody {
t.Errorf("read body %q; want %q", slurp, testBody)
}
if err != nil {
t.Fatalf("Body slurp: %v", err)
}
wantTrailerAfter := http.Header{
"Foo": {"foov"},
"Bar": {"barv"},
"Baz": {"bazv"},
}
if !reflect.DeepEqual(r.Trailer, wantTrailerAfter) {
t.Errorf("final Trailer = %v; want %v", r.Trailer, wantTrailerAfter)
}
}
testServerRequest(t, writeReq, checkReq)
}
// test that a server handler can send trailers
func TestServerWritesTrailers_WithFlush(t *testing.T) { testServerWritesTrailers(t, true) }
func TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) }
func testServerWritesTrailers(t *testing.T, withFlush bool) {
// See https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B")
w.Header().Add("Trailer", "Server-Trailer-C")
w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered
// Regular headers:
w.Header().Set("Foo", "Bar")
w.Header().Set("Content-Length", "5") // len("Hello")
io.WriteString(w, "Hello")
if withFlush {
w.(http.Flusher).Flush()
}
w.Header().Set("Server-Trailer-A", "valuea")
w.Header().Set("Server-Trailer-C", "valuec") // skipping B
// After a flush, random keys like Server-Surprise shouldn't show up:
w.Header().Set("Server-Surpise", "surprise! this isn't predeclared!")
// But we do permit promoting keys to trailers after a
// flush if they start with the magic
// otherwise-invalid "Trailer:" prefix:
w.Header().Set("Trailer:Post-Header-Trailer", "hi1")
w.Header().Set("Trailer:post-header-trailer2", "hi2")
w.Header().Set("Trailer:Range", "invalid")
w.Header().Set("Trailer:Foo\x01Bogus", "invalid")
w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 7230 4.1.2")
w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 7230 4.1.2")
w.Header().Set("Trailer", "should not be included; Forbidden by RFC 7230 4.1.2")
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if hf.StreamEnded() {
t.Fatal("response HEADERS had END_STREAM")
}
if !hf.HeadersEnded() {
t.Fatal("response HEADERS didn't have END_HEADERS")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"foo", "Bar"},
{"trailer", "Server-Trailer-A, Server-Trailer-B"},
{"trailer", "Server-Trailer-C"},
{"trailer", "Transfer-Encoding, Content-Length, Trailer"},
{"content-type", "text/plain; charset=utf-8"},
{"content-length", "5"},
{"x-content-type-options", "nosniff"},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
}
df := st.wantData()
if string(df.Data()) != "Hello" {
t.Fatalf("Client read %q; want Hello", df.Data())
}
if df.StreamEnded() {
t.Fatalf("data frame had STREAM_ENDED")
}
tf := st.wantHeaders() // for the trailers
if !tf.StreamEnded() {
t.Fatalf("trailers HEADERS lacked END_STREAM")
}
if !tf.HeadersEnded() {
t.Fatalf("trailers HEADERS lacked END_HEADERS")
}
wanth = [][2]string{
{"post-header-trailer", "hi1"},
{"post-header-trailer2", "hi2"},
{"server-trailer-a", "valuea"},
{"server-trailer-c", "valuec"},
}
goth = st.decodeHeader(tf.HeaderBlockFragment())
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
}
})
}
// validate transmitted header field names & values
// golang.org/issue/14048
func TestServerDoesntWriteInvalidHeaders(t *testing.T) {
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
w.Header().Add("OK1", "x")
w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key
w.Header().Add("Bad1\x00", "x") // null in key
w.Header().Add("Bad2", "x\x00y") // null in value
return nil
}, func(st *serverTester) {
getSlash(st)
hf := st.wantHeaders()
if !hf.StreamEnded() {
t.Error("response HEADERS lacked END_STREAM")
}
if !hf.HeadersEnded() {
t.Fatal("response HEADERS didn't have END_HEADERS")
}
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "200"},
{"ok1", "x"},
{"content-length", "0"},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth)
}
})
}
func BenchmarkServerGets(b *testing.B) {
defer disableGoroutineTracking()()
b.ReportAllocs()
const msg = "Hello, world"
st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, msg)
})
defer st.Close()
st.greet()
// Give the server quota to reply. (plus it has the 64KB)
if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
id := 1 + uint32(i)*2
st.writeHeaders(HeadersFrameParam{
StreamID: id,
BlockFragment: st.encodeHeader(),
EndStream: true,
EndHeaders: true,
})
st.wantHeaders()
df := st.wantData()
if !df.StreamEnded() {
b.Fatalf("DATA didn't have END_STREAM; got %v", df)
}
}
}
func BenchmarkServerPosts(b *testing.B) {
defer disableGoroutineTracking()()
b.ReportAllocs()
const msg = "Hello, world"
st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
		// Consume the (empty) body from the peer before replying, otherwise
		// the server will sometimes (depending on scheduling) send the peer
		// a RST_STREAM with the CANCEL error code.
if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
}
io.WriteString(w, msg)
})
defer st.Close()
st.greet()
// Give the server quota to reply. (plus it has the 64KB)
if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
id := 1 + uint32(i)*2
st.writeHeaders(HeadersFrameParam{
StreamID: id,
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: false,
EndHeaders: true,
})
st.writeData(id, true, nil)
st.wantHeaders()
df := st.wantData()
if !df.StreamEnded() {
b.Fatalf("DATA didn't have END_STREAM; got %v", df)
}
}
}
// Send a stream of messages from server to client in separate data frames.
// Brings up performance issues seen in long streams.
// Created to show problem in go issue #18502
func BenchmarkServerToClientStreamDefaultOptions(b *testing.B) {
benchmarkServerToClientStream(b)
}
// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8
// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer.
func BenchmarkServerToClientStreamReuseFrames(b *testing.B) {
benchmarkServerToClientStream(b, optFramerReuseFrames)
}
func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) {
defer disableGoroutineTracking()()
b.ReportAllocs()
const msgLen = 1
// default window size
const windowSize = 1<<16 - 1
// next message to send from the server and for the client to expect
nextMsg := func(i int) []byte {
msg := make([]byte, msgLen)
msg[0] = byte(i)
if len(msg) != msgLen {
panic("invalid test setup msg length")
}
return msg
}
st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
		// Consume the (empty) body from the peer before replying, otherwise
		// the server will sometimes (depending on scheduling) send the peer
		// a RST_STREAM with the CANCEL error code.
if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
}
		for i := 0; i < b.N; i++ {
w.Write(nextMsg(i))
w.(http.Flusher).Flush()
}
}, newServerOpts...)
defer st.Close()
st.greet()
const id = uint32(1)
st.writeHeaders(HeadersFrameParam{
StreamID: id,
BlockFragment: st.encodeHeader(":method", "POST"),
EndStream: false,
EndHeaders: true,
})
st.writeData(id, true, nil)
st.wantHeaders()
var pendingWindowUpdate = uint32(0)
	for i := 0; i < b.N; i++ {
expected := nextMsg(i)
df := st.wantData()
		if !bytes.Equal(expected, df.data) {
b.Fatalf("Bad message received; want %v; got %v", expected, df.data)
}
// try to send infrequent but large window updates so they don't overwhelm the test
pendingWindowUpdate += uint32(len(df.data))
if pendingWindowUpdate >= windowSize/2 {
if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil {
b.Fatal(err)
}
if err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil {
b.Fatal(err)
}
pendingWindowUpdate = 0
}
}
df := st.wantData()
if !df.StreamEnded() {
b.Fatalf("DATA didn't have END_STREAM; got %v", df)
}
}
// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53
// Verify we don't hang.
func TestIssue53(t *testing.T) {
const data = "PRI * HTTP/2.0\r\n\r\nSM" +
"\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad"
s := &http.Server{
ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags),
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte("hello"))
}),
}
s2 := &Server{
MaxReadFrameSize: 1 << 16,
PermitProhibitedCipherSuites: true,
}
c := &issue53Conn{[]byte(data), false, false}
s2.ServeConn(c, &ServeConnOpts{BaseConfig: s})
if !c.closed {
t.Fatal("connection is not closed")
}
}
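// issue53Conn is a fake net.Conn that feeds the go-fuzz input from
// TestIssue53 to the server and records whether it was written to and closed.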
type issue53Conn struct {
data []byte
closed bool
written bool
}
func (c *issue53Conn) Read(b []byte) (n int, err error) {
if len(c.data) == 0 {
return 0, io.EOF
}
n = copy(b, c.data)
c.data = c.data[n:]
return
}
func (c *issue53Conn) Write(b []byte) (n int, err error) {
c.written = true
return len(b), nil
}
func (c *issue53Conn) Close() error {
c.closed = true
return nil
}
func (c *issue53Conn) LocalAddr() net.Addr {
return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706}
}
func (c *issue53Conn) RemoteAddr() net.Addr {
return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706}
}
func (c *issue53Conn) SetDeadline(t time.Time) error { return nil }
func (c *issue53Conn) SetReadDeadline(t time.Time) error { return nil }
func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil }
// golang.org/issue/12895
func TestConfigureServer(t *testing.T) {
tests := []struct {
name string
tlsConfig *tls.Config
wantErr string
}{
{
name: "empty server",
},
{
name: "just the required cipher suite",
tlsConfig: &tls.Config{
CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
},
},
{
name: "just the alternative required cipher suite",
tlsConfig: &tls.Config{
CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256},
},
},
{
name: "missing required cipher suite",
tlsConfig: &tls.Config{
CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384},
},
wantErr: "is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.",
},
{
name: "required after bad",
tlsConfig: &tls.Config{
CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
},
wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after",
},
{
name: "bad after required",
tlsConfig: &tls.Config{
CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA},
},
},
}
for _, tt := range tests {
srv := &http.Server{TLSConfig: tt.tlsConfig}
err := ConfigureServer(srv, nil)
if (err != nil) != (tt.wantErr != "") {
if tt.wantErr != "" {
t.Errorf("%s: success, but want error", tt.name)
} else {
t.Errorf("%s: unexpected error: %v", tt.name, err)
}
}
if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) {
t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr)
}
if err == nil && !srv.TLSConfig.PreferServerCipherSuites {
t.Errorf("%s: PreferServerCipherSuite is false; want true", tt.name)
}
}
}
func TestServerRejectHeadWithBody(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
// No response body.
})
defer st.Close()
st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":method", "HEAD"),
EndStream: false, // what we're testing, a bogus HEAD request with body
EndHeaders: true,
})
st.wantRSTStream(1, ErrCodeProtocol)
}
func TestServerNoAutoContentLengthOnHead(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
// No response body. (or smaller than one frame)
})
defer st.Close()
st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(":method", "HEAD"),
EndStream: true,
EndHeaders: true,
})
h := st.wantHeaders()
headers := st.decodeHeader(h.HeaderBlockFragment())
want := [][2]string{
{":status", "200"},
}
if !reflect.DeepEqual(headers, want) {
t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
}
}
// golang.org/issue/13495
func TestServerNoDuplicateContentType(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
w.Header()["Content-Type"] = []string{""}
fmt.Fprintf(w, "<html><head></head><body>hi</body></html>")
})
defer st.Close()
st.greet()
st.writeHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: st.encodeHeader(),
EndStream: true,
EndHeaders: true,
})
h := st.wantHeaders()
headers := st.decodeHeader(h.HeaderBlockFragment())
want := [][2]string{
{":status", "200"},
{"content-type", ""},
{"content-length", "41"},
{"x-content-type-options", "nosniff"},
}
if !reflect.DeepEqual(headers, want) {
t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want)
}
}
func disableGoroutineTracking() (restore func()) {
old := DebugGoroutines
DebugGoroutines = false
return func() { DebugGoroutines = old }
}
func BenchmarkServer_GetRequest(b *testing.B) {
defer disableGoroutineTracking()()
b.ReportAllocs()
const msg = "Hello, world."
st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
n, err := io.Copy(ioutil.Discard, r.Body)
if err != nil || n > 0 {
b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err)
}
io.WriteString(w, msg)
})
defer st.Close()
st.greet()
// Give the server enough connection-level flow-control quota to reply (in addition to its initial 64KB window).
if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
b.Fatal(err)
}
hbf := st.encodeHeader(":method", "GET")
for i := 0; i < b.N; i++ {
streamID := uint32(1 + 2*i)
st.writeHeaders(HeadersFrameParam{
StreamID: streamID,
BlockFragment: hbf,
EndStream: true,
EndHeaders: true,
})
st.wantHeaders()
st.wantData()
}
}
func BenchmarkServer_PostRequest(b *testing.B) {
defer disableGoroutineTracking()()
b.ReportAllocs()
const msg = "Hello, world."
st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
n, err := io.Copy(ioutil.Discard, r.Body)
if err != nil || n > 0 {
b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err)
}
io.WriteString(w, msg)
})
defer st.Close()
st.greet()
// Give the server enough connection-level flow-control quota to reply (in addition to its initial 64KB window).
if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
b.Fatal(err)
}
hbf := st.encodeHeader(":method", "POST")
for i := 0; i < b.N; i++ {
streamID := uint32(1 + 2*i)
st.writeHeaders(HeadersFrameParam{
StreamID: streamID,
BlockFragment: hbf,
EndStream: false,
EndHeaders: true,
})
st.writeData(streamID, true, nil)
st.wantHeaders()
st.wantData()
}
}
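// connStateConn wraps a net.Conn and reports a canned tls.ConnectionState, so ServeConn
// can be exercised with something other than a *tls.Conn.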
type connStateConn struct {
net.Conn
cs tls.ConnectionState
}
func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs }
// golang.org/issue/12737 -- handle any net.Conn, not just
// *tls.Conn.
func TestServerHandleCustomConn(t *testing.T) {
var s Server
c1, c2 := net.Pipe()
clientDone := make(chan struct{})
handlerDone := make(chan struct{})
var req *http.Request
go func() {
defer close(clientDone)
defer c2.Close()
fr := NewFramer(c2, c2)
io.WriteString(c2, ClientPreface)
fr.WriteSettings()
fr.WriteSettingsAck()
f, err := fr.ReadFrame()
if err != nil {
t.Error(err)
return
}
if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() {
t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f))
return
}
f, err = fr.ReadFrame()
if err != nil {
t.Error(err)
return
}
if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() {
t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f))
return
}
var henc hpackEncoder
fr.WriteHeaders(HeadersFrameParam{
StreamID: 1,
BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"),
EndStream: true,
EndHeaders: true,
})
go io.Copy(ioutil.Discard, c2)
<-handlerDone
}()
const testString = "my custom ConnectionState"
fakeConnState := tls.ConnectionState{
ServerName: testString,
Version: tls.VersionTLS12,
CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{
BaseConfig: &http.Server{
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer close(handlerDone)
req = r
}),
}})
select {
case <-clientDone:
case <-time.After(5 * time.Second):
t.Fatal("timeout waiting for handler")
}
if req.TLS == nil {
t.Fatalf("Request.TLS is nil. Got: %#v", req)
}
if req.TLS.ServerName != testString {
t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString)
}
}
// golang.org/issue/14214
func TestServer_Rejects_ConnHeaders(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
t.Error("should not get to Handler")
})
defer st.Close()
st.greet()
st.bodylessReq1("connection", "foo")
hf := st.wantHeaders()
goth := st.decodeHeader(hf.HeaderBlockFragment())
wanth := [][2]string{
{":status", "400"},
{"content-type", "text/plain; charset=utf-8"},
{"x-content-type-options", "nosniff"},
{"content-length", "51"},
}
if !reflect.DeepEqual(goth, wanth) {
t.Errorf("Got headers %v; want %v", goth, wanth)
}
}
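// hpackEncoder is a small test helper that HPACK-encodes alternating name/value pairs into a reusable buffer.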
type hpackEncoder struct {
enc *hpack.Encoder
buf bytes.Buffer
}
func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte {
if len(headers)%2 == 1 {
panic("odd number of kv args")
}
he.buf.Reset()
if he.enc == nil {
he.enc = hpack.NewEncoder(&he.buf)
}
for len(headers) > 0 {
k, v := headers[0], headers[1]
err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v})
if err != nil {
t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err)
}
headers = headers[2:]
}
return he.buf.Bytes()
}
func TestCheckValidHTTP2Request(t *testing.T) {
tests := []struct {
h http.Header
want error
}{
{
h: http.Header{"Te": {"trailers"}},
want: nil,
},
{
h: http.Header{"Te": {"trailers", "bogus"}},
want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`),
},
{
h: http.Header{"Foo": {""}},
want: nil,
},
{
h: http.Header{"Connection": {""}},
want: errors.New(`request header "Connection" is not valid in HTTP/2`),
},
{
h: http.Header{"Proxy-Connection": {""}},
want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`),
},
{
h: http.Header{"Keep-Alive": {""}},
want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`),
},
{
h: http.Header{"Upgrade": {""}},
want: errors.New(`request header "Upgrade" is not valid in HTTP/2`),
},
}
for i, tt := range tests {
got := checkValidHTTP2RequestHeaders(tt.h)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want)
}
}
}
// golang.org/issue/14030
func TestExpect100ContinueAfterHandlerWrites(t *testing.T) {
const msg = "Hello"
const msg2 = "World"
doRead := make(chan bool, 1)
defer close(doRead) // fallback cleanup
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
io.WriteString(w, msg)
w.(http.Flusher).Flush()
// Do a read, which might force a 100-continue status to be sent.
<-doRead
r.Body.Read(make([]byte, 10))
io.WriteString(w, msg2)
}, optOnlyServer)
defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20))
req.Header.Set("Expect", "100-continue")
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
buf := make([]byte, len(msg))
if _, err := io.ReadFull(res.Body, buf); err != nil {
t.Fatal(err)
}
if string(buf) != msg {
t.Fatalf("msg = %q; want %q", buf, msg)
}
doRead <- true
if _, err := io.ReadFull(res.Body, buf); err != nil {
t.Fatal(err)
}
if string(buf) != msg2 {
t.Fatalf("second msg = %q; want %q", buf, msg2)
}
}
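// funcReader adapts an ordinary function to the io.Reader interface.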
type funcReader func([]byte) (n int, err error)
func (f funcReader) Read(p []byte) (n int, err error) { return f(p) }
// golang.org/issue/16481 -- return flow control when streams close with unread data.
// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport)
func TestUnreadFlowControlReturned_Server(t *testing.T) {
unblock := make(chan bool, 1)
defer close(unblock)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
// Don't read the 16KB request body. Wait until the client's
// done sending it and then return. This should cause the Server
// to then return those 16KB of flow control to the client.
<-unblock
}, optOnlyServer)
defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
// This previously hung on the 4th iteration.
for i := 0; i < 6; i++ {
body := io.MultiReader(
io.LimitReader(neverEnding('A'), 16<<10),
funcReader(func([]byte) (n int, err error) {
unblock <- true
return 0, io.EOF
}),
)
req, _ := http.NewRequest("POST", st.ts.URL, body)
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
}
}
func TestServerIdleTimeout(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
}, func(h2s *Server) {
h2s.IdleTimeout = 500 * time.Millisecond
})
defer st.Close()
st.greet()
ga := st.wantGoAway()
if ga.ErrCode != ErrCodeNo {
t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode)
}
}
func TestServerIdleTimeout_AfterRequest(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
const timeout = 250 * time.Millisecond
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
time.Sleep(timeout * 2)
}, func(h2s *Server) {
h2s.IdleTimeout = timeout
})
defer st.Close()
st.greet()
// Send a request which takes twice the timeout. Verifies the
// idle timeout doesn't fire while we're in a request:
st.bodylessReq1()
st.wantHeaders()
// But the idle timeout should be rearmed after the request
// is done:
ga := st.wantGoAway()
if ga.ErrCode != ErrCodeNo {
t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode)
}
}
// grpc-go closes the Request.Body currently with a Read.
// Verify that it doesn't race.
// See https://github.com/grpc/grpc-go/pull/938
func TestRequestBodyReadCloseRace(t *testing.T) {
for i := 0; i < 100; i++ {
body := &requestBody{
pipe: &pipe{
b: new(bytes.Buffer),
},
}
body.pipe.CloseWithError(io.EOF)
done := make(chan bool, 1)
buf := make([]byte, 10)
go func() {
time.Sleep(1 * time.Millisecond)
body.Close()
done <- true
}()
body.Read(buf)
<-done
}
}
func TestIssue20704Race(t *testing.T) {
if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
t.Skip("skipping in short mode")
}
const (
itemSize = 1 << 10
itemCount = 100
)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
for i := 0; i < itemCount; i++ {
_, err := w.Write(make([]byte, itemSize))
if err != nil {
return
}
}
}, optOnlyServer)
defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
cl := &http.Client{Transport: tr}
for i := 0; i < 1000; i++ {
resp, err := cl.Get(st.ts.URL)
if err != nil {
t.Fatal(err)
}
// Force a RST stream to the server by closing without
// reading the body:
resp.Body.Close()
}
}
func TestServer_Rejects_TooSmall(t *testing.T) {
testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
return nil
}, func(st *serverTester) {
st.writeHeaders(HeadersFrameParam{
StreamID: 1, // clients send odd numbers
BlockFragment: st.encodeHeader(
":method", "POST",
"content-length", "4",
),
EndStream: false, // to say DATA frames are coming
EndHeaders: true,
})
st.writeData(1, true, []byte("12345"))
st.wantRSTStream(1, ErrCodeProtocol)
})
}
|
[
"\"GO_BUILDER_NAME\""
] |
[] |
[
"GO_BUILDER_NAME"
] |
[]
|
["GO_BUILDER_NAME"]
|
go
| 1 | 0 | |
fe/fe-core/src/test/java/com/starrocks/utframe/UtFrameUtils.java
|
// This file is made available under Elastic License 2.0.
// This file is based on code available under the Apache license here:
// https://github.com/apache/incubator-doris/blob/master/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.starrocks.utframe;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.starrocks.analysis.Analyzer;
import com.starrocks.analysis.SelectStmt;
import com.starrocks.analysis.SetVar;
import com.starrocks.analysis.SqlParser;
import com.starrocks.analysis.SqlScanner;
import com.starrocks.analysis.StatementBase;
import com.starrocks.analysis.StringLiteral;
import com.starrocks.analysis.UserIdentity;
import com.starrocks.catalog.Catalog;
import com.starrocks.catalog.DiskInfo;
import com.starrocks.catalog.OlapTable;
import com.starrocks.common.AnalysisException;
import com.starrocks.common.Config;
import com.starrocks.common.DdlException;
import com.starrocks.common.Pair;
import com.starrocks.common.util.SqlParserUtils;
import com.starrocks.mysql.privilege.Auth;
import com.starrocks.planner.PlanFragment;
import com.starrocks.planner.Planner;
import com.starrocks.planner.PlannerContext;
import com.starrocks.qe.ConnectContext;
import com.starrocks.qe.QueryState;
import com.starrocks.qe.SessionVariable;
import com.starrocks.qe.StmtExecutor;
import com.starrocks.qe.VariableMgr;
import com.starrocks.sql.analyzer.relation.Relation;
import com.starrocks.sql.optimizer.OperatorStrings;
import com.starrocks.sql.optimizer.OptExpression;
import com.starrocks.sql.optimizer.Optimizer;
import com.starrocks.sql.optimizer.base.ColumnRefFactory;
import com.starrocks.sql.optimizer.base.ColumnRefSet;
import com.starrocks.sql.optimizer.base.PhysicalPropertySet;
import com.starrocks.sql.optimizer.dump.QueryDumpInfo;
import com.starrocks.sql.optimizer.statistics.ColumnStatistic;
import com.starrocks.sql.optimizer.transformer.LogicalPlan;
import com.starrocks.sql.optimizer.transformer.RelationTransformer;
import com.starrocks.sql.plan.ExecPlan;
import com.starrocks.sql.plan.PlanFragmentBuilder;
import com.starrocks.statistic.Constants;
import com.starrocks.system.Backend;
import com.starrocks.system.SystemInfoService;
import com.starrocks.thrift.TExplainLevel;
import com.starrocks.thrift.TNetworkAddress;
import com.starrocks.utframe.MockedFrontend.EnvVarNotSetException;
import com.starrocks.utframe.MockedFrontend.FeStartException;
import com.starrocks.utframe.MockedFrontend.NotInitException;
import org.apache.commons.io.FileUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.io.StringReader;
import java.net.ServerSocket;
import java.nio.channels.FileLock;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static com.starrocks.sql.plan.PlanTestBase.setPartitionStatistics;
public class UtFrameUtils {
private static final Logger LOG = LogManager.getLogger(UtFrameUtils.class);
public static final String createStatisticsTableStmt = "CREATE TABLE `table_statistic_v1` (\n" +
" `table_id` bigint(20) NOT NULL COMMENT \"\",\n" +
" `column_name` varchar(65530) NOT NULL COMMENT \"\",\n" +
" `db_id` bigint(20) NOT NULL COMMENT \"\",\n" +
" `table_name` varchar(65530) NOT NULL COMMENT \"\",\n" +
" `db_name` varchar(65530) NOT NULL COMMENT \"\",\n" +
" `row_count` bigint(20) NOT NULL COMMENT \"\",\n" +
" `data_size` bigint(20) NOT NULL COMMENT \"\",\n" +
" `distinct_count` bigint(20) NOT NULL COMMENT \"\",\n" +
" `null_count` bigint(20) NOT NULL COMMENT \"\",\n" +
" `max` varchar(65530) NOT NULL COMMENT \"\",\n" +
" `min` varchar(65530) NOT NULL COMMENT \"\",\n" +
" `update_time` datetime NOT NULL COMMENT \"\"\n" +
") ENGINE=OLAP\n" +
"UNIQUE KEY(`table_id`, `column_name`, `db_id`)\n" +
"COMMENT \"OLAP\"\n" +
"DISTRIBUTED BY HASH(`table_id`, `column_name`, `db_id`) BUCKETS 10\n" +
"PROPERTIES (\n" +
"\"replication_num\" = \"1\",\n" +
"\"in_memory\" = \"false\",\n" +
"\"storage_format\" = \"DEFAULT\"\n" +
");";
// Helper to create a mocked ConnectContext.
public static ConnectContext createDefaultCtx() throws IOException {
ConnectContext ctx = new ConnectContext(null);
ctx.setCluster(SystemInfoService.DEFAULT_CLUSTER);
ctx.setCurrentUserIdentity(UserIdentity.ROOT);
ctx.setQualifiedUser(Auth.ROOT_USER);
ctx.setCatalog(Catalog.getCurrentCatalog());
ctx.setThreadLocalInfo();
ctx.getSessionVariable().disableNewPlanner();
return ctx;
}
// Helper to create a mocked test ConnectContext.
public static ConnectContext createTestUserCtx(UserIdentity testUser) throws IOException {
ConnectContext ctx = new ConnectContext(null);
ctx.setCluster(SystemInfoService.DEFAULT_CLUSTER);
ctx.setCurrentUserIdentity(testUser);
ctx.setQualifiedUser(testUser.getQualifiedUser());
ctx.setCatalog(Catalog.getCurrentCatalog());
ctx.setThreadLocalInfo();
return ctx;
}
// Parse an origin stmt and analyze it with the new analyzer. Return a StatementBase instance.
public static StatementBase parseStmtWithNewAnalyzer(String originStmt, ConnectContext ctx)
throws Exception {
SqlScanner input = new SqlScanner(new StringReader(originStmt), ctx.getSessionVariable().getSqlMode());
SqlParser parser = new SqlParser(input);
com.starrocks.sql.analyzer.Analyzer analyzer =
new com.starrocks.sql.analyzer.Analyzer(ctx.getCatalog(), ctx);
StatementBase statementBase = null;
try {
statementBase = SqlParserUtils.getFirstStmt(parser);
} catch (AnalysisException e) {
String errorMessage = parser.getErrorMsg(originStmt);
System.err.println("parse failed: " + errorMessage);
if (errorMessage == null) {
throw e;
} else {
throw new AnalysisException(errorMessage, e);
}
}
Relation relation = analyzer.analyze(statementBase);
return statementBase;
}
// Parse an origin stmt and analyze it. Return a StatementBase instance.
public static StatementBase parseAndAnalyzeStmt(String originStmt, ConnectContext ctx)
throws Exception {
SqlScanner input = new SqlScanner(new StringReader(originStmt), ctx.getSessionVariable().getSqlMode());
SqlParser parser = new SqlParser(input);
Analyzer analyzer = new Analyzer(ctx.getCatalog(), ctx);
StatementBase statementBase = null;
try {
statementBase = SqlParserUtils.getFirstStmt(parser);
} catch (AnalysisException e) {
String errorMessage = parser.getErrorMsg(originStmt);
System.err.println("parse failed: " + errorMessage);
if (errorMessage == null) {
throw e;
} else {
throw new AnalysisException(errorMessage, e);
}
}
statementBase.analyze(analyzer);
return statementBase;
}
// for parsing and analyzing multiple statements
public static List<StatementBase> parseAndAnalyzeStmts(String originStmt, ConnectContext ctx) throws Exception {
System.out.println("begin to parse stmts: " + originStmt);
SqlScanner input = new SqlScanner(new StringReader(originStmt), ctx.getSessionVariable().getSqlMode());
SqlParser parser = new SqlParser(input);
Analyzer analyzer = new Analyzer(ctx.getCatalog(), ctx);
List<StatementBase> statementBases = null;
try {
statementBases = SqlParserUtils.getMultiStmts(parser);
} catch (AnalysisException e) {
String errorMessage = parser.getErrorMsg(originStmt);
System.err.println("parse failed: " + errorMessage);
if (errorMessage == null) {
throw e;
} else {
throw new AnalysisException(errorMessage, e);
}
}
for (StatementBase stmt : statementBases) {
stmt.analyze(analyzer);
}
return statementBases;
}
public static void startFEServer(String runningDir) throws EnvVarNotSetException, IOException,
FeStartException, NotInitException, DdlException, InterruptedException {
// get STARROCKS_HOME
String starRocksHome = System.getenv("STARROCKS_HOME");
if (Strings.isNullOrEmpty(starRocksHome)) {
starRocksHome = Files.createTempDirectory("STARROCKS_HOME").toAbsolutePath().toString();
}
Config.plugin_dir = starRocksHome + "/plugins";
// the FE only needs edit_log_port
int fe_edit_log_port = findValidPort();
// start fe in "STARROCKS_HOME/fe/mocked/"
MockedFrontend frontend = MockedFrontend.getInstance();
Map<String, String> feConfMap = Maps.newHashMap();
// set additional fe config
feConfMap.put("edit_log_port", String.valueOf(fe_edit_log_port));
feConfMap.put("tablet_create_timeout_second", "10");
frontend.init(starRocksHome + "/" + runningDir, feConfMap);
frontend.start(new String[0]);
}
public static void createMinStarRocksCluster(String runningDir) throws EnvVarNotSetException, IOException,
FeStartException, NotInitException, DdlException, InterruptedException {
startFEServer(runningDir);
addMockBackend(10001);
// sleep to wait for the first heartbeat
int retry = 0;
while (Catalog.getCurrentSystemInfo().getBackend(10001).getBePort() == -1 &&
retry++ < 600) {
Thread.sleep(1000);
}
}
public static void addMockBackend(int backendId) throws IOException {
int fe_rpc_port = MockedFrontend.getInstance().getRpcPort();
// start be
MockedBackend backend = null;
for (int retry = 1; retry <= 5; retry++) {
int be_heartbeat_port = findValidPort();
int be_thrift_port = findValidPort();
int be_brpc_port = findValidPort();
int be_http_port = findValidPort();
backend = MockedBackendFactory.createBackend("127.0.0.1",
be_heartbeat_port, be_thrift_port, be_brpc_port, be_http_port,
new MockedBackendFactory.DefaultHeartbeatServiceImpl(be_thrift_port, be_http_port, be_brpc_port),
new MockedBackendFactory.DefaultBeThriftServiceImpl(),
new MockedBackendFactory.DefaultPBackendServiceImpl());
backend.setFeAddress(new TNetworkAddress("127.0.0.1", fe_rpc_port));
try {
backend.start();
break;
} catch (IOException ex) {
System.out.println("start be fail, message : " + ex.getMessage());
if (retry == 5) {
throw ex;
}
}
}
// add be
Backend be = new Backend(backendId, backend.getHost(), backend.getHeartbeatPort());
Map<String, DiskInfo> disks = Maps.newHashMap();
DiskInfo diskInfo1 = new DiskInfo(backendId + "/path1");
diskInfo1.setTotalCapacityB(1000000);
diskInfo1.setAvailableCapacityB(500000);
diskInfo1.setDataUsedCapacityB(480000);
disks.put(diskInfo1.getRootPath(), diskInfo1);
be.setDisks(ImmutableMap.copyOf(disks));
be.setAlive(true);
be.setOwnerClusterName(SystemInfoService.DEFAULT_CLUSTER);
Catalog.getCurrentSystemInfo().addBackend(be);
}
public static void dropMockBackend(int backendId) throws DdlException {
Catalog.getCurrentSystemInfo().dropBackend(backendId);
}
public static void cleanStarRocksFeDir(String baseDir) {
try {
FileUtils.deleteDirectory(new File(baseDir));
} catch (IOException e) {
}
}
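// Pick a free TCP port and reserve it by locking a marker file under $STARROCKS_HOME/fe/ut_ports,
// so concurrently running tests do not grab the same port.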
public static int findValidPort() {
String starRocksHome = System.getenv("STARROCKS_HOME");
for (int i = 0; i < 10; i++) {
try (ServerSocket socket = new ServerSocket(0)) {
socket.setReuseAddress(true);
int port = socket.getLocalPort();
File file = new File(starRocksHome + "/fe/ut_ports/" + port);
if (file.exists()) {
continue;
}
RandomAccessFile accessFile = new RandomAccessFile(file, "rws");
FileLock lock = accessFile.getChannel().tryLock();
if (lock == null) {
continue;
}
System.out.println("find valid port " + port + new Date());
return port;
} catch (Exception e) {
e.printStackTrace();
throw new IllegalStateException("Could not find a free TCP/IP port " + e.getMessage());
}
}
throw new RuntimeException("can not find valid port");
}
public static String getSQLPlanOrErrorMsg(ConnectContext ctx, String queryStr) throws Exception {
ctx.getState().reset();
StmtExecutor stmtExecutor = new StmtExecutor(ctx, queryStr);
stmtExecutor.execute();
if (ctx.getState().getStateType() != QueryState.MysqlStateType.ERR) {
Planner planner = stmtExecutor.planner();
return planner.getExplainString(planner.getFragments(), TExplainLevel.NORMAL);
} else {
return ctx.getState().getErrorMessage();
}
}
public static String getPlanThriftString(ConnectContext ctx, String queryStr) throws Exception {
ctx.getState().reset();
StmtExecutor stmtExecutor = new StmtExecutor(ctx, queryStr);
stmtExecutor.execute();
if (ctx.getState().getStateType() != QueryState.MysqlStateType.ERR) {
Planner planner = stmtExecutor.planner();
return getThriftString(planner.getFragments());
} else {
return ctx.getState().getErrorMessage();
}
}
public static String getPlanThriftStringForNewPlanner(ConnectContext ctx, String queryStr) throws Exception {
return UtFrameUtils.getThriftString(UtFrameUtils.getNewPlanAndFragment(ctx, queryStr).second.getFragments());
}
public static Pair<String, ExecPlan> getNewPlanAndFragment(ConnectContext connectContext, String originStmt)
throws Exception {
connectContext.setDumpInfo(new QueryDumpInfo(connectContext.getSessionVariable()));
SqlScanner input =
new SqlScanner(new StringReader(originStmt), connectContext.getSessionVariable().getSqlMode());
SqlParser parser = new SqlParser(input);
StatementBase statementBase = SqlParserUtils.getFirstStmt(parser);
connectContext.getDumpInfo().setOriginStmt(originStmt);
SessionVariable oldSessionVariable = connectContext.getSessionVariable();
try {
// update session variable by adding optional hints.
if (statementBase instanceof SelectStmt) {
Map<String, String> optHints = ((SelectStmt) statementBase).getSelectList().getOptHints();
if (optHints != null) {
SessionVariable sessionVariable = (SessionVariable) oldSessionVariable.clone();
for (String key : optHints.keySet()) {
VariableMgr.setVar(sessionVariable, new SetVar(key, new StringLiteral(optHints.get(key))));
}
connectContext.setSessionVariable(sessionVariable);
}
}
com.starrocks.sql.analyzer.Analyzer analyzer =
new com.starrocks.sql.analyzer.Analyzer(Catalog.getCurrentCatalog(), connectContext);
Relation relation = analyzer.analyze(statementBase);
ColumnRefFactory columnRefFactory = new ColumnRefFactory();
LogicalPlan logicalPlan = new RelationTransformer(columnRefFactory).transform(relation);
Optimizer optimizer = new Optimizer();
OptExpression optimizedPlan = optimizer.optimize(
connectContext,
logicalPlan.getRoot(),
new PhysicalPropertySet(),
new ColumnRefSet(logicalPlan.getOutputColumn()),
columnRefFactory);
PlannerContext plannerContext =
new PlannerContext(null, null, connectContext.getSessionVariable().toThrift(), null);
ExecPlan execPlan = new PlanFragmentBuilder()
.createPhysicalPlan(optimizedPlan, plannerContext, connectContext,
logicalPlan.getOutputColumn(), columnRefFactory, new ArrayList<>());
OperatorStrings operatorPrinter = new OperatorStrings();
return new Pair<>(operatorPrinter.printOperator(optimizedPlan), execPlan);
} finally {
// before returning we have to restore the session variable.
connectContext.setSessionVariable(oldSessionVariable);
}
}
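// Replay a query dump: recreate the recorded databases, tables, backends and statistics,
// then run the dumped SQL through the new optimizer and return the printed plan together with the ExecPlan.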
public static Pair<String, ExecPlan> getNewPlanAndFragmentFromDump(ConnectContext connectContext,
QueryDumpInfo replayDumpInfo) throws Exception {
// mock statistics table
StarRocksAssert starRocksAssert = new StarRocksAssert(connectContext);
if (!starRocksAssert.databaseExist("_statistics_")) {
starRocksAssert.withDatabaseWithoutAnalyze(Constants.StatisticsDBName).useDatabase(Constants.StatisticsDBName);
starRocksAssert.withTable(createStatisticsTableStmt);
}
// prepare dump mock environment
// statement
String replaySql = replayDumpInfo.getOriginStmt();
// session variable
connectContext.setSessionVariable(replayDumpInfo.getSessionVariable());
// create table
int backendId = 10002;
int backendIdSize = connectContext.getCatalog().getCurrentSystemInfo().getBackendIds(true).size();
for (int i = 1; i < backendIdSize; ++i) {
UtFrameUtils.dropMockBackend(backendId++);
}
Set<String> dbSet = replayDumpInfo.getCreateTableStmtMap().keySet().stream().map(key -> key.split("\\.")[0])
.collect(Collectors.toSet());
dbSet.stream().forEach(db -> {
if (starRocksAssert.databaseExist(db)) {
try {
starRocksAssert.dropDatabase(db);
} catch (Exception e) {
e.printStackTrace();
}
}
});
for (Map.Entry<String, String> entry : replayDumpInfo.getCreateTableStmtMap().entrySet()) {
String dbName = entry.getKey().split("\\.")[0];
if (!starRocksAssert.databaseExist(dbName)) {
starRocksAssert.withDatabase(dbName);
}
starRocksAssert.useDatabase(dbName);
starRocksAssert.withTable(entry.getValue());
}
// mock be num
backendId = 10002;
for (int i = 1; i < replayDumpInfo.getBeNum(); ++i) {
UtFrameUtils.addMockBackend(backendId++);
}
// mock table row count
for (Map.Entry<String, Map<String, Long>> entry : replayDumpInfo.getPartitionRowCountMap().entrySet()) {
String dbName = entry.getKey().split("\\.")[0];
OlapTable replayTable = (OlapTable) connectContext.getCatalog().getDb("default_cluster:" + dbName)
.getTable(entry.getKey().split("\\.")[1]);
for (Map.Entry<String, Long> partitionEntry : entry.getValue().entrySet()) {
setPartitionStatistics(replayTable, partitionEntry.getKey(), partitionEntry.getValue());
}
}
// mock table column statistics
for (Map.Entry<String, Map<String, ColumnStatistic>> entry : replayDumpInfo.getTableStatisticsMap()
.entrySet()) {
String dbName = entry.getKey().split("\\.")[0];
OlapTable replayTable = (OlapTable) connectContext.getCatalog().getDb("default_cluster:" + dbName)
.getTable(entry.getKey().split("\\.")[1]);
for (Map.Entry<String, ColumnStatistic> columnStatisticEntry : entry.getValue().entrySet()) {
Catalog.getCurrentStatisticStorage().addColumnStatistic(replayTable, columnStatisticEntry.getKey(),
columnStatisticEntry.getValue());
}
}
SqlScanner input =
new SqlScanner(new StringReader(replaySql), replayDumpInfo.getSessionVariable().getSqlMode());
SqlParser parser = new SqlParser(input);
StatementBase statementBase = SqlParserUtils.getFirstStmt(parser);
com.starrocks.sql.analyzer.Analyzer analyzer =
new com.starrocks.sql.analyzer.Analyzer(Catalog.getCurrentCatalog(), connectContext);
Relation relation = analyzer.analyze(statementBase);
ColumnRefFactory columnRefFactory = new ColumnRefFactory();
LogicalPlan logicalPlan = new RelationTransformer(columnRefFactory).transform(relation);
Optimizer optimizer = new Optimizer();
OptExpression optimizedPlan = optimizer.optimize(
connectContext,
logicalPlan.getRoot(),
new PhysicalPropertySet(),
new ColumnRefSet(logicalPlan.getOutputColumn()),
columnRefFactory);
PlannerContext plannerContext =
new PlannerContext(null, null, connectContext.getSessionVariable().toThrift(), null);
ExecPlan execPlan = new PlanFragmentBuilder()
.createPhysicalPlan(optimizedPlan, plannerContext, connectContext,
logicalPlan.getOutputColumn(), columnRefFactory, new ArrayList<>());
OperatorStrings operatorPrinter = new OperatorStrings();
return new Pair<>(operatorPrinter.printOperator(optimizedPlan), execPlan);
}
public static String getThriftString(List<PlanFragment> fragments) {
StringBuilder str = new StringBuilder();
for (int i = 0; i < fragments.size(); ++i) {
if (i > 0) {
// a blank line between plan fragments
str.append("\n");
}
str.append(fragments.get(i).toThrift());
}
return str.toString();
}
public static String getNewFragmentPlan(ConnectContext connectContext, String sql) throws Exception {
return getNewPlanAndFragment(connectContext, sql).second.getExplainString(TExplainLevel.NORMAL);
}
}
|
[
"\"STARROCKS_HOME\"",
"\"STARROCKS_HOME\""
] |
[] |
[
"STARROCKS_HOME"
] |
[]
|
["STARROCKS_HOME"]
|
java
| 1 | 0 | |
cmd/kat-server/services/http.go
|
package services
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
)
// HTTP server object (all fields are required).
type HTTP struct {
Port int16
Backend string
SecurePort int16
SecureBackend string
Cert string
Key string
TLSVersion string
}
func getTLSVersion(state *tls.ConnectionState) string {
switch state.Version {
case tls.VersionTLS10:
return "v1.0"
case tls.VersionTLS11:
return "v1.1"
case tls.VersionTLS12:
return "v1.2"
// TLS v1.3 is experimental; older Go releases have no tls.VersionTLS13 constant, so match the raw version value.
case 0x0304:
return "v1.3"
default:
return "unknown"
}
}
// Start initializes the HTTP server.
func (h *HTTP) Start() <-chan bool {
log.Printf("HTTP: %s listening on %d/%d", h.Backend, h.Port, h.SecurePort)
mux := http.NewServeMux()
mux.HandleFunc("/", h.handler)
exited := make(chan bool)
go func() {
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", h.Port), mux))
close(exited)
}()
go func() {
s := &http.Server{
Addr: fmt.Sprintf(":%v", h.SecurePort),
Handler: mux,
}
log.Fatal(s.ListenAndServeTLS(h.Cert, h.Key))
close(exited)
}()
return exited
}
// Helpers
func lower(m map[string][]string) (result map[string][]string) {
result = make(map[string][]string)
for k, v := range m {
result[strings.ToLower(k)] = v
}
return result
}
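// handler echoes the request (URL, headers, host and TLS state) back as a JSON document and
// honours the Requested-* control headers used by the KAT test suite (status, headers,
// cookies, location and backend delay).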
func (h *HTTP) handler(w http.ResponseWriter, r *http.Request) {
// Assume we're the clear side of the world.
backend := h.Backend
conntype := "CLR"
var request = make(map[string]interface{})
var url = make(map[string]interface{})
request["url"] = url
url["fragment"] = r.URL.Fragment
url["host"] = r.URL.Host
url["opaque"] = r.URL.Opaque
url["path"] = r.URL.Path
url["query"] = r.URL.Query()
url["rawQuery"] = r.URL.RawQuery
url["scheme"] = r.URL.Scheme
if r.URL.User != nil {
url["username"] = r.URL.User.Username()
pw, ok := r.URL.User.Password()
if ok {
url["password"] = pw
}
}
request["method"] = r.Method
request["headers"] = lower(r.Header)
request["host"] = r.Host
var tlsrequest = make(map[string]interface{})
request["tls"] = tlsrequest
tlsrequest["enabled"] = r.TLS != nil
if r.TLS != nil {
// We're the secure side of the world, I guess.
backend = h.SecureBackend
conntype = "TLS"
tlsrequest["negotiated-protocol"] = r.TLS.NegotiatedProtocol
tlsrequest["server-name"] = r.TLS.ServerName
tlsrequest["negotiated-protocol-version"] = getTLSVersion(r.TLS)
}
// respond with the requested status
status := r.Header.Get("Requested-Status")
if status == "" {
status = "200"
}
statusCode, err := strconv.Atoi(status)
if err != nil {
log.Print(err)
statusCode = 500
}
// copy the requested headers into the response
headers, ok := r.Header["Requested-Header"]
if ok {
for _, header := range headers {
canonical := http.CanonicalHeaderKey(header)
value, ok := r.Header[canonical]
if ok {
w.Header()[canonical] = value
}
}
}
if b, _ := ioutil.ReadAll(r.Body); b != nil {
body := string(b)
log.Printf("received body: %s", body)
w.Header()[http.CanonicalHeaderKey("Auth-Request-Body")] = []string{body}
}
defer r.Body.Close()
cookies, ok := r.Header["Requested-Cookie"]
if ok {
for _, v := range strings.Split(cookies[0], ",") {
val := strings.Trim(v, " ")
http.SetCookie(w, &http.Cookie{
Name: val,
Value: val,
})
}
}
// If they asked for a specific location to be returned, handle that too.
location, ok := r.Header["Requested-Location"]
if ok {
w.Header()[http.CanonicalHeaderKey("Location")] = location
}
addExtauthEnv := os.Getenv("INCLUDE_EXTAUTH_HEADER")
// KAT tests that send really big request headers might get a 503 if we echo the request
// headers back in the response, so allow tests to override the env var via a query parameter.
addExtAuthOverride := r.URL.Query().Get("override_extauth_header")
if len(addExtauthEnv) > 0 && len(addExtAuthOverride) == 0 {
extauth := make(map[string]interface{})
extauth["request"] = request
extauth["resp_headers"] = lower(w.Header())
eaJSON, err := json.Marshal(extauth)
if err != nil {
eaJSON = []byte(fmt.Sprintf("err: %v", err))
}
eaArray := make([]string, 1)
eaArray[0] = string(eaJSON)
w.Header()[http.CanonicalHeaderKey("extauth")] = eaArray
}
// Check header and delay response.
if h, ok := r.Header["Requested-Backend-Delay"]; ok {
if v, err := strconv.Atoi(h[0]); err == nil {
log.Printf("Delaying response by %v ms", v)
time.Sleep(time.Duration(v) * time.Millisecond)
}
}
// Set date response header.
w.Header().Set("Date", time.Now().Format(time.RFC1123))
w.WriteHeader(statusCode)
// Write out all request/response information
var response = make(map[string]interface{})
response["headers"] = lower(w.Header())
var body = make(map[string]interface{})
body["backend"] = backend
body["request"] = request
body["response"] = response
b, err := json.MarshalIndent(body, "", " ")
if err != nil {
b = []byte(fmt.Sprintf("Error: %v", err))
}
log.Printf("%s (%s): writing response HTTP %v", backend, conntype, statusCode)
w.Write(b)
}
|
[
"\"INCLUDE_EXTAUTH_HEADER\""
] |
[] |
[
"INCLUDE_EXTAUTH_HEADER"
] |
[]
|
["INCLUDE_EXTAUTH_HEADER"]
|
go
| 1 | 0 | |
core/wsgi.py
|
"""
WSGI config for itunes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
seatunnel-core/seatunnel-core-base/src/main/java/org/apache/seatunnel/command/BaseTaskExecuteCommand.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.seatunnel.command;
import org.apache.seatunnel.common.Constants;
import org.apache.seatunnel.common.config.CheckResult;
import org.apache.seatunnel.common.config.Common;
import org.apache.seatunnel.common.config.DeployMode;
import org.apache.seatunnel.env.RuntimeEnv;
import org.apache.seatunnel.plugin.Plugin;
import org.apache.seatunnel.utils.AsciiArtUtils;
import org.apache.seatunnel.utils.CompressionUtils;
import org.apache.commons.compress.archivers.ArchiveException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
/**
* Base task execute command. More details see:
* <ul>
* <li>{@link org.apache.seatunnel.command.flink.FlinkTaskExecuteCommand}</li>
* <li>{@link org.apache.seatunnel.command.spark.SparkTaskExecuteCommand}</li>
* </ul>
*
* @param <T> command args.
*/
public abstract class BaseTaskExecuteCommand<T extends CommandArgs> implements Command<T> {
private static final Logger LOGGER = LoggerFactory.getLogger(BaseTaskExecuteCommand.class);
/**
* Check the plugin config.
*
* @param plugins plugin list.
*/
protected void baseCheckConfig(List<? extends Plugin>... plugins) {
pluginCheck(plugins);
deployModeCheck();
}
/**
* Execute prepare method defined in {@link Plugin}.
*
* @param env runtimeEnv
* @param plugins plugin list
*/
protected void prepare(RuntimeEnv env, List<? extends Plugin>... plugins) {
for (List<? extends Plugin> pluginList : plugins) {
pluginList.forEach(plugin -> plugin.prepare(env));
}
}
/**
* Print the logo.
*/
protected void showAsciiLogo() {
String printAsciiLogo = System.getenv("SEATUNNEL_PRINT_ASCII_LOGO");
if ("true".equalsIgnoreCase(printAsciiLogo)) {
AsciiArtUtils.printAsciiArt(Constants.LOGO);
}
}
/**
* Execute the checkConfig method defined in {@link Plugin}.
*
* @param plugins plugin list
*/
private void pluginCheck(List<? extends Plugin>... plugins) {
for (List<? extends Plugin> pluginList : plugins) {
for (Plugin plugin : pluginList) {
CheckResult checkResult;
try {
checkResult = plugin.checkConfig();
} catch (Exception e) {
checkResult = CheckResult.error(e.getMessage());
}
if (!checkResult.isSuccess()) {
LOGGER.error("Plugin[{}] contains invalid config, error: {} \n", plugin.getClass().getName(), checkResult.getMsg());
System.exit(-1); // invalid configuration
}
}
}
}
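/**
 * In cluster deploy mode the plugins are shipped alongside the job as plugins.tar.gz;
 * list the work dir contents and decompress the archive before the task starts.
 */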
private void deployModeCheck() {
final Optional<String> mode = Common.getDeployMode();
if (mode.isPresent() && DeployMode.CLUSTER.getName().equals(mode.get())) {
LOGGER.info("preparing cluster mode work dir files...");
File workDir = new File(".");
for (File file : Objects.requireNonNull(workDir.listFiles())) {
LOGGER.warn("\t list file: " + file.getAbsolutePath());
}
// decompress plugin dir
File compressedFile = new File("plugins.tar.gz");
try {
File tempFile = CompressionUtils.unGzip(compressedFile, workDir);
try {
CompressionUtils.unTar(tempFile, workDir);
LOGGER.info("succeeded to decompress plugins.tar.gz");
} catch (ArchiveException e) {
LOGGER.error("failed to decompress plugins.tar.gz", e);
System.exit(-1);
}
} catch (IOException e) {
LOGGER.error("failed to decompress plugins.tar.gz", e);
System.exit(-1);
}
}
}
}
|
[
"\"SEATUNNEL_PRINT_ASCII_LOGO\""
] |
[] |
[
"SEATUNNEL_PRINT_ASCII_LOGO"
] |
[]
|
["SEATUNNEL_PRINT_ASCII_LOGO"]
|
java
| 1 | 0 | |
share/lib/python/neuron/rxd/rxd.py
|
from neuron import h, nrn, nrn_dll_sym
from . import species, node, section1d, region, generalizedReaction, constants
from .nodelist import NodeList
from .node import _point_indices
import weakref
import numpy
import ctypes
import atexit
from . import options
from .rxdException import RxDException
from . import initializer
import collections
import os
from distutils import sysconfig
import uuid
import sys
import itertools
from numpy.ctypeslib import ndpointer
import re
import platform
from warnings import warn
# aliases to avoid repeatedly doing multiple hash-table lookups
_numpy_array = numpy.array
_numpy_zeros = numpy.zeros
_species_get_all_species = species._get_all_species
_node_get_states = node._get_states
_section1d_transfer_to_legacy = section1d._transfer_to_legacy
_ctypes_c_int = ctypes.c_int
_weakref_ref = weakref.ref
_external_solver = None
_external_solver_initialized = False
_windows_dll_files = []
_windows_dll = []
make_time_ptr = nrn_dll_sym('make_time_ptr')
make_time_ptr.argtypes = [ctypes.py_object, ctypes.py_object]
make_time_ptr(h._ref_dt, h._ref_t)
_double_ptr = ctypes.POINTER(ctypes.c_double)
_int_ptr = ctypes.POINTER(_ctypes_c_int)
_long_ptr = ctypes.POINTER(ctypes.c_long)
fptr_prototype = ctypes.CFUNCTYPE(None)
set_nonvint_block = nrn_dll_sym('set_nonvint_block')
set_nonvint_block(nrn_dll_sym('rxd_nonvint_block'))
set_setup = nrn_dll_sym('set_setup')
set_setup.argtypes = [fptr_prototype]
set_initialize = nrn_dll_sym('set_initialize')
set_initialize.argtypes = [fptr_prototype]
scatter_concentrations = nrn_dll_sym('scatter_concentrations')
# Transfer extracellular concentrations to NEURON
_fih_transfer_ecs = h.FInitializeHandler(1, scatter_concentrations)
rxd_set_no_diffusion = nrn_dll_sym('rxd_set_no_diffusion')
setup_solver = nrn_dll_sym('setup_solver')
setup_solver.argtypes = [ndpointer(ctypes.c_double), ctypes.c_int, numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'), ctypes.c_int]
#states = None
_set_num_threads = nrn_dll_sym('set_num_threads')
_set_num_threads.argtypes = [ctypes.c_int]
_get_num_threads = nrn_dll_sym('get_num_threads')
_get_num_threads.restype = ctypes.c_int
free_conc_ptrs = nrn_dll_sym('free_conc_ptrs')
free_curr_ptrs = nrn_dll_sym('free_curr_ptrs')
clear_rates = nrn_dll_sym('clear_rates')
register_rate = nrn_dll_sym('register_rate')
register_rate.argtypes = [
ctypes.c_int, #num species
ctypes.c_int, #num parameters
ctypes.c_int, #num regions
ctypes.c_int, #num seg
numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #species ids
ctypes.c_int, #num ecs species
ctypes.c_int, #num ecs parameters
numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #ecs species ids
numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #ecs indices
ctypes.c_int, #num multicompartment reactions
numpy.ctypeslib.ndpointer(ctypes.c_double, flags='contiguous'), #multicompartment multipliers
ctypes.POINTER(ctypes.py_object), #voltage pointers
] #Reaction rate function
setup_currents = nrn_dll_sym('setup_currents')
setup_currents.argtypes = [
ctypes.c_int, #number of membrane currents
ctypes.c_int, #number induced currents
_int_ptr, #number of species involved in each membrane current
_int_ptr, #node indices
_double_ptr, #scaling (areas) of the fluxes
ctypes.POINTER(ctypes.py_object), #hoc pointers
_int_ptr, #maps for membrane fluxes
_int_ptr #maps for ecs fluxes
]
ics_register_reaction = nrn_dll_sym('ics_register_reaction')
ics_register_reaction.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,
_int_ptr,
numpy.ctypeslib.ndpointer(dtype=numpy.int64),
ctypes.c_int,
numpy.ctypeslib.ndpointer(dtype=numpy.float_),
]
ecs_register_reaction = nrn_dll_sym('ecs_register_reaction')
ecs_register_reaction.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,
_int_ptr,
]
set_hybrid_data = nrn_dll_sym('set_hybrid_data')
set_hybrid_data.argtypes = [
numpy.ctypeslib.ndpointer(dtype=numpy.int64),
numpy.ctypeslib.ndpointer(dtype=numpy.int64),
numpy.ctypeslib.ndpointer(dtype=numpy.int64),
numpy.ctypeslib.ndpointer(dtype=numpy.int64),
numpy.ctypeslib.ndpointer(dtype=numpy.int64),
numpy.ctypeslib.ndpointer(dtype=numpy.int64),
numpy.ctypeslib.ndpointer(dtype=numpy.float_),
numpy.ctypeslib.ndpointer(dtype=numpy.float_),
numpy.ctypeslib.ndpointer(dtype=numpy.float_),
numpy.ctypeslib.ndpointer(dtype=numpy.float_),
]
#ics_register_reaction = nrn_dll_sym('ics_register_reaction')
#ics_register_reaction.argtype = [ctypes.c_int, ctypes.c_int, _int_ptr, fptr_prototype]
set_euler_matrix = nrn_dll_sym('rxd_set_euler_matrix')
set_euler_matrix.argtypes = [
ctypes.c_int,
ctypes.c_int,
_long_ptr,
_long_ptr,
_double_ptr,
numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),
]
rxd_setup_curr_ptrs = nrn_dll_sym('rxd_setup_curr_ptrs')
rxd_setup_curr_ptrs.argtypes = [
ctypes.c_int,
_int_ptr,
numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),
ctypes.POINTER(ctypes.py_object),
]
rxd_setup_conc_ptrs = nrn_dll_sym('rxd_setup_conc_ptrs')
rxd_setup_conc_ptrs.argtypes = [
ctypes.c_int,
_int_ptr,
ctypes.POINTER(ctypes.py_object)
]
rxd_include_node_flux1D = nrn_dll_sym('rxd_include_node_flux1D')
rxd_include_node_flux1D.argtypes = [ctypes.c_int, _long_ptr, _double_ptr,
ctypes.POINTER(ctypes.py_object)]
rxd_include_node_flux3D = nrn_dll_sym('rxd_include_node_flux3D')
rxd_include_node_flux3D.argtypes = [ctypes.c_int, _int_ptr, _int_ptr, _long_ptr,
_double_ptr,
ctypes.POINTER(ctypes.py_object)]
_c_headers = """#include <math.h>
/*Some functions supported by numpy that aren't included in math.h
* names and arguments match the wrappers used in rxdmath.py
*/
double factorial(const double);
double degrees(const double);
void radians(const double, double*);
double log1p(const double);
double vtrap(const double, const double);
"""
def _list_to_cint_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.c_int * len(data))(*tuple(data))
def _list_to_cdouble_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.c_double * len(data))(*tuple(data))
def _list_to_clong_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.c_long * len(data))(*tuple(data))
def _list_to_pyobject_array(data):
if data is None or len(data) == 0:
return None
else:
return (ctypes.py_object * len(data))(*tuple(data))
def byeworld():
# do not call __del__ that rearrange memory for states
species.Species.__del__ = lambda x: None
species._ExtracellularSpecies.__del__ = lambda x: None
species._IntracellularSpecies.__del__ = lambda x: None
section1d.Section1D.__del__ = lambda x: None
generalizedReaction.GeneralizedReaction.__del__ = lambda x: None
# needed to prevent a seg-fault error at shutdown in at least some
# combinations of NEURON and Python, which I think is due to objects
# getting deleted out-of-order
global _react_matrix_solver
try:
del _react_matrix_solver
except NameError:
# if it didn't already exist, that's fine
pass
_windows_remove_dlls()
atexit.register(byeworld)
_cvode_object = h.CVode()
last_diam_change_cnt = None
last_structure_change_cnt = None
last_nrn_legacy_units = h.nrnunit_use_legacy()
_all_reactions = []
nrn_tree_solve = nrn_dll_sym('nrn_tree_solve')
nrn_tree_solve.restype = None
_dptr = _double_ptr
_dimensions = {1: h.SectionList(), 3: h.SectionList()}
_dimensions_default = 1
_default_dx = 0.25
_default_method = 'deterministic'
#CRxD
_diffusion_d = None
_diffusion_a = None
_diffusion_b = None
_diffusion_p = None
_diffusion_a_ptr, _diffusion_b_ptr, _diffusion_p_ptr = None, None, None
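# Look up the discretization dimension (1 or 3) registered for a section; if an explicit dim is
# requested the section is moved between the SectionLists, and unregistered sections are added
# with the current default.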
def _domain_lookup(sec, dim=None):
for d, sl in _dimensions.items():
if sec in sl:
if dim is not None and d != dim:
sl.remove(sec)
return _domain_lookup(sec, dim)
return d
dimension = dim if dim else _dimensions_default
_dimensions[dimension].append(sec)
return dimension
def set_solve_type(domain=None, dimension=None, dx=None, nsubseg=None, method=None):
"""Specify the numerical discretization and solver options.
domain -- a section or Python iterable of sections"""
global _dimensions_default, _dimensions
setting_default = False
if domain is None:
domain = h.allsec()
setting_default = True
elif isinstance(domain, nrn.Section):
domain = [domain]
# NOTE: These attributes are set on a per-nrn.Section basis; they cannot
# assume Section1D objects exist because they might be specified before
# those objects are created
# domain is now always an iterable (or invalid)
if method is not None:
raise RxDException('using set_solve_type to specify method is not yet implemented')
if dimension is not None:
if dimension not in (1, 3):
raise RxDException('invalid option to set_solve_type: dimension must be 1 or 3')
if setting_default:
_dimensions_default = dimension
for sec in domain:
_domain_lookup(sec, dimension)
if dx is not None:
raise RxDException('using set_solve_type to specify dx is not yet implemented')
if nsubseg is not None:
raise RxDException('using set_solve_type to specify nsubseg is not yet implemented')
def _unregister_reaction(r):
global _all_reactions
react = r() if isinstance(r, weakref.ref) else r
with initializer._init_lock:
_all_reactions = list(filter(lambda x: x() is not None and x() != react, _all_reactions))
def _register_reaction(r):
# TODO: should we search to make sure that (a weakref to) r hasn't already been added?
global _all_reactions, _external_solver_initialized
with initializer._init_lock:
_all_reactions.append(_weakref_ref(r))
_external_solver_initialized = False
def _after_advance():
global last_diam_change_cnt
last_diam_change_cnt = _diam_change_count.value
def re_init():
"""reinitializes all rxd concentrations to match HOC values, updates matrices"""
global _external_solver_initialized
h.define_shape()
if not species._has_3d:
# TODO: if we do have 3D, make sure that we do the necessary parts of this
# update current pointers
section1d._purge_cptrs()
for sr in _species_get_all_species():
s = sr()
if s is not None:
s._register_cptrs()
# update matrix equations
_setup_matrices()
for sr in _species_get_all_species():
s = sr()
if s is not None: s.re_init()
# TODO: is this safe?
_cvode_object.re_init()
_external_solver_initialized = False
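# Gather membrane current pointers, scale factors and index maps from every species and reaction
# and hand them to the C solver via rxd_setup_curr_ptrs/setup_currents.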
def _setup_memb_currents():
initializer._do_init()
# setup membrane fluxes from our stuff
# TODO: cache the memb_cur_ptrs, memb_cur_charges, memb_net_charges, memb_cur_mapped
# because won't change very often
# need this; think it's because of initialization of mod files
# setup for induced membrane currents
cur_node_indices = []
cur_map = {}
curr_indices = []
curr_scales = []
curr_ptrs = []
for sr in _species_get_all_species():
s = sr()
if s is not None: s._setup_currents(curr_indices, curr_scales, curr_ptrs, cur_map)
num = len(curr_ptrs)
if num:
curr_ptr_vector = _h_ptrvector(num)
curr_ptr_vector.ptr_update_callback(_donothing)
for i, ptr in enumerate(curr_ptrs):
curr_ptr_vector.pset(i, ptr)
curr_ptr_storage_nrn = _h_vector(num)
else:
curr_ptr_vector = None
curr_ptr_storage_nrn = None
for rptr in _all_reactions:
r = rptr()
if r is not None:
r._update_indices()
r._setup_membrane_fluxes(cur_node_indices, cur_map)
if not curr_indices:
free_curr_ptrs()
return
rxd_setup_curr_ptrs(len(curr_indices),
_list_to_cint_array(curr_indices),
numpy.concatenate(curr_scales),
_list_to_pyobject_array(curr_ptrs))
SPECIES_ABSENT = -1
# TODO: change so that this is only called when there are in fact currents
rxd_memb_scales = []
memb_cur_ptrs = []
memb_cur_mapped = []
memb_cur_mapped_ecs = []
memb_cur_ptrs= []
for rptr in _all_reactions:
r = rptr()
if r and r._membrane_flux:
r._do_memb_scales(cur_map)
scales = r._memb_scales
rxd_memb_scales.extend(scales)
memb_cur_ptrs += r._cur_ptrs
memb_cur_mapped += r._cur_mapped
memb_cur_mapped_ecs += r._cur_mapped_ecs
ecs_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped_ecs)))]
ics_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped)))]
if memb_cur_ptrs:
cur_counts = [len(x) for x in memb_cur_mapped] #TODO: is len(x) the same for all x?
num_fluxes = numpy.array(cur_counts).sum()
num_currents = len(memb_cur_ptrs)
memb_cur_ptrs = list(itertools.chain.from_iterable(memb_cur_ptrs))
"""print("num_currents",num_currents)
print("num_fluxes",num_fluxes)
print("num_nodes",curr_indices)
print("num_species",cur_counts)
print("cur_idxs",curr_indices)
print("node_idxs",cur_node_indices)
print("scales",rxd_memb_scales)
print("ptrs",memb_cur_ptrs)
print("mapped",ics_map,min(abs(numpy.array(ics_map))),max(ics_map))
print("mapped_ecs",ecs_map,max(ecs_map))"""
setup_currents(num_currents,
num_fluxes,
_list_to_cint_array(cur_counts),
_list_to_cint_array(cur_node_indices),
_list_to_cdouble_array(rxd_memb_scales),
_list_to_pyobject_array(memb_cur_ptrs),
_list_to_cint_array(ics_map),
_list_to_cint_array(ecs_map))
def _setup():
from . import initializer
if not initializer.is_initialized(): initializer._do_init()
# TODO: this is when I should resetup matrices (structure changed event)
global _external_solver_initialized, last_diam_change_cnt, last_structure_change_cnt
_external_solver_initialized = False
# Using C-code for reactions
options.use_reaction_contribution_to_jacobian = False
with initializer._init_lock:
_update_node_data()
def _find_librxdmath():
import glob
# cmake doesn't create x86_64 directory under install prefix
base_path = os.path.join(h.neuronhome(), "..", "..", platform.machine())
if not os.path.exists(base_path):
base_path = os.path.join(h.neuronhome(), "..", "..")
base_path = os.path.join(base_path, "lib", "librxdmath")
success = False
for extension in ['', '.dll', '.so', '.dylib']:
dll = base_path + extension
try:
success = os.path.exists(dll)
except:
pass
if success: break
if not success:
if sys.platform.lower().startswith("win"):
dll = os.path.join(h.neuronhome(), 'bin', 'librxdmath.dll')
success = os.path.exists(dll)
if not success:
raise RxDException('unable to connect to the librxdmath library')
return dll
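# Compile a generated C reaction formula into a temporary shared library with gcc (or the MinGW
# gcc bundled with NEURON on Windows) and return a ctypes handle to its reaction() function.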
def _c_compile(formula):
filename = 'rxddll' + str(uuid.uuid1())
with open(filename + '.c', 'w') as f:
f.write(formula)
math_library = '-lm'
fpic = '-fPIC'
try:
gcc = os.environ["CC"]
except:
# when running on windows, try to use the gcc included with NEURON
if sys.platform.lower().startswith("win"):
math_library = ''
fpic = ''
gcc = os.path.join(h.neuronhome(),"mingw","mingw64","bin","x86_64-w64-mingw32-gcc.exe")
if not os.path.isfile(gcc):
raise RxDException("unable to locate a C compiler. Please `set CC=<path to C compiler>`")
else:
gcc = "gcc"
#TODO: Check this works on non-Linux machines
gcc_cmd = "%s -I%s -I%s " % (gcc, sysconfig.get_python_inc(), os.path.join(h.neuronhome(), "..", "..", "include", "nrn"))
gcc_cmd += "-shared %s %s.c %s " % (fpic, filename, _find_librxdmath())
gcc_cmd += "-o %s.so %s" % (filename, math_library)
if sys.platform.lower().startswith("win"):
my_path = os.getenv('PATH')
os.putenv('PATH', my_path + ';' + os.path.join(h.neuronhome(),"mingw","mingw64","bin"))
os.system(gcc_cmd)
os.putenv('PATH', my_path)
else:
os.system(gcc_cmd)
#TODO: Find a better way of letting the system locate librxdmath.so.0
rxdmath_dll = ctypes.cdll[_find_librxdmath()]
dll = ctypes.cdll['%s.so' % os.path.abspath(filename)]
reaction = dll.reaction
reaction.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double)]
reaction.restype = ctypes.c_double
os.remove(filename + '.c')
if sys.platform.lower().startswith("win"):
#cannot remove dll that are in use
_windows_dll.append(weakref.ref(dll))
_windows_dll_files.append(filename + ".so")
else:
os.remove(filename + '.so')
return reaction
_h_ptrvector = h.PtrVector
_h_vector = h.Vector
_structure_change_count = nrn_dll_sym('structure_change_cnt', _ctypes_c_int)
_diam_change_count = nrn_dll_sym('diam_change_cnt', _ctypes_c_int)
def _donothing(): pass
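# _setup_units (below) rebuilds the generated reaction machinery whenever the
# NEURON legacy-units flag changes (or when force=True): it clears the rates,
# re-registers the membrane currents, recompiles the reactions and, if CVode
# is active, reinitialises the integrator.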
def _setup_units(force=False):
global last_nrn_legacy_units
if initializer.is_initialized():
if(force or last_nrn_legacy_units != h.nrnunit_use_legacy()):
last_nrn_legacy_units = h.nrnunit_use_legacy()
clear_rates()
_setup_memb_currents()
_compile_reactions()
if _cvode_object.active():
_cvode_object.re_init()
def _update_node_data(force=False, newspecies=False):
global last_diam_change_cnt, last_structure_change_cnt
if last_diam_change_cnt != _diam_change_count.value or _structure_change_count.value != last_structure_change_cnt or force:
last_diam_change_cnt = _diam_change_count.value
last_structure_change_cnt = _structure_change_count.value
#if not species._has_3d:
# TODO: merge this with the 3d/hybrid case?
if initializer.is_initialized():
nsegs_changed = 0
for sr in _species_get_all_species():
s = sr()
if s is not None: nsegs_changed += s._update_node_data()
if nsegs_changed or newspecies:
section1d._purge_cptrs()
for sr in _species_get_all_species():
s = sr()
if s is not None:
s._update_region_indices(True)
s._register_cptrs()
#if species._has_1d and species._1d_submatrix_n():
_setup_matrices()
# TODO: separate compiling reactions -- so the indices can be updated without recompiling
_include_flux(True)
_setup_units(force=True)
#end#if
#_curr_scales = _numpy_array(_curr_scales)
def _matrix_to_rxd_sparse(m):
"""precondition: assumes m a numpy array"""
nonzero_i, nonzero_j = list(zip(*list(m.keys())))
nonzero_values = numpy.ascontiguousarray(list(m.values()), dtype=numpy.float64)
# number of rows
n = m.shape[1]
return n, len(nonzero_i), numpy.ascontiguousarray(nonzero_i, dtype=numpy.int_), numpy.ascontiguousarray(nonzero_j, dtype=numpy.int_), nonzero_values
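# _setup_matrices (below) builds the sparse Euler/diffusion matrices for the
# 1D species, collects the zero-volume node indices, wires up any 1D/3D hybrid
# connections via set_hybrid_data, and hands everything to the C solver
# (setup_solver, set_euler_matrix, rxd_setup_conc_ptrs).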
# TODO: make sure this does the right thing when the diffusion constant changes between two neighboring nodes
def _setup_matrices():
with initializer._init_lock:
# update _node_fluxes in C
_include_flux()
# TODO: this sometimes seems to get called twice. Figure out why and fix, if possible.
n = len(_node_get_states())
#TODO: Replace with ADI version
"""
if species._has_3d:
_euler_matrix = _scipy_sparse_dok_matrix((n, n), dtype=float)
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None: s._setup_matrices3d(_euler_matrix)
_diffusion_matrix = -_euler_matrix
_euler_matrix = _euler_matrix.tocsr()
_update_node_data(True)
# NOTE: if we also have 1D, this will be replaced with the correct values below
_zero_volume_indices = []
_nonzero_volume_indices = list(range(len(_node_get_states())))
"""
volumes = node._get_data()[0]
zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)
if species._has_1d:
# TODO: initialization is slow. track down why
_last_dt = None
for sr in _species_get_all_species():
s = sr()
if s is not None:
s._assign_parents()
# remove old linearmodeladdition
_linmodadd_cur = None
n = species._1d_submatrix_n()
if n:
# create sparse matrix for C in cy'+gy=b
c_diagonal = numpy.zeros(n,dtype=ctypes.c_double)
# most entries are 1 except those corresponding to the 0 and 1 ends
# create the matrix G
#if not species._has_3d:
# # if we have both, then put the 1D stuff into the matrix that already exists for 3D
from collections import OrderedDict
diffusion_matrix = [OrderedDict() for idx in range(n)]
for sr in _species_get_all_species():
s = sr()
if s is not None:
s._setup_diffusion_matrix(diffusion_matrix)
s._setup_c_matrix(c_diagonal)
#print '_diffusion_matrix.shape = %r, n = %r, species._has_3d = %r' % (_diffusion_matrix.shape, n, species._has_3d)
euler_matrix_i, euler_matrix_j, euler_matrix_nonzero = [], [], []
for i in range(n):
mat_i = diffusion_matrix[i]
euler_matrix_i.extend(itertools.repeat(i,len(mat_i)))
euler_matrix_j.extend(mat_i.keys())
euler_matrix_nonzero.extend(mat_i.values())
euler_matrix_nnonzero = len(euler_matrix_nonzero)
assert(len(euler_matrix_i) == len(euler_matrix_j) == len(euler_matrix_nonzero))
# modify C for cases where no diffusive coupling of 0, 1 ends
# TODO: is there a better way to handle no diffusion?
#for i in range(n):
# if not _diffusion_matrix[i, i]:
# _linmodadd_c[i, i] = 1
#_cvode_object.re_init()
#if species._has_3d:
# _euler_matrix = -_diffusion_matrix
#Hybrid logic
if species._has_1d and species._has_3d:
hybrid_neighbors = collections.defaultdict(lambda: [])
hybrid_vols = collections.defaultdict(lambda: [])
hybrid_diams = {}
grid_id_dc = {}
hybrid_index1d_grid_ids = {}
grid_id_species = {}
index1d_sec1d = {}
hybrid_vols1d = {}
dxs = set()
for sr in _species_get_all_species():
s = sr()
if s is not None:
if s._intracellular_instances and s._secs:
# have both 1D and 3D, so find the neighbors
# for each of the 3D sections, find the parent sections
for r in s._regions:
if r in s._intracellular_instances:
grid_id = s._intracellular_instances[r]._grid_id
grid_id_species.setdefault(grid_id, s._intracellular_instances[r])
grid_id_dc[grid_id] = s.d
dxs.add(r._dx)
for sec in r._secs3d:
parent_seg = sec.trueparentseg()
parent_sec = None if not parent_seg else parent_seg.sec
# are any of these a match with a 1d section?
if s._has_region_section(r, parent_sec):
#this section has a 1d section that is a parent
index1d, indices3d, vols1d, vols3d = _get_node_indices(s, r, sec, sec.orientation(), parent_sec, h.parent_connection(sec=sec))
hybrid_neighbors[index1d] += indices3d
hybrid_vols[index1d] += vols3d
hybrid_diams[index1d] = parent_sec(h.parent_connection(sec=sec)).diam
hybrid_index1d_grid_ids[index1d] = grid_id
index1d_sec1d[index1d] = parent_sec
hybrid_vols1d[index1d] = vols1d
for sec1d in r._secs1d:
parent_1d_seg = sec1d.trueparentseg()
parent_1d = None if not parent_1d_seg else parent_1d_seg.sec
if parent_1d == sec:
# it is the parent of a 1d section
index1d, indices3d, vols1d, vols3d = _get_node_indices(s, r, sec, parent_1d_seg.x , sec1d, sec1d.orientation())
hybrid_neighbors[index1d] += indices3d
hybrid_vols[index1d] += vols3d
hybrid_diams[index1d] = sec1d(h.section_orientation(sec=sec1d)).diam
hybrid_index1d_grid_ids[index1d] = grid_id
index1d_sec1d[index1d] = sec1d
hybrid_vols1d[index1d] = vols1d
if len(dxs) > 1:
raise RxDException('currently require a unique value for dx')
dx = dxs.pop()
rates = []
volumes3d = []
volumes1d = []
grids_dx = []
hybrid_indices1d = []
hybrid_indices3d = []
num_3d_indices_per_1d_seg = []
num_1d_indices_per_grid = []
num_3d_indices_per_grid = []
grid_id_indices1d = collections.defaultdict(lambda: [])
for index1d in hybrid_neighbors:
grid_id = hybrid_index1d_grid_ids[index1d]
grid_id_indices1d[grid_id].append(index1d)
hybrid_grid_ids = sorted(grid_id_indices1d.keys())
for grid_id in hybrid_grid_ids:
sp = grid_id_species[grid_id]
# TODO: use 3D anisotropic diffusion coefficients
dc = grid_id_dc[grid_id]
grids_dx.append(sp._dx**3)
num_1d_indices_per_grid.append(len(grid_id_indices1d[grid_id]))
grid_3d_indices_cnt = 0
for index1d in grid_id_indices1d[grid_id]:
neighbors3d = []
vols3d = []
for neigh, vol in zip(hybrid_neighbors[index1d], hybrid_vols[index1d]):
if neigh not in neighbors3d:
neighbors3d.append(neigh)
vols3d.append(vol)
if len(neighbors3d) < 1:
raise RxDException('No 3D neighbors detected for 1D segment. Try perturbing dx')
sec1d = index1d_sec1d[index1d]
seg_length1d = sec1d.L/sec1d.nseg
if neighbors3d:
hybrid_indices1d.append(index1d)
cnt_neighbors_3d = len(neighbors3d)
num_3d_indices_per_1d_seg.append(cnt_neighbors_3d)
grid_3d_indices_cnt += cnt_neighbors_3d
area = (numpy.pi * 0.25 * hybrid_diams[index1d] ** 2)
areaT = sum([v**(2.0/3.0) for v in vols3d])
volumes1d.append(hybrid_vols1d[index1d])
for i, vol in zip(neighbors3d, vols3d):
sp._region._vol[i] = vol
ratio = vol**(2.0/3.0) / areaT
rate = ratio * dc * area / (vol * (dx + seg_length1d) / 2)
rates.append(rate)
volumes3d.append(vol)
hybrid_indices3d.append(i)
num_3d_indices_per_grid.append(grid_3d_indices_cnt)
num_1d_indices_per_grid = numpy.asarray(num_1d_indices_per_grid, dtype=numpy.int64)
num_3d_indices_per_grid = numpy.asarray(num_3d_indices_per_grid, dtype=numpy.int64)
hybrid_indices1d = numpy.asarray(hybrid_indices1d, dtype=numpy.int64)
num_3d_indices_per_1d_seg = numpy.asarray(num_3d_indices_per_1d_seg, dtype=numpy.int64)
hybrid_grid_ids = numpy.asarray(hybrid_grid_ids, dtype=numpy.int64)
hybrid_indices3d = numpy.asarray(hybrid_indices3d, dtype=numpy.int64)
rates = numpy.asarray(rates, dtype=numpy.float_)
volumes1d = numpy.asarray(volumes1d, dtype=numpy.float_)
volumes3d = numpy.asarray(volumes3d, dtype=numpy.float_)
dxs = numpy.asarray(grids_dx, dtype=numpy.float_)
set_hybrid_data(num_1d_indices_per_grid, num_3d_indices_per_grid, hybrid_indices1d, hybrid_indices3d, num_3d_indices_per_1d_seg, hybrid_grid_ids, rates, volumes1d, volumes3d, dxs)
        #TODO: Replace this to handle 1d/3d hybrid models
"""
if species._has_1d and species._has_3d:
# TODO: add connections to matrix; for now: find them
hybrid_neighbors = collections.defaultdict(lambda: [])
hybrid_diams = {}
dxs = set()
for sr in list(_species_get_all_species().values()):
s = sr()
if s is not None:
if s._nodes and s._secs:
# have both 1D and 3D, so find the neighbors
# for each of the 3D sections, find the parent sections
for r in s._regions:
dxs.add(r._dx)
for sec in r._secs3d:
parent_seg = sec.trueparentseg()
parent_sec = None if not parent_seg else parent_seg.sec
# are any of these a match with a 1d section?
if s._has_region_section(r, parent_sec):
# this section has a 1d section that is a parent
index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), parent_sec, h.parent_connection(sec=sec))
hybrid_neighbors[index1d] += indices3d
hybrid_diams[index1d] = parent_seg.diam
else:
for sec1d in r._secs1d:
parent_1d_seg = sec1d.trueparentseg()
parent_1d = None if not parent_seg else parent_seg.sec
if parent_1d == sec:
# it is the parent of a 1d section
index1d, indices3d = _get_node_indices(s, r, sec, h.parent_connection(sec=sec1d), sec1d, sec1d.orientation())
hybrid_neighbors[index1d] += indices3d
hybrid_diams[index1d] = parent_1d_seg.diam
break
elif parent_1d == parent_sec:
# it connects to the parent of a 1d section
index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), sec1d, sec1d.orientation())
hybrid_neighbors[index1d] += indices3d
hybrid_diams[index1d] = parent_1d_seg.diam
break
if len(dxs) > 1:
raise RxDException('currently require a unique value for dx')
dx = dxs.pop()
diffs = node._diffs
n = len(_node_get_states())
# TODO: validate that we're doing the right thing at boundaries
for index1d in list(hybrid_neighbors.keys()):
neighbors3d = set(hybrid_neighbors[index1d])
# NOTE: splitting the connection area equally across all the connecting nodes
area = (numpy.pi * 0.25 * hybrid_diams[index1d] ** 2) / len(neighbors3d)
for i in neighbors3d:
d = diffs[i]
vol = node._volumes[i]
rate = d * area / (vol * dx / 2.)
# make the connections on the 3d side
_euler_matrix[i, i] -= rate
_euler_matrix[i, index1d] += rate
# make the connections on the 1d side (scale by vol because conserving mass not volume)
_euler_matrix[index1d, index1d] -= rate * vol
_euler_matrix[index1d, i] += rate * vol
#print 'index1d row sum:', sum(_euler_matrix[index1d, j] for j in xrange(n))
#print 'index1d col sum:', sum(_euler_matrix[j, index1d] for j in xrange(n))
"""
#CRxD
setup_solver(_node_get_states(), len(_node_get_states()), zero_volume_indices, len(zero_volume_indices))
if species._has_1d and n and euler_matrix_nnonzero > 0:
section1d._transfer_to_legacy()
set_euler_matrix(n, euler_matrix_nnonzero,
_list_to_clong_array(euler_matrix_i),
_list_to_clong_array(euler_matrix_j),
_list_to_cdouble_array(euler_matrix_nonzero),
c_diagonal)
else:
rxd_set_no_diffusion()
if section1d._all_cindices is not None and len(section1d._all_cindices) > 0:
rxd_setup_conc_ptrs(len(section1d._all_cindices),
_list_to_cint_array(section1d._all_cindices),
_list_to_pyobject_array(section1d._all_cptrs))
else:
free_conc_ptrs()
# we do this last because of performance issues with changing sparsity of csr matrices
"""
if _diffusion_matrix is not None:
_diffusion_matrix = _diffusion_matrix.tocsr()
if _euler_matrix is not None:
_euler_matrix = _euler_matrix.tocsr()
if species._has_1d:
if species._has_3d:
_diffusion_matrix = -_euler_matrix
n = species._1d_submatrix_n()
if n:
matrix = _diffusion_matrix[_zero_volume_indices].tocsr()
indptr = matrix.indptr
matrixdata = matrix.data
count = len(_zero_volume_indices)
for row, i in enumerate(_zero_volume_indices):
d = _diffusion_matrix[i, i]
if d:
matrixdata[indptr[row] : indptr[row + 1]] /= -d
matrix[row, i] = 0
else:
matrixdata[indptr[row] : indptr[row + 1]] = 0
global _mat_for_zero_volume_nodes
_mat_for_zero_volume_nodes = matrix
# TODO: _mat_for_zero_volume_nodes is used for CVode.
# Figure out if/how it has to be changed for hybrid 1D/3D sims (probably just augment with identity? or change how its used to avoid multiplying by I)
"""
"""
if pt1 in indices:
ileft = indices[pt1]
dleft = (d + diffs[ileft]) * 0.5
left = dleft * areal / (vol * dx)
euler_matrix[index, ileft] += left
euler_matrix[index, index] -= left
if pt2 in indices:
iright = indices[pt2]
dright = (d + diffs[iright]) * 0.5
right = dright * arear / (vol * dx)
euler_matrix[index, iright] += right
euler_matrix[index, index] -= right
"""
def _get_node_indices(species, region, sec3d, x3d, sec1d, x1d):
#Recalculate the volumes
xlo, xhi = region._mesh_grid['xlo'], region._mesh_grid['xhi']
ylo, yhi = region._mesh_grid['ylo'], region._mesh_grid['yhi']
zlo, zhi = region._mesh_grid['zlo'], region._mesh_grid['zhi']
from . import geometry3d
p3d = int((sec3d.n3d()-1)*x3d)
p1d = int((sec1d.n3d()-1)*x1d)
pt3d = [p3d, p3d + 1] if p3d == 0 else [p3d - 1, p3d]
pt1d = [p1d, p1d + 1] if p1d == 0 else [p1d - 1, p1d]
inter, surf, mesh = geometry3d.voxelize2([sec1d, sec3d], region._dx,
mesh_grid=region._mesh_grid,
relevant_pts=[pt1d, pt3d])
# TODO: remove need for this assumption
assert(x1d in (0, 1))
disc_indices = region._indices_from_sec_x(sec3d, x3d)
#print '%r(%g) connects to the 1d section %r(%g)' % (sec3d, x3d, sec1d, x1d)
#print 'disc indices: %r' % disc_indices
indices3d = []
vols3d = []
for point in disc_indices:
if point in _point_indices[region] and _point_indices[region][point] not in indices3d:
indices3d.append(_point_indices[region][point])
vols3d.append(surf[point][0] if point in surf else region.dx**3)
#print 'found node %d with coordinates (%g, %g, %g)' % (node._index, node.x3d, node.y3d, node.z3d)
# discard duplicates...
# TODO: really, need to figure out all the 3d nodes connecting to a given 1d endpoint, then unique that
#print '3d matrix indices: %r' % indices3d
# TODO: remove the need for this assertion
if x1d == sec1d.orientation():
# TODO: make this whole thing more efficient
# the parent node is the nonzero index on the first row before the diagonal
#first_row = min([node._index for node in species.nodes(region)(sec1d)])
index_1d, vol1d = min([(node._index, node.volume) for node in
species.nodes(region)(sec1d)],
key=lambda x: x[0])
"""for j in range(first_row):
if _euler_matrix[first_row, j] != 0:
index_1d = j
break
else:
raise RxDException('should never get here; could not find parent')"""
elif x1d == 1 - sec1d.orientation():
# the ending zero-volume node is the one after the last node
# TODO: make this more efficient
index_1d, vol1d = max([(node._index, node.volume) for node in
species.nodes(region)(sec1d)],
key=lambda x: x[0])
index_1d + 1
else:
raise RxDException('should never get here; _get_node_indices apparently only partly converted to allow connecting to 1d in middle')
return index_1d, indices3d, vol1d, vols3d
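# _compile_reactions (below) gathers every registered rate/reaction, works out
# which regions and species each one touches, generates C source for the
# right-hand sides of the intracellular 1D, intracellular 3D and extracellular
# cases, compiles it with _c_compile, and registers the result with the C
# layer via register_rate, ics_register_reaction and ecs_register_reaction.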
def _compile_reactions():
#clear all previous reactions (intracellular & extracellular) and the
#supporting indexes
#_windows_remove_dlls()
regions_inv = dict() #regions -> reactions that occur there
species_by_region = dict()
all_species_involed = set()
location_count = 0
ecs_regions_inv = dict()
ecs_species_by_region = dict()
ecs_all_species_involed = set()
ecs_mc_species_involved = set()
from . import rate, multiCompartmentReaction
#Find sets of sections that contain the same regions
from .region import _c_region
matched_regions = [] # the different combinations of regions that arise in different sections
rxd_sec_lookup = section1d._SectionLookup()
for nrnsec in rxd_sec_lookup:
set_of_regions = set() # a set of the regions that occur in a given section
for sec in rxd_sec_lookup[nrnsec]:
if sec: set_of_regions.add(sec._region)
if set_of_regions not in matched_regions:
matched_regions.append(set_of_regions)
region._c_region_lookup = dict()
#create a c_region instance for each of the unique sets of regions
c_region_list = []
for sets in matched_regions:
c_region_list.append(_c_region(sets))
for rptr in _all_reactions:
r = rptr()
if not r:
continue
#Find all the species involved
if isinstance(r,rate.Rate):
if not r._species():
continue
sptrs = set([r._species])
else:
sptrs = set(r._dests + r._sources)
if hasattr(r,'_involved_species') and r._involved_species:
sptrs = sptrs.union(set(r._involved_species))
if hasattr(r,'_involved_species_ecs') and r._involved_species_ecs:
sptrs = sptrs.union(set(r._involved_species_ecs))
#Find all the regions involved
if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
if not hasattr(r._mult, 'flatten'):
r._update_indices()
react_regions = [s()._extracellular()._region for s in r._sources + r._dests if isinstance(s(),species.SpeciesOnExtracellular)] + [s()._region() for s in r._sources + r._dests if not isinstance(s(),species.SpeciesOnExtracellular)]
react_regions += [sptr()._region() for sptr in sptrs if isinstance(sptr(),species.SpeciesOnRegion)]
react_regions += [r._regions[0]]
react_regions = list(set(react_regions))
#if regions are specified - use those
elif hasattr(r,'_active_regions'):
react_regions = r._active_regions
#Otherwise use all the regions where the species are
else:
react_regions = set()
nsp = 0
for sp in sptrs:
s = sp()
nsp += 1
if isinstance(s,species.SpeciesOnRegion):
react_regions.add(s._region())
elif isinstance(s,species.SpeciesOnExtracellular):
react_regions.add(s._extracellular()._region)
elif isinstance(s,species._ExtracellularSpecies):
react_regions.add(s._region)
elif None not in s._regions:
[react_regions.add(reg) for reg in s._regions + s._extracellular_regions]
react_regions = list(react_regions)
#Only regions where ALL the species are present -- unless it is a membrane
#from collections import Counter
#from . import geometry as geo
#react_regions = [reg for reg, count in Counter(react_regions).iteritems() if count == nsp or isinstance(reg.geometry,geo.ScalableBorder)]
#Any intracellular regions
if not all([isinstance(x, region.Extracellular) for x in react_regions]):
species_involved = []
for sp in sptrs:
s = sp()
if not isinstance(s, species.SpeciesOnExtracellular) and not isinstance(s, species._ExtracellularSpecies):
all_species_involed.add(s)
species_involved.append(s)
for reg in react_regions:
if isinstance(reg, region.Extracellular):
continue
if reg in regions_inv:
regions_inv[reg].append(rptr)
else:
regions_inv[reg] = [rptr]
if reg in species_by_region:
species_by_region[reg] = species_by_region[reg].union(species_involved)
else:
species_by_region[reg] = set(species_involved)
for sec in reg._secs:
location_count += sec.nseg
#Any extracellular regions
if any([isinstance(x, region.Extracellular) for x in react_regions]):
#MultiCompartment - so can have both extracellular and intracellular regions
if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
for sp in sptrs:
s = sp()
if isinstance(s, species.SpeciesOnExtracellular):
ecs_mc_species_involved.add(s)
if isinstance(s, species.Species) and s._extracellular_instances:
for ecs in s._extracellular_instances.keys():
ecs_mc_species_involved.add(s[ecs])
for reg in react_regions:
if reg in list(ecs_species_by_region.keys()):
ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_mc_species_involved)
else:
ecs_species_by_region[reg] = set(ecs_mc_species_involved)
#Otherwise - reaction can only have extracellular regions
else:
ecs_species_involved = []
for sp in sptrs:
s = sp()
ecs_all_species_involed.add(s)
ecs_species_involved.append(s)
for reg in react_regions:
if not isinstance(reg, region.Extracellular):
continue
if reg in ecs_regions_inv:
ecs_regions_inv[reg].append(rptr)
else:
ecs_regions_inv[reg] = [rptr]
if reg in ecs_species_by_region:
ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_species_involved)
else:
ecs_species_by_region[reg] = set(ecs_species_involved)
#Create lists of indexes for intracellular reactions and rates
# a table for location,species -> state index
regions_inv_1d = [reg for reg in regions_inv if reg._secs1d]
regions_inv_1d.sort(key=lambda r: r._id)
all_regions_inv_3d = [reg for reg in regions_inv if reg._secs3d]
#remove extra regions from multicompartment reactions. We only want the membrane
regions_inv_3d = set()
for reg in all_regions_inv_3d:
for rptr in regions_inv[reg]:
r = rptr()
if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
regions_inv_3d.add(r._regions[0])
else:
regions_inv_3d.add(reg)
regions_inv_3d = list(regions_inv_3d)
for reg in regions_inv_1d:
rptr = weakref.ref(reg)
if rptr in region._c_region_lookup:
for c_region in region._c_region_lookup[rptr]:
for react in regions_inv[reg]:
c_region.add_reaction(react, rptr)
c_region.add_species(species_by_region[reg])
if reg in ecs_species_by_region:
c_region.add_ecs_species(ecs_species_by_region[reg])
# now setup the reactions
#if there are no reactions
if location_count == 0 and len(ecs_regions_inv) == 0:
return None
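    # localize_index (below) rewrites the index expressions in a generated rate
    # string so that the global ids embedded in e.g. species[i][j] are replaced
    # by the ids local to the given _c_region (creg); the substitutions cover
    # species, params, species_3d and params_3d references.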
def localize_index(creg, rate):
rate_str = re.sub(r'species\[(\d+)\]\[(\d+)\]',
lambda m: "species[%i][%i]" %
(creg._species_ids.get(int(m.groups()[0])),
creg._region_ids.get(int(m.groups()[1]))), rate)
rate_str = re.sub(r'params\[(\d+)\]\[(\d+)\]',
lambda m: "params[%i][%i]" %
(creg._params_ids.get(int(m.groups()[0])),
creg._region_ids.get(int(m.groups()[1]))), rate_str)
rate_str = re.sub(r'species_3d\[(\d+)\]',
lambda m: "species_3d[%i]" %
creg._ecs_species_ids.get(int(m.groups()[0])), rate_str)
rate_str = re.sub(r'params_3d\[(\d+)\]',
lambda m: "params_3d[%i]" %
creg._ecs_params_ids.get(int(m.groups()[0])), rate_str)
return rate_str
#Setup intracellular and multicompartment reactions
if location_count > 0:
from . import rate, multiCompartmentReaction, Parameter
for creg in c_region_list:
if not creg._react_regions:
continue
creg._initalize()
mc_mult_count = 0
mc_mult_list = []
species_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)
flux_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)
ecs_species_ids_used = numpy.zeros((creg.num_ecs_species),bool)
fxn_string = _c_headers
fxn_string += 'void reaction(double** species, double** params, double** rhs, double* mult, double* species_3d, double* params_3d, double* rhs_3d, double** flux, double v)\n{'
# declare the "rate" variable if any reactions (non-rates)
for rprt in creg._react_regions:
if not isinstance(rprt(),rate.Rate):
fxn_string += '\n\tdouble rate;'
break
for rptr in _all_reactions:
if rptr not in creg._react_regions:
continue
r = rptr()
if isinstance(r, rate.Rate):
s = r._species()
species_id = creg._species_ids[s._id]
for reg in creg._react_regions[rptr]:
if reg() in r._rate:
try:
region_id = creg._region_ids[reg()._id]
rate_str = localize_index(creg, r._rate[reg()][0])
except KeyError:
warn("Species not on the region specified, %r will be ignored.\n" % r)
continue
operator = '+=' if species_ids_used[species_id][region_id] else '='
fxn_string += "\n\trhs[%d][%d] %s %s;" % (species_id, region_id, operator, rate_str)
species_ids_used[species_id][region_id] = True
elif isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
#Lookup the region_id for the reaction
try:
for reg in r._rate:
rate_str = localize_index(creg, r._rate[reg][0])
fxn_string += "\n\trate = %s;" % rate_str
break
except KeyError:
warn("Species not on the region specified, %r will be ignored.\n" % r)
continue
for i, sptr in enumerate(r._sources + r._dests):
s = sptr()
if isinstance(s, species.SpeciesOnExtracellular):
if not isinstance(s, species.ParameterOnExtracellular):
species_id = creg._ecs_species_ids[s._extracellular()._grid_id]
operator = '+=' if ecs_species_ids_used[species_id] else '='
fxn_string += "\n\trhs_3d[%d] %s mult[%d] * rate;" % (species_id, operator, mc_mult_count)
ecs_species_ids_used[species_id] = True
elif not isinstance(s, species.Parameter) and not isinstance(s, species.ParameterOnRegion):
species_id = creg._species_ids[s._id]
region_id = creg._region_ids[s._region()._id]
operator = '+=' if species_ids_used[species_id][region_id] else '='
fxn_string += "\n\trhs[%d][%d] %s mult[%d] * rate;" % (species_id, region_id, operator, mc_mult_count)
species_ids_used[species_id][region_id] = True
if r._membrane_flux:
operator = '+=' if flux_ids_used[species_id][region_id] else '='
fxn_string += "\n\tif(flux) flux[%d][%d] %s %1.1f * rate;" % (species_id, region_id, operator, r._cur_charges[i])
flux_ids_used[species_id][region_id] = True
#TODO: Fix problem if the whole region isn't part of the same aggregate c_region
mc_mult_count += 1
mc_mult_list.extend(r._mult.flatten())
else:
for reg in creg._react_regions[rptr]:
try:
region_id = creg._region_ids[reg()._id]
rate_str = localize_index(creg, r._rate[reg()][0])
except KeyError:
warn("Species not on the region specified, %r will be ignored.\n" % r)
continue
fxn_string += "\n\trate = %s;" % rate_str
summed_mults = collections.defaultdict(lambda: 0)
for (mult, sp) in zip(r._mult, r._sources + r._dests):
summed_mults[creg._species_ids.get(sp()._id)] += mult
for idx in sorted(summed_mults.keys()):
operator = '+=' if species_ids_used[idx][region_id] else '='
species_ids_used[idx][region_id] = True
fxn_string += "\n\trhs[%d][%d] %s (%g) * rate;" % (idx, region_id, operator, summed_mults[idx])
fxn_string += "\n}\n"
register_rate(creg.num_species, creg.num_params, creg.num_regions,
creg.num_segments, creg.get_state_index(),
creg.num_ecs_species, creg.num_ecs_params,
creg.get_ecs_species_ids(), creg.get_ecs_index(),
mc_mult_count,
numpy.array(mc_mult_list, dtype=ctypes.c_double),
_list_to_pyobject_array(creg._vptrs),
_c_compile(fxn_string))
#Setup intracellular 3D reactions
molecules_per_mM_um3 = constants.molecules_per_mM_um3()
if regions_inv_3d:
for reg in regions_inv_3d:
ics_grid_ids = []
all_ics_gids = set()
ics_param_gids = set()
fxn_string = _c_headers
fxn_string += 'void reaction(double* species_3d, double* params_3d, double*rhs, double* mc3d_mults)\n{'
for rptr in [r for rlist in list(regions_inv.values()) for r in rlist]:
if not isinstance(rptr(), rate.Rate):
fxn_string += '\n\tdouble rate;\n'
break
#if any rates on this region have SpeciesOnRegion, add their grid_ids
#do this in loop above if it is correct
for rptr in [r for rlist in list(regions_inv.values()) for r in rlist]:
r = rptr()
if isinstance(r, rate.Rate):
if reg in r._regions:
for spec_involved in r._involved_species:
#probably should do parameters/states here as well
if isinstance(spec_involved(), species.SpeciesOnRegion):
all_ics_gids.add(spec_involved()._species()._intracellular_instances[spec_involved()._region()]._grid_id)
elif isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
if reg in r._rate:
for spec_involved in r._involved_species + r._sources + r._dests:
all_ics_gids.add(spec_involved()._species()._intracellular_instances[spec_involved()._region()]._grid_id)
for s in species_by_region[reg]:
spe = s._species() if isinstance(s,species.SpeciesOnRegion) else s
if hasattr(spe, '_intracellular_instances') and spe._intracellular_instances and reg in spe._intracellular_instances:
if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnRegion):
sp = spe._intracellular_instances[reg]
ics_param_gids.add(sp._grid_id)
else:
###TODO is this correct? are there any other cases I should worry about? Do I always make a species the intracellular instance for the region we are looping through?
sp = spe._intracellular_instances[reg]
all_ics_gids.add(sp._grid_id)
all_ics_gids = list(all_ics_gids)
ics_param_gids = list(ics_param_gids)
if any([isinstance(rptr(), multiCompartmentReaction.MultiCompartmentReaction) for rptr in regions_inv[reg]]):
#the elements in each list contain the indices into the states vector for the intracellular instance that need to be updated
mc3d_region_size = len(reg._xs)
mc3d_indices_start = [species._defined_species_by_gid[index]._mc3d_indices_start(reg) for index in all_ics_gids + ics_param_gids]
else:
mc3d_region_size = 0
mc3d_indices_start = [0 for i in range(len(all_ics_gids + ics_param_gids))]
mults = [[] for i in range(len(all_ics_gids + ics_param_gids))]
for rptr in regions_inv[reg]:
r = rptr()
if reg not in r._rate:
continue
rate_str = re.sub(r'species_3d\[(\d+)\]',lambda m: "species_3d[%i]" % [pid for pid,gid in enumerate(all_ics_gids) if gid == int(m.groups()[0])][0], r._rate[reg][-1])
rate_str = re.sub(r'params_3d\[(\d+)\]',lambda m: "params_3d[%i]" % [pid for pid, gid in enumerate(ics_param_gids) if gid == int(m.groups()[0])][0], rate_str)
if isinstance(r,rate.Rate):
s = r._species()
#Get underlying rxd._IntracellularSpecies for the grid_id
if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnRegion):
continue
elif isinstance(s, species.Species):
s = s._intracellular_instances[reg]
elif isinstance(s, species.SpeciesOnRegion):
s = s._species()._intracellular_instances[s._region()]
if s._grid_id in ics_grid_ids:
operator = '+='
else:
operator = '='
ics_grid_ids.append(s._grid_id)
pid = [pid for pid,gid in enumerate(all_ics_gids) if gid == s._grid_id][0]
fxn_string += "\n\trhs[%d] %s %s;" % (pid, operator, rate_str)
elif isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):
if reg in r._regions:
from . import geometry
fxn_string += '\n\trate = ' + rate_str + ";"
for sptr in r._sources:
s = sptr()
if not isinstance(s, species.Parameter) and not isinstance(s, species.ParameterOnRegion):
s3d = s.instance3d
if s3d._grid_id in ics_grid_ids:
operator = '+='
else:
operator = '='
ics_grid_ids.append(s3d._grid_id)
#Find mult for this grid
for sec in reg._secs3d:
sas = reg._vol
s3d_reg = s3d._region
for seg in sec:
for index in reg._nodes_by_seg[seg]:
#Change this to be by volume
#membrane area / compartment volume / molecules_per_mM_um3
mults[s3d._grid_id].append(sas[index] / (s3d._region._vol[index]) / molecules_per_mM_um3)
pid = [pid for pid,gid in enumerate(all_ics_gids) if gid == s3d._grid_id][0]
fxn_string += "\n\trhs[%d] %s -mc3d_mults[%d] * rate;" % (pid, operator, pid)
for sptr in r._dests:
s = sptr()
if not isinstance(s, species.Parameter) and not isinstance(s, species.ParameterOnRegion):
s3d = s.instance3d
if s3d._grid_id in ics_grid_ids:
operator = '+='
else:
operator = '='
ics_grid_ids.append(s3d._grid_id)
#Find mult for this grid
for sec in reg._secs3d:
sas = reg._vol
s3d_reg = s3d._region
for seg in sec:
for index in reg._nodes_by_seg[seg]:
#Change this to be by volume
mults[s3d._grid_id].append(sas[index] / (s3d._region._vol[index]) / molecules_per_mM_um3)
pid = [pid for pid,gid in enumerate(all_ics_gids) if gid == s3d._grid_id][0]
fxn_string += "\n\trhs[%d] %s mc3d_mults[%d] * rate;" % (pid, operator, pid)
else:
idx=0
fxn_string += "\n\trate = %s;" % rate_str
for sp in r._sources + r._dests:
s = sp()
#Get underlying rxd._IntracellularSpecies for the grid_id
if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnRegion):
idx += 1
continue
if isinstance(s, species.Species):
s = s._intracellular_instances[reg]
elif isinstance(s, species.SpeciesOnRegion):
if s._region() in s._species()._intracellular_instances:
s = s._species()._intracellular_instances[s._region()]
else:
continue
if s._grid_id in ics_grid_ids:
operator = '+='
else:
operator = '='
ics_grid_ids.append(s._grid_id)
pid = [pid for pid,gid in enumerate(all_ics_gids) if gid == s._grid_id][0]
fxn_string += "\n\trhs[%d] %s (%s)*rate;" % (pid, operator, r._mult[idx])
idx += 1
fxn_string += "\n}\n"
for i, ele in enumerate(mults):
if ele == []:
mults[i] = numpy.ones(len(reg._xs))
mults = list(itertools.chain.from_iterable(mults))
ics_register_reaction(0, len(all_ics_gids), len(ics_param_gids), _list_to_cint_array(all_ics_gids + ics_param_gids), numpy.asarray(mc3d_indices_start), mc3d_region_size, numpy.asarray(mults), _c_compile(fxn_string))
#Setup extracellular reactions
if len(ecs_regions_inv) > 0:
for reg in ecs_regions_inv:
grid_ids = []
all_gids = set()
param_gids = set()
fxn_string = _c_headers
#TODO: find the nrn include path in python
            #It is necessary for a couple of functions in python that are not in math.h
fxn_string += 'void reaction(double* species_3d, double* params_3d, double* rhs)\n{'
# declare the "rate" variable if any reactions (non-rates)
for rptr in [r for rlist in list(ecs_regions_inv.values()) for r in rlist]:
if not isinstance(rptr(),rate.Rate):
fxn_string += '\n\tdouble rate;'
break
#get a list of all grid_ids involved
for s in ecs_species_by_region[reg]:
if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnExtracellular):
sp = s[reg] if isinstance(s, species.Species) else s
param_gids.add(sp._extracellular()._grid_id if isinstance(sp, species.SpeciesOnExtracellular) else sp._grid_id)
else:
sp = s[reg] if isinstance(s, species.Species) else s
all_gids.add(sp._extracellular()._grid_id if isinstance(sp, species.SpeciesOnExtracellular) else sp._grid_id)
all_gids = list(all_gids)
param_gids = list(param_gids)
for rptr in ecs_regions_inv[reg]:
r = rptr()
rate_str = re.sub(r'species_3d\[(\d+)\]',lambda m: "species_3d[%i]" % [pid for pid, gid in enumerate(all_gids) if gid == int(m.groups()[0])][0], r._rate_ecs[reg][-1])
rate_str = re.sub(r'params_3d\[(\d+)\]',lambda m: "params_3d[%i]" % [pid for pid, gid in enumerate(param_gids) if gid == int(m.groups()[0])][0], rate_str)
if isinstance(r,rate.Rate):
s = r._species()
#Get underlying rxd._ExtracellularSpecies for the grid_id
if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnExtracellular):
continue
elif isinstance(s, species.Species):
s = s[reg]._extracellular()
elif isinstance(s, species.SpeciesOnExtracellular):
s = s._extracellular()
if s._grid_id in grid_ids:
operator = '+='
else:
operator = '='
grid_ids.append(s._grid_id)
pid = [pid for pid,gid in enumerate(all_gids) if gid == s._grid_id][0]
fxn_string += "\n\trhs[%d] %s %s;" % (pid, operator, rate_str)
else:
idx=0
fxn_string += "\n\trate = %s;" % rate_str
for sp in r._sources + r._dests:
s = sp()
#Get underlying rxd._ExtracellularSpecies for the grid_id
if isinstance(s, species.Parameter) or isinstance(s, species.ParameterOnExtracellular):
idx += 1
continue
if isinstance(s, species.Species):
s = s[reg]._extracellular()
elif isinstance(s, species.SpeciesOnExtracellular):
s = s._extracellular()
if s._grid_id in grid_ids:
operator = '+='
else:
operator = '='
grid_ids.append(s._grid_id)
pid = [pid for pid,gid in enumerate(all_gids) if gid == s._grid_id][0]
fxn_string += "\n\trhs[%d] %s (%s)*rate;" % (pid, operator, r._mult[idx])
idx += 1
fxn_string += "\n}\n"
ecs_register_reaction(0, len(all_gids), len(param_gids),
_list_to_cint_array(all_gids + param_gids),
_c_compile(fxn_string))
def _init():
if len(species._all_species) == 0:
return None
initializer._do_init()
# TODO: check about the 0<x<1 problem alluded to in the documentation
h.define_shape()
# if the shape has changed update the nodes
_update_node_data()
if species._has_1d:
section1d._purge_cptrs()
for sr in _species_get_all_species():
s = sr()
if s is not None:
# TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)
s._register_cptrs()
s._finitialize()
_setup_matrices()
#if species._has_1d and species._1d_submatrix_n():
#volumes = node._get_data()[0]
#zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)
#setup_solver(_node_get_states(), len(_node_get_states()), zero_volume_indices, len(zero_volume_indices), h._ref_t, h._ref_dt)
clear_rates()
_setup_memb_currents()
_compile_reactions()
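# _include_flux (below) pushes user-registered node fluxes down to the C layer:
# entries of type -1 in node._node_fluxes are 1D nodes (their scales are
# multiplied by the node volume), all other types are 3D grid ids whose entries
# are grouped per grid before being passed to rxd_include_node_flux1D/3D.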
def _include_flux(force=False):
from .node import _node_fluxes
from . import node
if force or node._has_node_fluxes:
index1D = []
source1D = []
scale1D = []
grids = dict()
for idx, t, src, sc, rptr in zip(_node_fluxes['index'],
_node_fluxes['type'],
_node_fluxes['source'],
_node_fluxes['scale'],
_node_fluxes['region']):
if t == -1:
index1D.append(idx)
source1D.append(src)
scale1D.append(sc * node._volumes[idx])
else:
gid = t
if gid not in grids:
grids[gid] = {'index':[], 'source': [], 'scale':[]}
grids[gid]['index'].append(idx)
grids[gid]['source'].append(src)
grids[gid]['scale'].append(sc * rptr().volume(idx))
counts3D = []
grids3D = sorted(grids.keys())
index3D = []
source3D = []
scale3D = []
for gid in grids3D:
counts3D.append(len(grids[gid]['index']))
index3D.extend(grids[gid]['index'])
source3D.extend(grids[gid]['source'])
scale3D.extend(grids[gid]['scale'])
rxd_include_node_flux1D(len(index1D), _list_to_clong_array(index1D),
_list_to_cdouble_array(scale1D),
_list_to_pyobject_array(source1D))
rxd_include_node_flux3D(len(grids3D), _list_to_cint_array(counts3D),
_list_to_cint_array(grids3D),
_list_to_clong_array(index3D),
_list_to_cdouble_array(scale3D),
_list_to_pyobject_array(source3D))
node._has_node_fluxes = False
def _init_concentration():
if len(species._all_species) == 0:
return None
for sr in _species_get_all_species():
s = sr()
if s is not None:
# TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)
s._finitialize()
_has_nbs_registered = False
_nbs = None
do_setup_matrices_fptr = None
do_setup_units_fptr = None
def _do_nbs_register():
global _has_nbs_registered, _nbs, _fih, _fih2, _fih3, do_setup_matrices_fptr, do_setup_units_fptr
if not _has_nbs_registered:
#from neuron import nonvint_block_supervisor as _nbs
_has_nbs_registered = True
#_nbs.register(_callbacks) not used
#
# register the initialization handler and the ion register handler
#
_fih = h.FInitializeHandler(_init_concentration)
_fih3 = h.FInitializeHandler(3, _init)
set_setup_matrices = nrn_dll_sym('set_setup_matrices')
set_setup_matrices.argtypes = [fptr_prototype]
do_setup_matrices_fptr = fptr_prototype(_setup_matrices)
set_setup_matrices(do_setup_matrices_fptr)
set_setup_units = nrn_dll_sym('set_setup_units')
set_setup_units.argtypes = [fptr_prototype]
do_setup_units_fptr = fptr_prototype(_setup_units)
set_setup_units(do_setup_units_fptr)
_fih2 = h.FInitializeHandler(3, initializer._do_ion_register)
#
# register scatter/gather mechanisms
#
_cvode_object.extra_scatter_gather(0, _after_advance)
# register the Python callbacks
do_setup_fptr = fptr_prototype(_setup)
do_initialize_fptr = fptr_prototype(_init)
set_setup(do_setup_fptr)
set_initialize(do_initialize_fptr)
def _windows_remove_dlls():
global _windows_dll_files, _windows_dll
for (dll_ptr,filepath) in zip(_windows_dll,_windows_dll_files):
dll = dll_ptr()
if dll:
handle = dll._handle
del dll
ctypes.windll.kernel32.FreeLibrary(handle)
os.remove(filepath)
_windows_dll_files = []
_windows_dll = []
def nthread(n=None):
if(n):
_set_num_threads(n)
return _get_num_threads()
|
[] | [] | ["CC", "PATH"] | [] | ["CC", "PATH"] | python | 2 | 0 | |
pkg/runtime/security/auth.go
|
package security
import (
"context"
"crypto/x509"
"encoding/pem"
"os"
"sync"
"time"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
dapr_credentials "github.com/dapr/dapr/pkg/credentials"
diag "github.com/dapr/dapr/pkg/diagnostics"
sentryv1pb "github.com/dapr/dapr/pkg/proto/sentry/v1"
)
const (
TLSServerName = "cluster.local"
sentrySignTimeout = time.Second * 5
certType = "CERTIFICATE"
sentryMaxRetries = 100
)
// path to the service account token for pods inside a k8s cluster
var kubeTknPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
func GetKubeTknPath() *string {
return &kubeTknPath
}
type Authenticator interface {
    GetTrustAnchors() *x509.CertPool // returns the trusted root certificates (trust anchors)
    GetCurrentSignedCert() *SignedCertificate // returns the currently signed certificate
    CreateSignedWorkloadCert(id, namespace, trustDomain string) (*SignedCertificate, error) // creates a signed workload certificate
}
type authenticator struct {
trustAnchors *x509.CertPool
certChainPem []byte
keyPem []byte
genCSRFunc func(id string) ([]byte, []byte, error)
sentryAddress string
currentSignedCert *SignedCertificate
certMutex *sync.RWMutex
}
type SignedCertificate struct {
    WorkloadCert  []byte         // the signed workload certificate
    PrivateKeyPem []byte         // the private key (PEM encoded)
    Expiry        time.Time      // expiry time of the certificate
    TrustChain    *x509.CertPool // the trust chain
}
func newAuthenticator(sentryAddress string, trustAnchors *x509.CertPool, certChainPem, keyPem []byte, genCSRFunc func(id string) ([]byte, []byte, error)) Authenticator {
return &authenticator{
trustAnchors: trustAnchors,
certChainPem: certChainPem,
keyPem: keyPem,
genCSRFunc: genCSRFunc,
sentryAddress: sentryAddress,
certMutex: &sync.RWMutex{},
}
}
// GetTrustAnchors returns the extracted root cert that serves as the trust anchor.
func (a *authenticator) GetTrustAnchors() *x509.CertPool {
return a.trustAnchors
}
// GetCurrentSignedCert returns the most recently signed certificate.
func (a *authenticator) GetCurrentSignedCert() *SignedCertificate {
a.certMutex.RLock()
defer a.certMutex.RUnlock()
return a.currentSignedCert
}
// CreateSignedWorkloadCert returns a signed workload certificate, the PEM-encoded private key, and the validity period of the signed certificate.
func (a *authenticator) CreateSignedWorkloadCert(id, namespace, trustDomain string) (*SignedCertificate, error) {
// pkg/runtime/security/security.go:64
    // certificate signing request, EC private key
    csrb, pkPem, err := a.genCSRFunc(id) // id == app ID; it is wrapped into the DNSNames []string of the CSR
if err != nil {
return nil, err
}
certPem := pem.EncodeToMemory(&pem.Block{Type: certType, Bytes: csrb})
config, err := dapr_credentials.TLSConfigFromCertAndKey(a.certChainPem, a.keyPem, TLSServerName, a.trustAnchors)
if err != nil {
return nil, errors.Wrap(err, "failed to create tls config from cert and key")
}
unaryClientInterceptor := grpc_retry.UnaryClientInterceptor()
if diag.DefaultGRPCMonitoring.IsEnabled() {
unaryClientInterceptor = grpc_middleware.ChainUnaryClient(
unaryClientInterceptor,
diag.DefaultGRPCMonitoring.UnaryClientInterceptor(),
)
}
conn, err := grpc.Dial(
a.sentryAddress,
grpc.WithTransportCredentials(credentials.NewTLS(config)),
grpc.WithUnaryInterceptor(unaryClientInterceptor))
if err != nil {
diag.DefaultMonitoring.MTLSWorkLoadCertRotationFailed("sentry_conn")
return nil, errors.Wrap(err, "error establishing connection to sentry")
}
defer conn.Close()
c := sentryv1pb.NewCAClient(conn)
    // Sign the certificate. Example errors returned when sending the CSR:
// error validating requester identity: csr validation failed: invalid token: [invalid bearer token, Token has been invalidated]
// error validating requester identity: csr validation failed: token/id mismatch. received id: dp-618b5e4aa5ebc3924db86860-workerapp-54683-7f8d646556-vf58h
resp, err := c.SignCertificate(context.Background(),
&sentryv1pb.SignCertificateRequest{
CertificateSigningRequest: certPem,
Id: getSentryIdentifier(id),
Token: getToken(),
TrustDomain: trustDomain,
Namespace: namespace,
}, grpc_retry.WithMax(sentryMaxRetries), grpc_retry.WithPerRetryTimeout(sentrySignTimeout))
if err != nil {
diag.DefaultMonitoring.MTLSWorkLoadCertRotationFailed("sign")
return nil, errors.Wrap(err, "error from sentry SignCertificate")
}
    workloadCert := resp.GetWorkloadCertificate() // the signed certificate
    validTimestamp := resp.GetValidUntil()        // expiry timestamp
if err = validTimestamp.CheckValid(); err != nil {
diag.DefaultMonitoring.MTLSWorkLoadCertRotationFailed("invalid_ts")
return nil, errors.Wrap(err, "error parsing ValidUntil")
}
    // type conversion
expiry := validTimestamp.AsTime()
trustChain := x509.NewCertPool()
for _, c := range resp.GetTrustChainCertificates() {
ok := trustChain.AppendCertsFromPEM(c)
if !ok {
diag.DefaultMonitoring.MTLSWorkLoadCertRotationFailed("chaining")
return nil, errors.Wrap(err, "failed adding trust chain cert to x509 CertPool")
}
}
signedCert := &SignedCertificate{
WorkloadCert: workloadCert,
PrivateKeyPem: pkPem,
Expiry: expiry,
TrustChain: trustChain,
}
a.certMutex.Lock()
defer a.certMutex.Unlock()
a.currentSignedCert = signedCert
return signedCert, nil
}
// Currently we support Kubernetes identities.
func getToken() string {
b, _ := os.ReadFile(kubeTknPath)
return string(b)
}
func getSentryIdentifier(appID string) string {
// return injected identity, default id if not present
localID := os.Getenv("SENTRY_LOCAL_IDENTITY")
if localID != "" {
return localID
}
return appID
}
|
["\"SENTRY_LOCAL_IDENTITY\""] | [] | ["SENTRY_LOCAL_IDENTITY"] | [] | ["SENTRY_LOCAL_IDENTITY"] | go | 1 | 0 | |
test/sanity/issue7305-nwfaketop-cookie/test.py
|
import time
import os
import subprocess
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from nw_util import *
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common import utils
chrome_options = Options()
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
testdir = os.path.dirname(os.path.abspath(__file__))
os.chdir(testdir)
try:
os.remove('svrlog.txt')
except:
pass
try:
os.remove('port.txt')
except:
pass
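# Start the helper HTTP server and wait until it has written port.txt, then
# drive the NW.js app through chromedriver: the page embeds an iframe whose
# #result element lists the cookies that were set, and svrlog.txt is compared
# against expected.txt to check which cookies the server actually received.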
server = subprocess.Popen(['python', '../http-server-node.py', 'server.js'])
while not os.path.exists('port.txt') :
time.sleep(1)
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options, service_log_path="log", service_args=["--verbose"])
try:
print driver.current_url
driver.implicitly_wait(10)
driver.switch_to_frame(driver.find_element_by_tag_name("iframe"))
result = driver.find_element_by_id('result').get_attribute('innerHTML')
print result
assert(result == 'same-site-cookie=foo; cross-site-cookie=bar; no-samesite-cookie=nee')
driver.refresh()
driver.switch_to_frame(driver.find_element_by_tag_name("iframe"))
result = driver.find_element_by_id('result').get_attribute('innerHTML')
print result
assert(result == 'same-site-cookie=foo; cross-site-cookie=bar; no-samesite-cookie=nee')
f = open('svrlog.txt', 'r')
svrlog = ''.join(f.readlines())
f = open('expected.txt', 'r')
expected = ''.join(f.readlines())
assert (svrlog == expected)
finally:
import platform
if platform.system() == 'Windows':
subprocess.call(['taskkill', '/F', '/T', '/PID', str(server.pid)])
else:
server.terminate()
driver.quit()
|
[] | [] | ["CHROMEDRIVER"] | [] | ["CHROMEDRIVER"] | python | 1 | 0 | |
cmd/convox-env/main.go
|
package main
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"sort"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/convox/rack/pkg/crypt"
)
func main() {
if len(os.Args) == 1 {
fmt.Fprintf(os.Stderr, "usage: convox-env <command>\n")
os.Exit(1)
}
cenv, err := fetchConvoxEnv()
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: could not fetch convox env: %s\n", err)
os.Exit(1)
}
env := mergeEnv(os.Environ(), cenv)
err = exec(os.Args[1], os.Args[2:], env)
if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", err)
os.Exit(1)
}
}
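// fetchConvoxEnv (below) loads the app environment referenced by
// CONVOX_ENV_URL (which must be an s3:// URL): it downloads the object,
// optionally decrypts it with CONVOX_ENV_KEY, and keeps only the variables
// whitelisted in the comma-separated CONVOX_ENV_VARS list ("*" allows
// everything).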
func fetchConvoxEnv() ([]string, error) {
eu := os.Getenv("CONVOX_ENV_URL")
if eu == "" {
return nil, nil
}
u, err := url.Parse(eu)
if err != nil {
return nil, err
}
if u.Scheme != "s3" {
return nil, fmt.Errorf("unrecognized env url")
}
res, err := S3().GetObject(&s3.GetObjectInput{
Bucket: aws.String(u.Host),
Key: aws.String(u.Path),
})
if err != nil {
return nil, err
}
defer res.Body.Close()
data, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if k := os.Getenv("CONVOX_ENV_KEY"); k != "" {
dec, err := crypt.New().Decrypt(k, data)
if err != nil {
return nil, err
}
data = dec
}
env := []string{}
sc := bufio.NewScanner(bytes.NewReader(data))
allowed := map[string]bool{}
if ev := os.Getenv("CONVOX_ENV_VARS"); ev != "" {
for _, v := range strings.Split(ev, ",") {
allowed[v] = true
}
}
for sc.Scan() {
if s := sc.Text(); s != "" {
if allowed["*"] || allowed[strings.Split(s, "=")[0]] {
env = append(env, s)
}
}
}
return env, nil
}
func mergeEnv(envs ...[]string) []string {
merged := map[string]string{}
for _, env := range envs {
for _, kv := range env {
if parts := strings.SplitN(kv, "=", 2); len(parts) == 2 {
merged[parts[0]] = parts[1]
}
}
}
keys := []string{}
for k := range merged {
keys = append(keys, k)
}
sort.Strings(keys)
final := []string{}
for _, k := range keys {
final = append(final, fmt.Sprintf("%s=%s", k, merged[k]))
}
return final
}
func S3() *s3.S3 {
pool := x509.NewCertPool()
pool.AppendCertsFromPEM([]byte(caCertificates))
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
RootCAs: pool,
},
}
client := http.DefaultClient
client.Transport = tr
return s3.New(session.New(), &aws.Config{
Region: aws.String(os.Getenv("AWS_REGION")),
HTTPClient: client,
S3ForcePathStyle: aws.Bool(true),
})
}
|
["\"CONVOX_ENV_URL\"", "\"CONVOX_ENV_KEY\"", "\"CONVOX_ENV_VARS\"", "\"AWS_REGION\""] | [] | ["CONVOX_ENV_VARS", "CONVOX_ENV_URL", "CONVOX_ENV_KEY", "AWS_REGION"] | [] | ["CONVOX_ENV_VARS", "CONVOX_ENV_URL", "CONVOX_ENV_KEY", "AWS_REGION"] | go | 4 | 0 | |
lib/matplotlib/units.py
|
"""
The classes here provide support for using custom classes with
matplotlib, e.g., those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, e.g., a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation,
rather a units implementation must provide a ConversionInterface, and
the register with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects::
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
'convert value to a scalar or array'
return dates.date2num(value)
@staticmethod
def axisinfo(unit, axis):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
return 'date'
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.cbook import iterable, is_numlike
import numpy as np
class AxisInfo(object):
"""information to support default axis labeling and tick labeling, and
default limits"""
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None,
default_limits=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
default_limits: the default min, max of the axis if no data is present
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
self.default_limits = default_limits
class ConversionInterface(object):
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
@staticmethod
def axisinfo(unit, axis):
        'return a units.AxisInfo instance for axis with the specified units'
return None
@staticmethod
def default_units(x, axis):
'return the default unit for x or None for the given axis'
return None
@staticmethod
def convert(obj, unit, axis):
"""
convert obj using unit for the specified axis. If obj is a sequence,
        return the converted sequence. The output must be a sequence of scalars
that can be used by the numpy array layer
"""
return obj
@staticmethod
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain-ol unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self):
return None # nothing registered
#DISABLED idx = id(x)
#DISABLED cached = self._cached.get(idx)
#DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if isinstance(x, np.ndarray) and x.size:
xravel = x.ravel()
try:
# pass the first value of x that is not masked back to
# get_converter
if not np.all(xravel.mask):
# some elements are not masked
converter = self.get_converter(
xravel[np.argmin(xravel.mask)])
return converter
except AttributeError:
# not a masked_array
# Make sure we don't recurse forever -- it's possible for
# ndarray subclasses to continue to return subclasses and
# not ever return a non-subclass for a single element.
next_item = xravel[0]
if (not isinstance(next_item, np.ndarray) or
next_item.shape != x.shape):
converter = self.get_converter(next_item)
return converter
if converter is None and iterable(x):
for thisx in x:
# Make sure that recursing might actually lead to a solution,
# if we are just going to re-examine another item of the same
# kind, then do not look at it.
if classx and classx != getattr(thisx, '__class__', None):
converter = self.get_converter(thisx)
return converter
#DISABLED self._cached[idx] = converter
return converter
registry = Registry()
|
[] | [] | [] | [] | [] | python | null | null | null |
ucloud-sdk-java-unet/src/test/java/cn/ucloud/unet/client/DescribeVIPTest.java
|
package cn.ucloud.unet.client;
import cn.ucloud.unet.model.DescribeVIPParam;
import cn.ucloud.unet.model.DescribeVIPResult;
import cn.ucloud.common.pojo.Account;
import cn.ucloud.unet.pojo.UnetConfig;
import org.junit.Before;
import org.junit.Test;
/**
* @description:
* @author: codezhang
* @date: 2018-09-27 14:52
**/
public class DescribeVIPTest {
private UnetClient client;
private DescribeVIPParam param;
@Before
public void initData() {
client = new DefaultUnetClient(new UnetConfig(
new Account(System.getenv("UCloudPrivateKey"),
System.getenv("UCloudPublicKey"))));
param = new DescribeVIPParam("cn-sh2");
}
@Test
public void describeVIP() {
try {
DescribeVIPResult describeVIPResult = client.describeVIP(param);
JSONComparator.jsonComparator(describeVIPResult);
} catch (Exception e) {
e.printStackTrace();
}
}
}
|
["\"UCloudPrivateKey\"", "\"UCloudPublicKey\""] | [] | ["UCloudPrivateKey", "UCloudPublicKey"] | [] | ["UCloudPrivateKey", "UCloudPublicKey"] | java | 2 | 0 | |
e2e_ping.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from contextlib import closing
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import os
import time
CONSTANT_SLEEP = 15
INCREMENT_SLEEP = 17
MULTIPLIER_SLEEP = 2
N = 3
HTTP_CODES = [
200, 302
]
def get_host_variable():
try:
return os.environ['HOST_APPENGINE']
except KeyError:
return None
def get_url_variable():
try:
return os.environ['URL_PING']
except KeyError:
return None
def build_appengine_host(host_name, url):
assert isinstance(host_name, str) is True
assert isinstance(url, str) is True
return 'https://{host}.appspot.com/{url}'.format(host=host_name, url=url)
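# Linear back-off: every failed attempt adds INCREMENT_SLEEP * MULTIPLIER_SLEEP
# (17 * 2 = 34) seconds on top of the initial CONSTANT_SLEEP wait.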
def change_time_to_sleep(time_to_sleep):
time_to_sleep += (INCREMENT_SLEEP * MULTIPLIER_SLEEP)
return time_to_sleep
def main():
host = get_host_variable()
url = get_url_variable()
if host is None or url is None:
        raise Exception('HOST_APPENGINE or URL_PING variable is empty')
time_to_sleep = CONSTANT_SLEEP
c = 0
url = build_appengine_host(host, url)
while c < N:
try:
with closing(urlopen(url)) as response:
assert int(response.code) in HTTP_CODES
break
except Exception as e:
print('{}'.format(e))
time.sleep(time_to_sleep)
time_to_sleep = change_time_to_sleep(time_to_sleep)
c += 1
if c >= N:
raise Exception('Something happened with your connection trying to ping {}'.format(url))
print('Test passed!')
if __name__ == '__main__':
main()
|
[] |
[] |
[
"HOST_APPENGINE",
"URL_PING"
] |
[]
|
["HOST_APPENGINE", "URL_PING"]
|
python
| 2 | 0 | |
tests/integration/modules/test_mac_group.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <[email protected]>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
import random
import string
# Import Salt Testing Libs
import tests.integration as integration
from tests.support.unit import skipIf
from tests.support.helpers import (
destructiveTest,
requires_system_grains
)
# Import Salt Libs
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
def __random_string(size=6):
'''
Generates a random username
'''
return 'RS-' + ''.join(
random.choice(string.ascii_uppercase + string.digits)
for x in range(size)
)
# Create group name strings for tests
ADD_GROUP = __random_string()
DEL_GROUP = __random_string()
CHANGE_GROUP = __random_string()
ADD_USER = __random_string()
REP_USER_GROUP = __random_string()
class MacGroupModuleTest(integration.ModuleCase):
'''
Integration tests for the mac_group module
'''
def setUp(self):
'''
Sets up test requirements
'''
os_grain = self.run_function('grains.item', ['kernel'])
        if os_grain['kernel'] != 'Darwin':
self.skipTest(
'Test not applicable to \'{kernel}\' kernel'.format(
**os_grain
)
)
@destructiveTest
@skipIf(os.geteuid() != 0, 'You must be logged in as root to run this test')
@requires_system_grains
def test_mac_group_add(self, grains=None):
'''
Tests the add group function
'''
try:
self.run_function('group.add', [ADD_GROUP, 3456])
group_info = self.run_function('group.info', [ADD_GROUP])
self.assertEqual(group_info['name'], ADD_GROUP)
except CommandExecutionError:
self.run_function('group.delete', [ADD_GROUP])
raise
@destructiveTest
@skipIf(os.geteuid() != 0, 'You must be logged in as root to run this test')
@requires_system_grains
def test_mac_group_delete(self, grains=None):
'''
Tests the delete group function
'''
# Create a group to delete - If unsuccessful, skip the test
if self.run_function('group.add', [DEL_GROUP, 4567]) is not True:
self.run_function('group.delete', [DEL_GROUP])
self.skipTest('Failed to create a group to delete')
try:
# Now try to delete the added group
ret = self.run_function('group.delete', [DEL_GROUP])
self.assertTrue(ret)
except CommandExecutionError:
raise
@destructiveTest
@skipIf(os.getuid() != 0, 'You must be logged in as root to run this test')
@requires_system_grains
def test_mac_group_chgid(self, grains=None):
'''
Tests changing the group id
'''
# Create a group to delete - If unsuccessful, skip the test
if self.run_function('group.add', [CHANGE_GROUP, 5678]) is not True:
self.run_function('group.delete', [CHANGE_GROUP])
self.skipTest('Failed to create a group to manipulate')
try:
self.run_function('group.chgid', [CHANGE_GROUP, 6789])
group_info = self.run_function('group.info', [CHANGE_GROUP])
self.assertEqual(group_info['gid'], 6789)
except AssertionError:
self.run_function('group.delete', [CHANGE_GROUP])
raise
@destructiveTest
@skipIf(os.getuid() != 0, 'You must be logged in as root to run this test')
@requires_system_grains
def test_mac_adduser(self, grains=None):
'''
Tests adding user to the group
'''
# Create a group to use for test - If unsuccessful, skip the test
if self.run_function('group.add', [ADD_GROUP, 5678]) is not True:
self.run_function('group.delete', [ADD_GROUP])
self.skipTest('Failed to create a group to manipulate')
try:
self.run_function('group.adduser', [ADD_GROUP, ADD_USER])
group_info = self.run_function('group.info', [ADD_GROUP])
self.assertEqual(ADD_USER, ''.join(group_info['members']))
except AssertionError:
self.run_function('group.delete', [ADD_GROUP])
raise
@destructiveTest
@skipIf(os.getuid() != 0, 'You must be logged in as root to run this test')
@requires_system_grains
def test_mac_deluser(self, grains=None):
'''
Test deleting user from a group
'''
# Create a group to use for test - If unsuccessful, skip the test
        if self.run_function('group.add', [ADD_GROUP, 5678]) is not True or \
                self.run_function('group.adduser', [ADD_GROUP, ADD_USER]) is not True:
self.run_function('group.delete', [ADD_GROUP])
self.skipTest('Failed to create a group to manipulate')
delusr = self.run_function('group.deluser', [ADD_GROUP, ADD_USER])
self.assertTrue(delusr)
group_info = self.run_function('group.info', [ADD_GROUP])
self.assertNotIn(ADD_USER, ''.join(group_info['members']))
@destructiveTest
@skipIf(os.getuid() != 0, 'You must be logged in as root to run this test')
@requires_system_grains
def test_mac_members(self, grains=None):
'''
Test replacing members of a group
'''
        if self.run_function('group.add', [ADD_GROUP, 5678]) is not True or \
                self.run_function('group.adduser', [ADD_GROUP, ADD_USER]) is not True:
self.run_function('group.delete', [ADD_GROUP])
self.skipTest('Failed to create the {0} group or add user {1} to group '
'to manipulate'.format(ADD_GROUP,
ADD_USER))
rep_group_mem = self.run_function('group.members',
[ADD_GROUP, REP_USER_GROUP])
self.assertTrue(rep_group_mem)
# ensure new user is added to group and previous user is removed
group_info = self.run_function('group.info', [ADD_GROUP])
self.assertIn(REP_USER_GROUP, str(group_info['members']))
self.assertNotIn(ADD_USER, str(group_info['members']))
@destructiveTest
@skipIf(os.getuid() != 0, 'You must be logged in as root to run this test')
@requires_system_grains
def test_mac_getent(self, grains=None):
'''
Test returning info on all groups
'''
        if self.run_function('group.add', [ADD_GROUP, 5678]) is not True or \
                self.run_function('group.adduser', [ADD_GROUP, ADD_USER]) is not True:
self.run_function('group.delete', [ADD_GROUP])
self.skipTest('Failed to create the {0} group or add user {1} to group '
'to manipulate'.format(ADD_GROUP,
ADD_USER))
getinfo = self.run_function('group.getent')
self.assertTrue(getinfo)
self.assertIn(ADD_GROUP, str(getinfo))
self.assertIn(ADD_USER, str(getinfo))
@destructiveTest
@skipIf(os.geteuid() != 0, 'You must be logged in as root to run this test')
@requires_system_grains
def tearDown(self, grains=None):
'''
Clean up after tests
'''
# Delete ADD_GROUP
add_info = self.run_function('group.info', [ADD_GROUP])
if add_info:
self.run_function('group.delete', [ADD_GROUP])
# Delete DEL_GROUP if something failed
del_info = self.run_function('group.info', [DEL_GROUP])
if del_info:
self.run_function('group.delete', [DEL_GROUP])
# Delete CHANGE_GROUP
change_info = self.run_function('group.info', [CHANGE_GROUP])
if change_info:
self.run_function('group.delete', [CHANGE_GROUP])
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
wallet.py
|
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtCore import QObject
from PyQt5.QtCore import QTimer
from PyQt5.QtCore import pyqtSignal
import sys
from random import randint
from jsonMaker import JsonPackets
from vncCrypto import VncCrypto
import redis, json, time, os
import pycurl, random
import requests
import threading
class tranCreator(QObject):
sendPacket = pyqtSignal(str, str, int)
def __init__(self, slaveNodes:list):
super().__init__()
self.slavesNodes = slaveNodes
self.cryptor = VncCrypto()
#------------------------------------------------
self.cryptor.generateKeys()
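        # REDIS_PORT_6379_TCP_ADDR / _PORT follow the legacy Docker container-link naming;
        # fall back to localhost:6379 when they are not set.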
self.redis_host = os.getenv("REDIS_PORT_6379_TCP_ADDR") or 'localhost'
self.redis_port = os.getenv("REDIS_PORT_6379_TCP_PORT") or '6379'
self.redis = redis.StrictRedis(self.redis_host, self.redis_port, db=1)
self.secondKeys = []
self.secondPKeys = []
self.secondKeys.append("TEST PRIVATE KEY")
self.secondPKeys.append("TEST PUBLIC KEY")
cryptor = VncCrypto()
for i in range(1, 100):
cryptor.generateKeys()
self.secondKeys.append(cryptor.getPrivateKey())
self.secondPKeys.append(cryptor.getPublicKey())
self.url = "http://explorer.vinci.id:5000"
#self.url = "http://192.168.192.42:5000"
def sendTransactionMainAV(self):
cryptor = VncCrypto()
cryptor.setPrivateKey("TEST PUBLIC KEY")
packetAT = JsonPackets.applicantTransaction(cryptor, "127.0.0.1")
jpacketAT = json.loads(packetAT)
response = requests.post(self.url + "/wallet/transaction", json=jpacketAT)
print(response.status_code)
i = 0
while (i < 50):
i += 1
receiver = "0323f264fd64a684db1e36a2c97b58867e0625f797008206216576fea2114bdbca"
packetVT = JsonPackets.voteTransaction(cryptor, receiver, randint(1, 10))
jpacketVT = json.loads(packetVT)
response = requests.post(self.url + "/wallet/transaction", json=jpacketVT)
print(response.status_code)
def sendTransactionMain(self):
cryptor = VncCrypto()
cryptor.setPrivateKey("TEST PUBLIC KEY")
while (True):
receiver = random.choice(self.secondPKeys)
tcount = randint(0, 1) + randint(0,1000)/10000
packet = JsonPackets.standartTransaction(cryptor, receiver, tcount, "VNC")
jpacket = json.loads(packet)
response = requests.post(self.url + "/wallet/transaction", data=packet)
print (response.status_code)
time.sleep(0.2)
def sendTransactionSecond(self):
cryptor = VncCrypto()
cryptor.setPrivateKey(random.choice(self.secondKeys))
while (True):
receiver = random.choice(self.secondPKeys)
tcount = randint(0,100)/10000
packet = JsonPackets.standartTransaction(cryptor, receiver, tcount, "VNC")
jpacket = json.loads(packet)
print(packet)
print(jpacket)
response = requests.post(self.url + "/wallet/transaction", data=packet)
print (response.status_code)
time.sleep(5)
def smartWallet(self, countOfThreads:int):
temp = threading.Thread(target=self.sendTransactionMain)
temp.start()
# temp2 = threading.Thread(target=self.sendTransactionMainAV)
# temp2.start()
for i in range(0, countOfThreads):
temp = threading.Thread(target=self.sendTransactionSecond)
temp.start()
def startFastSendTransactions(self):
cryptor = VncCrypto()
cryptor.setPrivateKey("TEST PUBLIC KEY")
sender = cryptor.getPublicKey()
receiver = sender.replace("1", "2")
while (True):
tcount = randint(1, 9999) / 10000000
packet = JsonPackets.standartTransacton2(cryptor, receiver, tcount, "VNC")
self.redis.zadd("RAW TRANSACTIONS", 1, packet)
return True
if __name__ == '__main__':
app = QCoreApplication(sys.argv)
#--------------------------------------------
wallet = tranCreator(['192.168.0.35'])
wallet.smartWallet(3)
# netEngine = NetEngine()
# wallet.sendPacket.connect(netEngine.sendPacketSignal)
# netEngine.setRemoteAddresses.emit(['192.168.0.35'])
#--------------------------------------------
sys.exit(app.exec())
|
[] |
[] |
[
"REDIS_PORT_6379_TCP_ADDR",
"REDIS_PORT_6379_TCP_PORT"
] |
[]
|
["REDIS_PORT_6379_TCP_ADDR", "REDIS_PORT_6379_TCP_PORT"]
|
python
| 2 | 0 | |
vendor/github.com/openshift/service-serving-cert-signer/pkg/cmd/servingcertsigner/cmd.go
|
package servingcertsigner
import (
"fmt"
"math/rand"
"os"
"time"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/util/logs"
"github.com/golang/glog"
configv1 "github.com/openshift/api/config/v1"
operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
servicecertsignerv1alpha1 "github.com/openshift/api/servicecertsigner/v1alpha1"
"github.com/openshift/library-go/pkg/controller/controllercmd"
"github.com/openshift/library-go/pkg/serviceability"
"github.com/openshift/service-serving-cert-signer/pkg/controller/servingcert"
"github.com/openshift/service-serving-cert-signer/pkg/version"
)
var (
componentName = "openshift-service-serving-cert-signer-serving-ca"
componentNamespace = "openshift-service-cert-signer"
configScheme = runtime.NewScheme()
)
func init() {
if err := operatorv1alpha1.AddToScheme(configScheme); err != nil {
panic(err)
}
if err := servicecertsignerv1alpha1.AddToScheme(configScheme); err != nil {
panic(err)
}
}
type ControllerCommandOptions struct {
basicFlags *controllercmd.ControllerFlags
}
func NewController() *cobra.Command {
o := &ControllerCommandOptions{
basicFlags: controllercmd.NewControllerFlags(),
}
cmd := &cobra.Command{
Use: "serving-cert-signer",
Short: "Start the Service Serving Cert Signer controller",
Run: func(cmd *cobra.Command, args []string) {
// boiler plate for the "normal" command
rand.Seed(time.Now().UTC().UnixNano())
logs.InitLogs()
defer logs.FlushLogs()
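			// OPENSHIFT_ON_PANIC and OPENSHIFT_PROFILE let operators tune panic behavior
			// and enable profiling support at startup via the serviceability helpers below.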
defer serviceability.BehaviorOnPanic(os.Getenv("OPENSHIFT_ON_PANIC"), version.Get())()
defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop()
serviceability.StartProfiler()
if err := o.basicFlags.Validate(); err != nil {
glog.Fatal(err)
}
if err := o.StartController(); err != nil {
glog.Fatal(err)
}
},
}
o.basicFlags.AddFlags(cmd)
return cmd
}
// StartController runs the controller
func (o *ControllerCommandOptions) StartController() error {
uncastConfig, err := o.basicFlags.ToConfigObj(configScheme, servicecertsignerv1alpha1.SchemeGroupVersion)
if err != nil {
return err
}
// TODO this and how you get the leader election and serving info are the only unique things here
config, ok := uncastConfig.(*servicecertsignerv1alpha1.ServiceServingCertSignerConfig)
if !ok {
return fmt.Errorf("unexpected config: %T", uncastConfig)
}
opts := &servingcert.ServingCertOptions{Config: config, LeaderElection: configv1.LeaderElection{}}
return controllercmd.NewController(componentName, opts.RunServingCert).
WithKubeConfigFile(o.basicFlags.KubeConfigFile, nil).
WithLeaderElection(opts.LeaderElection, componentNamespace, componentName+"-lock").
Run()
}
|
[
"\"OPENSHIFT_ON_PANIC\"",
"\"OPENSHIFT_PROFILE\""
] |
[] |
[
"OPENSHIFT_PROFILE",
"OPENSHIFT_ON_PANIC"
] |
[]
|
["OPENSHIFT_PROFILE", "OPENSHIFT_ON_PANIC"]
|
go
| 2 | 0 | |
main.go
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/spf13/pflag"
"golang.org/x/crypto/ssh/terminal"
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"
)
var (
containerName = pflag.StringP("container", "c", "", "Container in which to execute the command. Defaults to only container if there is only one container in the pod.")
namespace = pflag.StringP("namespace", "n", "", "Namespace where pod is deployed. Defaults to default.")
command = pflag.StringSliceP("command", "e", []string{"sh"}, "The remote command to execute. Defaults to sh.")
help = pflag.BoolP("help", "h", false, "Prints help for application.")
)
func main() {
validUsageAndExitOnFailure()
kubeconfig := getConfig(os.Getenv("KUBECONFIG"))
k8sCliCfg, err := kubeconfig.ClientConfig()
fatalOnErr(err, "while getting client cfg")
k8sCoreCli, err := corev1.NewForConfig(k8sCliCfg)
fatalOnErr(err, "while creating core client")
podName := pflag.Arg(0)
ns, err := determineNamespace(kubeconfig)
fatalOnErr(err, "while getting default namespace")
req := k8sCoreCli.RESTClient().
Post().
Namespace(ns).
Resource("pods").
Name(podName).
SubResource("exec").
VersionedParams(&v1.PodExecOptions{
Container: *containerName,
Command: *command,
Stdin: true,
Stdout: true,
Stderr: true,
TTY: true,
}, scheme.ParameterCodec)
fmt.Printf("Exec to POD %s/%s with command %q\n", ns, podName, *command)
exec, err := remotecommand.NewSPDYExecutor(k8sCliCfg, http.MethodPost, req.URL())
fatalOnErr(err, "while creating SPDY executor")
// By default terminal starts in cooked mode (canonical).
// In this mode, keyboard input is preprocessed before being given to a program.
	// In raw mode the data is passed to the program without interpreting any of the special characters; this also
	// turns off the ECHO feature, because we are already connecting the streams between our terminal and the remote
	// shell process (Stdin: os.Stdin -> Stdout: os.Stdout).
oldState, err := terminal.MakeRaw(0)
fatalOnErr(err, "while putting terminal into raw mode")
defer terminal.Restore(0, oldState)
err = exec.Stream(remotecommand.StreamOptions{
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
Tty: true,
})
fatalOnErr(err, "connect to process")
}
func determineNamespace(cfg clientcmd.ClientConfig) (string, error) {
if *namespace != "" {
return *namespace, nil
}
ns, _, err := cfg.Namespace()
return ns, err
}
func getConfig(explicitKubeconfig string) clientcmd.ClientConfig {
rules := clientcmd.NewDefaultClientConfigLoadingRules()
rules.ExplicitPath = explicitKubeconfig
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{})
}
func fatalOnErr(err error, ctx string) {
if err != nil {
log.Fatalf("%s: %v", ctx, err)
}
}
func validUsageAndExitOnFailure() {
pflag.Parse()
if *help {
printHelpAndExit()
}
if pflag.NArg() == 0 || pflag.NArg() > 1 {
printArgErrMsgAndExit()
}
}
func printHelpAndExit() {
fmt.Println("Execute a command in a container.")
fmt.Printf("Usage: \n \t '%s POD_NAME [-c CONTAINER] [-n NAMESPACE] [-e COMMAND]'\n", os.Args[0])
fmt.Println("Options:")
pflag.PrintDefaults()
os.Exit(0)
}
func printArgErrMsgAndExit() {
fmt.Printf("Expected '%s POD_NAME [-c CONTAINER] [-n NAMESPACE] [-e COMMAND]'\n", os.Args[0])
fmt.Printf("POD is a required argument for the %s command\n", os.Args[0])
fmt.Println()
fmt.Println("Options:")
pflag.PrintDefaults()
os.Exit(1)
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
vortex/middlewares/headers.py
|
import os
from distutils.util import strtobool
from urllib.parse import urlparse
from aiohttp.web import Response
from vortex.config import DOMAIN
from vortex.middlewares import middleware
ALLOWED_ORIGINS = os.getenv("VORTEX_ALLOWED_ORIGINS", "localhost")
DISABLE_ORIGIN_CHECK = strtobool(os.getenv("VORTEX_DISABLE_ORIGIN_CHECK", "False"))
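# NOTE: ALLOWED_ORIGINS is used below as a single host suffix for str.endswith();
# a comma-separated list would need to be split into a tuple before that check.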
ACCEPT = [
"text/html",
"application/xhtml+xml",
"application/xml",
"application/json;q=0.9",
"*/*;q=0.8",
]
@middleware
async def headers_middleware(request, handler):
origin = request.headers.get("Origin")
if origin:
parsed = urlparse(origin)
request.domain = parsed.hostname
else:
request.domain = DOMAIN or urlparse(str(request.url)).hostname
if request.method != "OPTIONS":
response = await handler(request)
else:
response = Response()
if origin and (
DISABLE_ORIGIN_CHECK
or (request.domain and request.domain.endswith(ALLOWED_ORIGINS))
):
response.headers["Access-Control-Allow-Origin"] = origin
response.headers["Access-Control-Allow-Credentials"] = "true"
response.headers[
"Access-Control-Allow-Headers"
] = "Origin, X-Requested-With, Content-Type, Accept, Authorization"
response.headers["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS, DELETE, PUT"
response.headers["Accept"] = ",".join(ACCEPT)
response.headers["Accept-Language"] = "en-us,en;q=0.5"
response.headers["Accept-Encoding"] = "gzip,deflate"
response.headers["Accept-Charset"] = "ISO-8859-1,utf-8;q=0.7,*;q=0.7"
return response
|
[] |
[] |
[
"VORTEX_DISABLE_ORIGIN_CHECK",
"VORTEX_ALLOWED_ORIGINS"
] |
[]
|
["VORTEX_DISABLE_ORIGIN_CHECK", "VORTEX_ALLOWED_ORIGINS"]
|
python
| 2 | 0 | |
MT_Training.py
|
#!/usr/bin/python
import sys
import os
import subprocess
from os.path import join, isdir
import numpy as np
import fileinput
import json
import random
from itertools import chain
from numpy.random import permutation
##------------------------------------------------------------------
import torch
from torch.autograd import Variable
#----------------------------------------
import torch.nn as nn
from torch import autograd, nn, optim
os.environ['PYTHONUNBUFFERED'] = '0'
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from random import shuffle
from statistics import mean
import matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('agg')
matplotlib.pyplot.viridis()
import glob
#*************************************************************************************************************************
####### Loading the Parser and default arguments
sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/MT_Transformer/MT_TransV1/')
import MT_Transformer_arg
from MT_Transformer_arg import parser
args = parser.parse_args()
###save architecture for decoding
model_path_name=join(args.model_dir,'model_architecture_')
with open(model_path_name, 'w') as f:
json.dump(args.__dict__, f, indent=2)
print(args)
# #####setting the gpus in the gpu cluster
# #**********************************
from Set_gpus import Set_gpu
if args.gpu:
Set_gpu()
###----------------------------------------
#==============================================================
from Dataloader_for_MT_v2 import DataLoader
from TRANSFORMER_MT_V1 import Transformer
from Initializing_Transformer_MT import Initialize_Att_model
from Training_loop_MT import train_val_model
from Load_sp_model import Load_sp_models
##==================================
#==============================================================
if not isdir(args.model_dir):
os.makedirs(args.model_dir)
png_dir=args.model_dir+'_png'
if not isdir(png_dir):
os.makedirs(png_dir)
############################################
#=============================================================
def main():
    ##Load sentencepiece models for the DataLoaders
Src_model=Load_sp_models(args.Src_model_path)
Tgt_model=Load_sp_models(args.Tgt_model_path)
    ###initialize the model
model,optimizer=Initialize_Att_model(args)
#============================================================
#------------------------------------------------------------
train_gen = DataLoader({'files': glob.glob(args.data_dir + "train_splits_V2/*"),
'max_batch_label_len': args.max_batch_label_len,
'max_batch_len': args.max_batch_len,
'max_feat_len': args.max_feat_len,
'max_label_len': args.max_label_len,
'Src_model': Src_model,
'Tgt_model': Tgt_model,
'queue_size': 100,
'apply_cmvn': 1,
'min_words': args.min_words,
'max_words': args.max_words,
'min_len_ratio': args.min_len_ratio})
dev_gen = DataLoader({'files': glob.glob(args.data_dir + "dev_splits/*"),
'max_batch_label_len': 20000,
'max_batch_len': args.max_batch_len,
'max_feat_len': 1000,
'max_label_len': 1000,
'Src_model': Src_model,
'Tgt_model': Tgt_model,
'queue_size': 100,
'apply_cmvn': 1,
'min_words': 0,
'max_words': 10000,
'min_len_ratio': 4})
#Flags that may change while training
val_history=np.zeros(args.nepochs)
#======================================
for epoch in range(args.nepochs):
##start of the epoch
tr_CER=[]; tr_BPE_CER=[]; L_train_cost=[]
model.train();
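        # With gradient accumulation (accm_grad > 0) each optimizer step spans accm_grad mini-batches,
        # so the validation interval is scaled to keep the number of optimizer updates per cycle unchanged.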
validate_interval = int(args.validate_interval * args.accm_grad) if args.accm_grad>0 else args.validate_interval
for trs_no in range(validate_interval):
B1 = train_gen.next()
assert B1 is not None, "None should never come out of the DataLoader"
Output_trainval_dict=train_val_model(smp_no=trs_no,
args = args,
model = model,
optimizer = optimizer,
data_dict = B1,
trainflag = True)
#
#
#get the losses form the dict
L_train_cost.append(Output_trainval_dict.get('cost_cpu'))
tr_CER.append(Output_trainval_dict.get('Char_cer'))
tr_BPE_CER.append(Output_trainval_dict.get('Word_cer'))
#attention_map=Output_trainval_dict.get('attention_record').data.cpu().numpy()
#==========================================
if (trs_no%args.tr_disp==0):
print("tr ep:==:>",epoch,"sampl no:==:>",trs_no,"train_cost==:>",__mean(L_train_cost),"CER:",__mean(tr_CER),'BPE_CER',__mean(tr_BPE_CER),flush=True)
#------------------------
if args.plot_fig_training:
plot_name=join(png_dir,'train_epoch'+str(epoch)+'_attention_single_file_'+str(trs_no)+'.png')
plotting(plot_name,attention_map)
###validate the model
model.eval()
#=======================================================
Vl_CER=[]; Vl_BPE_CER=[];L_val_cost=[]
val_examples=0
for vl_smp in range(args.max_val_examples):
            B1 = dev_gen.next()
            assert B1 is not None, "None should never come out of the DataLoader"
            smp_feat = B1.get('smp_Src_data')
            val_examples+=smp_feat.shape[0]
            ## break once enough validation examples have been seen
if (val_examples >= args.max_val_examples):
break;
#--------------------------------------
Val_Output_trainval_dict=train_val_model(smp_no=trs_no,
args=args,
model = model,
optimizer = optimizer,
data_dict = B1,
trainflag = False)
L_val_cost.append(Val_Output_trainval_dict.get('cost_cpu'))
Vl_CER.append(Val_Output_trainval_dict.get('Char_cer'))
Vl_BPE_CER.append(Val_Output_trainval_dict.get('Word_cer'))
#attention_map=Val_Output_trainval_dict.get('attention_record').data.cpu().numpy()
#======================================================
#======================================================
if (vl_smp%args.vl_disp==0) or (val_examples==args.max_val_examples-1):
print("val epoch:==:>",epoch,"val smp no:==:>",vl_smp,"val_cost:==:>",__mean(L_val_cost),"CER:",__mean(Vl_CER),'BPE_CER',__mean(Vl_BPE_CER),flush=True)
if args.plot_fig_validation:
plot_name=join(png_dir,'val_epoch'+str(epoch)+'_attention_single_file_'+str(vl_smp)+'.png')
plotting(plot_name,attention_map)
#----------------------------------------------------
#==================================================================
val_history[epoch]=(__mean(Vl_CER)*100)
print("val_history:",val_history[:epoch+1])
#==================================================================
####saving_weights
ct="model_epoch_"+str(epoch)+"_sample_"+str(trs_no)+"_"+str(__mean(L_train_cost))+"___"+str(__mean(L_val_cost))+"__"+str(__mean(Vl_CER))
print(ct)
torch.save(model.state_dict(),join(args.model_dir,str(ct)))
#######################################################
#######################################################
###open the file write and close it to avoid delays
with open(args.weight_text_file,'a+') as weight_saving_file:
print(join(args.model_dir,str(ct)), file=weight_saving_file)
with open(args.Res_text_file,'a+') as Res_saving_file:
print(float(__mean(Vl_CER)), file=Res_saving_file)
#=================================
# early_stopping and checkpoint averaging:
##print(np.array(val_his[i:i+5]),np.any(np.abs(np.array(val_his[i:i+5])-np.array(val_his[i]))>0.6))
if args.early_stopping:
A=val_history
Non_zero_loss=A[A>0]
min_cpts=np.argmin(Non_zero_loss)
Non_zero_len=len(Non_zero_loss)
if ((Non_zero_len-min_cpts)>1):
weight_noise_flag=True
spec_aug_flag=True
#-----------------------
if epoch>args.early_stopping_patience:
#if (Non_zero_len-min_cpts) > args.early_stopping_patience:
#np.any(np.abs(A[i:i+5]-A[i])>0.5)==False
if np.any(np.abs( Non_zero_loss[ epoch - args.early_stopping_patience:epoch ] - Non_zero_loss[epoch-1])>0.5)==False:
"General early stopping has over trained the model or may be should i regularize with dropout"
print("The model is early stopping........","minimum value of model is:",min_cpts)
exit(0)
#======================================================
def __mean(inp):
"""
"""
if len(inp)==1:
return inp[0]
else:
return mean(inp)
#=============================================================================================
if __name__ == '__main__':
main()
|
[] |
[] |
[
"PYTHONUNBUFFERED"
] |
[]
|
["PYTHONUNBUFFERED"]
|
python
| 1 | 0 | |
real_estate/celery.py
|
from __future__ import absolute_import
import os
from celery import Celery
from real_estate.settings import base
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "real_estate.settings.development")
app = Celery("real_estate")
app.config_from_object("real_estate.settings.development", namespace="CELERY")
app.autodiscover_tasks(lambda: base.INSTALLED_APPS)
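# Illustrative usage (not part of this module): any app listed in INSTALLED_APPS can expose
# tasks in a tasks.py module, e.g.
#   from celery import shared_task
#   @shared_task
#   def send_listing_alert(listing_id): ...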
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
WebKit/Tools/Scripts/webkitpy/common/host.py
|
# Copyright (c) 2010 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import sys
from webkitpy.common.checkout import Checkout
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.memoized import memoized
from webkitpy.common.net import bugzilla, buildbot, web
from webkitpy.common.net.buildbot.chromiumbuildbot import ChromiumBuildBot
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.watchlist.watchlistloader import WatchListLoader
from webkitpy.layout_tests.port.factory import PortFactory
_log = logging.getLogger(__name__)
class Host(SystemHost):
def __init__(self):
SystemHost.__init__(self)
self.web = web.Web()
# FIXME: Checkout should own the scm object.
self._scm = None
self._checkout = None
# Everything below this line is WebKit-specific and belongs on a higher-level object.
self.bugs = bugzilla.Bugzilla()
self.buildbot = buildbot.BuildBot()
# FIXME: Unfortunately Port objects are currently the central-dispatch objects of the NRWT world.
# In order to instantiate a port correctly, we have to pass it at least an executive, user, scm, and filesystem
# so for now we just pass along the whole Host object.
# FIXME: PortFactory doesn't belong on this Host object if Port is going to have a Host (circular dependency).
self.port_factory = PortFactory(self)
self._engage_awesome_locale_hacks()
# We call this from the Host constructor, as it's one of the
# earliest calls made for all webkitpy-based programs.
def _engage_awesome_locale_hacks(self):
# To make life easier on our non-english users, we override
# the locale environment variables inside webkitpy.
# If we don't do this, programs like SVN will output localized
# messages and svn.py will fail to parse them.
# FIXME: We should do these overrides *only* for the subprocesses we know need them!
# This hack only works in unix environments.
os.environ['LANGUAGE'] = 'en'
os.environ['LANG'] = 'en_US.UTF-8'
os.environ['LC_MESSAGES'] = 'en_US.UTF-8'
os.environ['LC_ALL'] = ''
# FIXME: This is a horrible, horrible hack for ChromiumWin and should be removed.
# Maybe this belongs in SVN in some more generic "find the svn binary" codepath?
# Or possibly Executive should have a way to emulate shell path-lookups?
# FIXME: Unclear how to test this, since it currently mutates global state on SVN.
def _engage_awesome_windows_hacks(self):
try:
self.executive.run_command(['svn', 'help'])
except OSError, e:
try:
self.executive.run_command(['svn.bat', 'help'])
# Chromium Win uses the depot_tools package, which contains a number
# of development tools, including Python and svn. Instead of using a
# real svn executable, depot_tools indirects via a batch file, called
# svn.bat. This batch file allows depot_tools to auto-update the real
# svn executable, which is contained in a subdirectory.
#
# That's all fine and good, except that subprocess.popen can detect
# the difference between a real svn executable and batch file when we
# don't provide use shell=True. Rather than use shell=True on Windows,
# We hack the svn.bat name into the SVN class.
_log.debug('Engaging svn.bat Windows hack.')
from webkitpy.common.checkout.scm.svn import SVN
SVN.executable_name = 'svn.bat'
except OSError, e:
_log.debug('Failed to engage svn.bat Windows hack.')
def _initialize_scm(self, patch_directories=None):
if sys.platform == "win32":
self._engage_awesome_windows_hacks()
detector = SCMDetector(self.filesystem, self.executive)
self._scm = detector.default_scm(patch_directories)
self._checkout = Checkout(self.scm())
def scm(self):
return self._scm
def checkout(self):
return self._checkout
@memoized
def chromium_buildbot(self):
return ChromiumBuildBot()
@memoized
def watch_list(self):
return WatchListLoader(self.filesystem).load()
|
[] |
[] |
[
"LC_ALL",
"LC_MESSAGES",
"LANG",
"LANGUAGE"
] |
[]
|
["LC_ALL", "LC_MESSAGES", "LANG", "LANGUAGE"]
|
python
| 4 | 0 | |
webapp/src/product/products.go
|
package main
import (
"encoding/json"
"fmt"
"os"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)
type Item struct {
Id int64 `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Price float64 `json:"price"`
}
func GetItemById(id string) (Item, error) {
// Build the Dynamo client object
sess := session.Must(session.NewSession())
svc := dynamodb.New(sess)
var item Item
// Perform the query
fmt.Println("Trying to read from table: ", os.Getenv("TABLE_NAME"))
result, err := svc.GetItem(&dynamodb.GetItemInput{
TableName: aws.String(os.Getenv("TABLE_NAME")),
Key: map[string]*dynamodb.AttributeValue{
"id": {
N: aws.String(id),
},
},
})
if err != nil {
fmt.Println(err.Error())
return item, err
}
	// Unmarshal the result into an Item
err = dynamodbattribute.UnmarshalMap(result.Item, &item)
if err != nil {
fmt.Println(err.Error())
return item, err
}
return item, nil
}
func GetAllItems() (Item, error) {
sess := session.Must(session.NewSession())
svc := dynamodb.New(sess)
item := Item{}
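	// NOTE: GetItemInput requires a Key, so this call as written will fail at runtime;
	// fetching every row would normally use svc.Scan together with dynamodbattribute.UnmarshalListOfMaps.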
// Perform the query
fmt.Println("Trying to read from table: ", os.Getenv("TABLE_NAME"))
result, err := svc.GetItem(&dynamodb.GetItemInput{
TableName: aws.String(os.Getenv("TABLE_NAME")),
})
if err != nil {
fmt.Println(err.Error())
return item, err
}
	// Unmarshal the result into an Item
err = dynamodbattribute.UnmarshalMap(result.Item, &item)
if err != nil {
fmt.Println(err.Error())
return item, err
}
return item, nil
}
func AddItem(body string) (Item, error) {
// Create the dynamo client object
sess := session.Must(session.NewSession())
svc := dynamodb.New(sess)
	// Marshal the request body
var thisItem Item
json.Unmarshal([]byte(body), &thisItem)
thisItem.Id = time.Now().UnixNano()
fmt.Println("Item to add:", thisItem)
// Marshall the Item into a Map DynamoDB can deal with
av, err := dynamodbattribute.MarshalMap(thisItem)
if err != nil {
fmt.Println("Got error marshalling map:")
fmt.Println(err.Error())
return thisItem, err
}
// Create Item in table and return
input := &dynamodb.PutItemInput{
Item: av,
TableName: aws.String(os.Getenv("TABLE_NAME")),
}
_, err = svc.PutItem(input)
return thisItem, err
}
func DeleteItem(id string) error {
// Build the Dynamo client object
sess := session.Must(session.NewSession())
svc := dynamodb.New(sess)
// Perform the delete
input := &dynamodb.DeleteItemInput{
Key: map[string]*dynamodb.AttributeValue{
"id": {
N: aws.String(id),
},
},
TableName: aws.String(os.Getenv("TABLE_NAME")),
}
_, err := svc.DeleteItem(input)
if err != nil {
fmt.Println(err.Error())
return err
}
return nil
}
func EditItem(body string) (Item, error) {
// Create the dynamo client object
sess := session.Must(session.NewSession())
svc := dynamodb.New(sess)
	// Marshal the request body
var thisItem Item
json.Unmarshal([]byte(body), &thisItem)
fmt.Println("Item to update:", thisItem)
// Update Item in table and return
input := &dynamodb.UpdateItemInput{
		ExpressionAttributeNames: map[string]*string{
			// alias the attribute names so reserved words (e.g. "name") are safe to use
			"#n": aws.String("name"),
			"#d": aws.String("description"),
			"#p": aws.String("price"),
		},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
":name": {
S: aws.String(thisItem.Name),
},
":description": {
S: aws.String(thisItem.Description),
},
":price": {
N: aws.String(strconv.FormatFloat(thisItem.Price, 'f', 2, 64)),
},
},
TableName: aws.String(os.Getenv("TABLE_NAME")),
Key: map[string]*dynamodb.AttributeValue{
"id": {
N: aws.String(strconv.FormatInt(thisItem.Id, 10)),
},
},
ReturnValues: aws.String("UPDATED_NEW"),
		UpdateExpression: aws.String("set #n = :name, #d = :description, #p = :price"),
}
_, err := svc.UpdateItem(input)
if err != nil {
fmt.Println(err.Error())
}
return thisItem, err
}
|
[
"\"TABLE_NAME\"",
"\"TABLE_NAME\"",
"\"TABLE_NAME\"",
"\"TABLE_NAME\"",
"\"TABLE_NAME\"",
"\"TABLE_NAME\"",
"\"TABLE_NAME\""
] |
[] |
[
"TABLE_NAME"
] |
[]
|
["TABLE_NAME"]
|
go
| 1 | 0 | |
src/radical/pilot/agent/resource_manager/pbspro.py
|
__copyright__ = 'Copyright 2016-2021, The RADICAL-Cybertools Team'
__license__ = 'MIT'
import os
import subprocess
from typing import List
import radical.utils as ru
from .base import RMInfo, ResourceManager
# ------------------------------------------------------------------------------
#
class PBSPro(ResourceManager):
# --------------------------------------------------------------------------
#
def _init_from_scratch(self, rm_info: RMInfo) -> RMInfo:
# TODO: $NCPUS?!?! = 1 on archer
cpn = os.environ.get('NUM_PPN') or os.environ.get('SAGA_PPN')
if not cpn:
raise RuntimeError('$NUM_PPN and $SAGA_PPN not set!')
pbspro_vnodes = self._parse_pbspro_vnodes()
nodes = [(name, int(cpn)) for name in sorted(pbspro_vnodes)]
if not rm_info.cores_per_node:
rm_info.cores_per_node = self._get_cores_per_node(nodes)
rm_info.node_list = self._get_node_list(nodes, rm_info)
return rm_info
# --------------------------------------------------------------------------
#
def _parse_pbspro_vnodes(self) -> List[str]:
# PBS Job ID
pbspro_jobid = os.environ.get('PBS_JOBID')
if not pbspro_jobid:
raise RuntimeError('$PBS_JOBID not set')
# Get the output of qstat -f for this job
output = subprocess.check_output(['qstat', '-f', pbspro_jobid])
# Get the (multiline) 'exec_vnode' entry
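        # A typical entry looks like: exec_vnode = (node1[0]:ncpus=36)+(node2[0]:ncpus=36)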
vnodes_str = ''
for line in output.splitlines():
line = ru.as_string(line)
# Detect start of entry
if 'exec_vnode = ' in line:
vnodes_str += line.strip()
elif vnodes_str:
# Find continuing lines
if ' = ' not in line:
vnodes_str += line.strip()
else:
break
# Get the RHS of the entry
rhs = vnodes_str.split('=', 1)[1].strip()
self._log.debug('input: %s', rhs)
nodes_list = []
# Break up the individual node partitions into vnode slices
while True:
idx = rhs.find(')+(')
node_str = rhs[1:idx]
nodes_list.append(node_str)
rhs = rhs[idx + 2:]
if idx < 0:
break
vnodes_list = []
cpus_list = []
# Split out the slices into vnode name and cpu count
for node_str in nodes_list:
slices = node_str.split('+')
for _slice in slices:
vnode, cpus = _slice.split(':')
cpus = int(cpus.split('=')[1])
self._log.debug('vnode: %s cpus: %s', vnode, cpus)
vnodes_list.append(vnode)
cpus_list.append(cpus)
self._log.debug('vnodes: %s', vnodes_list)
self._log.debug('cpus: %s', cpus_list)
cpus_list = list(set(cpus_list))
min_cpus = int(min(cpus_list))
if len(cpus_list) > 1:
self._log.debug('Detected vnodes of different sizes: %s, ' +
'the minimal is: %d.', cpus_list, min_cpus)
node_list = []
for vnode in vnodes_list:
node_list.append(vnode)
# only unique node names
node_list = list(set(node_list))
self._log.debug('Node list: %s', node_list)
# Return the list of node names
return sorted(node_list)
# ------------------------------------------------------------------------------
|
[] |
[] |
[
"NUM_PPN",
"PBS_JOBID",
"SAGA_PPN"
] |
[]
|
["NUM_PPN", "PBS_JOBID", "SAGA_PPN"]
|
python
| 3 | 0 | |
crabageprediction/venv/Lib/site-packages/matplotlib/tests/test_sphinxext.py
|
"""Tests for tinypages build using sphinx extensions."""
import filecmp
import os
from pathlib import Path
import shutil
from subprocess import Popen, PIPE
import sys
import pytest
pytest.importorskip('sphinx',
minversion=None if sys.version_info < (3, 10) else '4.1.3')
def test_tinypages(tmpdir):
source_dir = Path(tmpdir) / 'src'
shutil.copytree(Path(__file__).parent / 'tinypages', source_dir)
html_dir = source_dir / '_build' / 'html'
doctree_dir = source_dir / 'doctrees'
# Build the pages with warnings turned into errors
cmd = [sys.executable, '-msphinx', '-W', '-b', 'html',
'-d', str(doctree_dir),
str(Path(__file__).parent / 'tinypages'), str(html_dir)]
# On CI, gcov emits warnings (due to agg headers being included with the
# same name in multiple extension modules -- but we don't care about their
# coverage anyways); hide them using GCOV_ERROR_FILE.
proc = Popen(
cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True,
env={**os.environ, "MPLBACKEND": "", "GCOV_ERROR_FILE": os.devnull})
out, err = proc.communicate()
# Build the pages with warnings turned into errors
build_sphinx_html(source_dir, doctree_dir, html_dir)
def plot_file(num):
return html_dir / f'some_plots-{num}.png'
def plot_directive_file(num):
# This is always next to the doctree dir.
return doctree_dir.parent / 'plot_directive' / f'some_plots-{num}.png'
range_10, range_6, range_4 = [plot_file(i) for i in range(1, 4)]
# Plot 5 is range(6) plot
assert filecmp.cmp(range_6, plot_file(5))
# Plot 7 is range(4) plot
assert filecmp.cmp(range_4, plot_file(7))
# Plot 11 is range(10) plot
assert filecmp.cmp(range_10, plot_file(11))
# Plot 12 uses the old range(10) figure and the new range(6) figure
assert filecmp.cmp(range_10, plot_file('12_00'))
assert filecmp.cmp(range_6, plot_file('12_01'))
# Plot 13 shows close-figs in action
assert filecmp.cmp(range_4, plot_file(13))
# Plot 14 has included source
html_contents = (html_dir / 'some_plots.html').read_bytes()
assert b'# Only a comment' in html_contents
# check plot defined in external file.
assert filecmp.cmp(range_4, html_dir / 'range4.png')
assert filecmp.cmp(range_6, html_dir / 'range6.png')
# check if figure caption made it into html file
assert b'This is the caption for plot 15.' in html_contents
# check if figure caption using :caption: made it into html file
assert b'Plot 17 uses the caption option.' in html_contents
# check if figure caption made it into html file
assert b'This is the caption for plot 18.' in html_contents
# check if the custom classes made it into the html file
assert b'plot-directive my-class my-other-class' in html_contents
# check that the multi-image caption is applied twice
assert html_contents.count(b'This caption applies to both plots.') == 2
# Plot 21 is range(6) plot via an include directive. But because some of
# the previous plots are repeated, the argument to plot_file() is only 17.
assert filecmp.cmp(range_6, plot_file(17))
# Modify the included plot
contents = (source_dir / 'included_plot_21.rst').read_text()
contents = contents.replace('plt.plot(range(6))', 'plt.plot(range(4))')
(source_dir / 'included_plot_21.rst').write_text(contents)
# Build the pages again and check that the modified file was updated
modification_times = [plot_directive_file(i).stat().st_mtime
for i in (1, 2, 3, 5)]
build_sphinx_html(source_dir, doctree_dir, html_dir)
assert filecmp.cmp(range_4, plot_file(17))
# Check that the plots in the plot_directive folder weren't changed.
# (plot_directive_file(1) won't be modified, but it will be copied to html/
# upon compilation, so plot_file(1) will be modified)
assert plot_directive_file(1).stat().st_mtime == modification_times[0]
assert plot_directive_file(2).stat().st_mtime == modification_times[1]
assert plot_directive_file(3).stat().st_mtime == modification_times[2]
assert filecmp.cmp(range_10, plot_file(1))
assert filecmp.cmp(range_6, plot_file(2))
assert filecmp.cmp(range_4, plot_file(3))
# Make sure that figures marked with context are re-created (but that the
# contents are the same)
assert plot_directive_file(5).stat().st_mtime > modification_times[3]
assert filecmp.cmp(range_6, plot_file(5))
def test_plot_html_show_source_link(tmpdir):
source_dir = Path(tmpdir) / 'src'
source_dir.mkdir()
parent = Path(__file__).parent
shutil.copyfile(parent / 'tinypages/conf.py', source_dir / 'conf.py')
shutil.copytree(parent / 'tinypages/_static', source_dir / '_static')
doctree_dir = source_dir / 'doctrees'
(source_dir / 'index.rst').write_text("""
.. plot::
plt.plot(range(2))
""")
# Make sure source scripts are created by default
html_dir1 = source_dir / '_build' / 'html1'
build_sphinx_html(source_dir, doctree_dir, html_dir1)
assert "index-1.py" in [p.name for p in html_dir1.iterdir()]
# Make sure source scripts are NOT created when
# plot_html_show_source_link` is False
html_dir2 = source_dir / '_build' / 'html2'
build_sphinx_html(source_dir, doctree_dir, html_dir2,
extra_args=['-D', 'plot_html_show_source_link=0'])
assert "index-1.py" not in [p.name for p in html_dir2.iterdir()]
def build_sphinx_html(source_dir, doctree_dir, html_dir, extra_args=None):
# Build the pages with warnings turned into errors
extra_args = [] if extra_args is None else extra_args
cmd = [sys.executable, '-msphinx', '-W', '-b', 'html',
'-d', str(doctree_dir), str(source_dir), str(html_dir), *extra_args]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True,
env={**os.environ, "MPLBACKEND": ""})
out, err = proc.communicate()
assert proc.returncode == 0, \
f"sphinx build failed with stdout:\n{out}\nstderr:\n{err}\n"
if err:
pytest.fail(f"sphinx build emitted the following warnings:\n{err}")
assert html_dir.is_dir()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/config.go
|
package main
import (
"encoding/json"
"fmt"
"github.com/wunderlist/hamustro/src/dialects"
"github.com/wunderlist/hamustro/src/dialects/abs"
"github.com/wunderlist/hamustro/src/dialects/aqs"
"github.com/wunderlist/hamustro/src/dialects/file"
"github.com/wunderlist/hamustro/src/dialects/s3"
"github.com/wunderlist/hamustro/src/dialects/sns"
"io/ioutil"
"log"
"os"
"runtime"
"strconv"
"strings"
)
// Application configuration
type Config struct {
LogFile string `json:"logfile"`
Dialect string `json:"dialect"`
MaxWorkerSize int `json:"max_worker_size"`
MaxQueueSize int `json:"max_queue_size"`
RetryAttempt int `json:"retry_attempt"`
BufferSize int `json:"buffer_size"`
MaskedIP bool `json:"masked_ip"`
SpreadBufferSize bool `json:"spread_buffer_size"`
Signature string `json:"signature"`
SharedSecret string `json:"shared_secret"`
MaintenanceKey string `json:"maintenance_key"`
AutoFlushInterval int `json:"auto_flush_interval"`
AQS aqs.Config `json:"aqs"`
SNS sns.Config `json:"sns"`
ABS abs.Config `json:"abs"`
S3 s3.Config `json:"s3"`
File file.Config `json:"file"`
}
// Creates a new configuration object
func NewConfig(filename string) *Config {
file, err := ioutil.ReadFile(filename)
if err != nil {
log.Fatal(err)
}
var config Config
if err := json.Unmarshal(file, &config); err != nil {
log.Fatal(err)
}
config.UpdateAutoFlushIntervalToSeconds()
return &config
}
// Configuration validation
func (c *Config) IsValid() bool {
return c.Dialect != "" && c.SharedSecret != ""
}
// Get Signature's status
func (c *Config) IsSignatureRequired() bool {
switch c.Signature {
case "required":
return true
case "optional":
return false
default:
return true
}
}
// Returns the maximum worker size
func (c *Config) GetMaxWorkerSize() int {
size, _ := strconv.ParseInt(os.Getenv("HAMUSTRO_MAX_WORKER_SIZE"), 10, 0)
if size != 0 {
return int(size)
}
if c.MaxWorkerSize != 0 {
return c.MaxWorkerSize
}
return runtime.NumCPU() + 1
}
// Returns the maximum queue size
func (c *Config) GetMaxQueueSize() int {
size, _ := strconv.ParseInt(os.Getenv("HAMUSTRO_MAX_QUEUE_SIZE"), 10, 0)
if size != 0 {
return int(size)
}
if c.MaxQueueSize != 0 {
return c.MaxQueueSize
}
return c.GetMaxWorkerSize() * 20
}
// Returns the port of the application
func (c *Config) GetPort() string {
if port := os.Getenv("HAMUSTRO_PORT"); port != "" {
return port
}
return "8080"
}
// Returns the host of the application
func (c *Config) GetHost() string {
if port := os.Getenv("HAMUSTRO_HOST"); port != "" {
return port
}
return "localhost"
}
// Returns the address of the application
func (c *Config) GetAddress() string {
host := c.GetHost()
if host == "localhost" {
return ":" + c.GetPort()
}
return host + ":" + c.GetPort()
}
// Returns the default buffer size for Buffered Storage.
func (c *Config) GetBufferSize() int {
if c.BufferSize != 0 {
return c.BufferSize
}
return (c.GetMaxWorkerSize() * c.GetMaxQueueSize()) * 10
}
// Update automatic flush interval to seconds
func (c *Config) UpdateAutoFlushIntervalToSeconds() {
c.AutoFlushInterval = c.AutoFlushInterval * 60
}
// Returns the default spreding property
func (c *Config) IsSpreadBuffer() bool {
return c.SpreadBufferSize
}
// Should we truncate the IP address
func (c *Config) IsMaskedIP() bool {
return c.MaskedIP
}
// Returns the retry attempt number
func (c *Config) GetRetryAttempt() int {
if c.RetryAttempt != 0 {
return c.RetryAttempt
}
return 3
}
// Returns the selected dialect's configuration object
func (c *Config) DialectConfig() (dialects.Dialect, error) {
switch strings.ToLower(c.Dialect) {
case "aqs":
return &c.AQS, nil
case "sns":
return &c.SNS, nil
case "abs":
return &c.ABS, nil
case "s3":
return &c.S3, nil
case "file":
return &c.File, nil
}
return nil, fmt.Errorf("Not supported `%s` dialect in the configuration file.", c.Dialect)
}
|
[
"\"HAMUSTRO_MAX_WORKER_SIZE\"",
"\"HAMUSTRO_MAX_QUEUE_SIZE\"",
"\"HAMUSTRO_PORT\"",
"\"HAMUSTRO_HOST\""
] |
[] |
[
"HAMUSTRO_MAX_WORKER_SIZE",
"HAMUSTRO_MAX_QUEUE_SIZE",
"HAMUSTRO_HOST",
"HAMUSTRO_PORT"
] |
[]
|
["HAMUSTRO_MAX_WORKER_SIZE", "HAMUSTRO_MAX_QUEUE_SIZE", "HAMUSTRO_HOST", "HAMUSTRO_PORT"]
|
go
| 4 | 0 | |
nuv/util.go
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package main
import (
"errors"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
)
var dryRunBuf = []string{}
// DryRunPush saves dummy results for dry run execution
func DryRunPush(buf ...string) {
dryRunBuf = buf
}
// DryRunPop returns a value from the buffer of dry run results
// returns an empty string if the buffer is empty
func DryRunPop(buf ...string) string {
res := ""
if len(dryRunBuf) > 0 {
res = dryRunBuf[0]
dryRunBuf = dryRunBuf[1:]
}
return res
}
// SysErr executes a command in a convenient way:
// it splits the parameter into arguments if separated by spaces,
// then accepts multiple arguments;
// logs errors in stderr and prints output in stdout;
// also returns output as a string, or an error if there is an error
// If the command starts with "@" do not print the output.
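// Example (illustrative): SysErr("@ls -l", "/tmp") runs `ls -l /tmp`, returns its output,
// and suppresses the printed output because of the leading "@".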
func SysErr(cli string, args ...string) (string, error) {
return sysErr(false, cli, args...)
}
// DryRunSysErr performs a dry run of SysErr
// in this case it always prints the command
func DryRunSysErr(cli string, args ...string) (string, error) {
return sysErr(true, cli, args...)
}
func sysErr(dryRun bool, cli string, args ...string) (string, error) {
re := regexp.MustCompile(`[\r\t\n\f ]+`)
a := strings.Split(re.ReplaceAllString(cli, " "), " ")
params := args
//fmt.Println(params)
if len(a) > 1 {
params = append(a[1:], args...)
}
exe := strings.TrimPrefix(a[0], "@")
silent := strings.HasPrefix(a[0], "@")
if dryRun {
if len(params) > 0 {
fmt.Printf("%s %s\n", exe, strings.Join(params, " "))
} else {
fmt.Println(exe)
}
res := DryRunPop()
if strings.HasPrefix(res, "!") {
return "", errors.New(res[1:])
}
return res, nil
}
cmd := exec.Command(exe, params...)
out, err := cmd.CombinedOutput()
res := string(out)
if err != nil {
return "", err
}
if !silent {
fmt.Print(res)
}
return res, nil
}
func ExecutingInContainer() bool {
fsys := os.DirFS("/")
// if .dockerenv exists and is a regular file
if info, err := fs.Stat(fsys, ".dockerenv"); os.IsNotExist(err) || !info.Mode().IsRegular() {
return false
}
// and if docker-host.sock exists and is a socket
if info, err := fs.Stat(fsys, "var/run/docker-host.sock"); os.IsNotExist(err) || info.Mode().Type() != fs.ModeSocket {
return false
}
return true
}
func DockerHostKind() error {
if os.Args[1] == "create" || os.Args[1] == "delete" {
appendKubeConfig()
}
os.Args = append([]string{"env", "DOCKER_HOST=unix:///var/run/docker-host.sock"}, os.Args...)
cmd := exec.Command("sudo", os.Args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
return err
}
func DockerHostEmpty() bool {
return len(os.Getenv("DOCKER_HOST")) == 0
}
func appendKubeConfig() {
homedir, _ := GetHomeDir()
kc := filepath.Join(homedir, ".kube/config")
os.Args = append(os.Args, "--kubeconfig="+kc)
}
|
[
"\"DOCKER_HOST\""
] |
[] |
[
"DOCKER_HOST"
] |
[]
|
["DOCKER_HOST"]
|
go
| 1 | 0 | |
test/test_lyrics.py
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'lyrics' plugin."""
from __future__ import absolute_import, division, print_function
import os
import re
import six
import sys
import unittest
from mock import patch
from test import _common
from beets import logging
from beets.library import Item
from beets.util import bytestring_path
import confuse
from beetsplug import lyrics
from mock import MagicMock
log = logging.getLogger('beets.test_lyrics')
raw_backend = lyrics.Backend({}, log)
google = lyrics.Google(MagicMock(), log)
class LyricsPluginTest(unittest.TestCase):
def setUp(self):
"""Set up configuration."""
lyrics.LyricsPlugin()
def test_search_artist(self):
item = Item(artist='Alice ft. Bob', title='song')
self.assertIn(('Alice ft. Bob', ['song']),
lyrics.search_pairs(item))
self.assertIn(('Alice', ['song']),
lyrics.search_pairs(item))
item = Item(artist='Alice feat Bob', title='song')
self.assertIn(('Alice feat Bob', ['song']),
lyrics.search_pairs(item))
self.assertIn(('Alice', ['song']),
lyrics.search_pairs(item))
item = Item(artist='Alice feat. Bob', title='song')
self.assertIn(('Alice feat. Bob', ['song']),
lyrics.search_pairs(item))
self.assertIn(('Alice', ['song']),
lyrics.search_pairs(item))
item = Item(artist='Alice feats Bob', title='song')
self.assertIn(('Alice feats Bob', ['song']),
lyrics.search_pairs(item))
self.assertNotIn(('Alice', ['song']),
lyrics.search_pairs(item))
item = Item(artist='Alice featuring Bob', title='song')
self.assertIn(('Alice featuring Bob', ['song']),
lyrics.search_pairs(item))
self.assertIn(('Alice', ['song']),
lyrics.search_pairs(item))
item = Item(artist='Alice & Bob', title='song')
self.assertIn(('Alice & Bob', ['song']),
lyrics.search_pairs(item))
self.assertIn(('Alice', ['song']),
lyrics.search_pairs(item))
item = Item(artist='Alice and Bob', title='song')
self.assertIn(('Alice and Bob', ['song']),
lyrics.search_pairs(item))
self.assertIn(('Alice', ['song']),
lyrics.search_pairs(item))
item = Item(artist='Alice and Bob', title='song')
self.assertEqual(('Alice and Bob', ['song']),
list(lyrics.search_pairs(item))[0])
def test_search_pairs_multi_titles(self):
item = Item(title='1 / 2', artist='A')
self.assertIn(('A', ['1 / 2']), lyrics.search_pairs(item))
self.assertIn(('A', ['1', '2']), lyrics.search_pairs(item))
item = Item(title='1/2', artist='A')
self.assertIn(('A', ['1/2']), lyrics.search_pairs(item))
self.assertIn(('A', ['1', '2']), lyrics.search_pairs(item))
def test_search_pairs_titles(self):
item = Item(title='Song (live)', artist='A')
self.assertIn(('A', ['Song']), lyrics.search_pairs(item))
self.assertIn(('A', ['Song (live)']), lyrics.search_pairs(item))
item = Item(title='Song (live) (new)', artist='A')
self.assertIn(('A', ['Song']), lyrics.search_pairs(item))
self.assertIn(('A', ['Song (live) (new)']), lyrics.search_pairs(item))
item = Item(title='Song (live (new))', artist='A')
self.assertIn(('A', ['Song']), lyrics.search_pairs(item))
self.assertIn(('A', ['Song (live (new))']), lyrics.search_pairs(item))
item = Item(title='Song ft. B', artist='A')
self.assertIn(('A', ['Song']), lyrics.search_pairs(item))
self.assertIn(('A', ['Song ft. B']), lyrics.search_pairs(item))
item = Item(title='Song featuring B', artist='A')
self.assertIn(('A', ['Song']), lyrics.search_pairs(item))
self.assertIn(('A', ['Song featuring B']), lyrics.search_pairs(item))
item = Item(title='Song and B', artist='A')
self.assertNotIn(('A', ['Song']), lyrics.search_pairs(item))
self.assertIn(('A', ['Song and B']), lyrics.search_pairs(item))
item = Item(title='Song: B', artist='A')
self.assertIn(('A', ['Song']), lyrics.search_pairs(item))
self.assertIn(('A', ['Song: B']), lyrics.search_pairs(item))
def test_remove_credits(self):
self.assertEqual(
lyrics.remove_credits("""It's close to midnight
Lyrics brought by example.com"""),
"It's close to midnight"
)
self.assertEqual(
lyrics.remove_credits("""Lyrics brought by example.com"""),
""
)
# don't remove the second verse just because it happens to contain the word 'lyrics'
text = """Look at all the shit that i done bought her
See lyrics ain't nothin
if the beat aint crackin"""
self.assertEqual(lyrics.remove_credits(text), text)
def test_is_lyrics(self):
texts = ['LyricsMania.com - Copyright (c) 2013 - All Rights Reserved']
texts += ["""All material found on this site is property\n
of mywickedsongtext brand"""]
for t in texts:
self.assertFalse(google.is_lyrics(t))
def test_slugify(self):
text = u"http://site.com/\xe7afe-au_lait(boisson)"
self.assertEqual(google.slugify(text),
'http://site.com/cafe_au_lait')
def test_scrape_strip_cruft(self):
text = u"""<!--lyrics below-->
one
<br class='myclass'>
two !
<br><br \\>
<blink>four</blink>"""
self.assertEqual(lyrics._scrape_strip_cruft(text, True),
"one\ntwo !\n\nfour")
def test_scrape_strip_scripts(self):
text = u"""foo<script>bar</script>baz"""
self.assertEqual(lyrics._scrape_strip_cruft(text, True),
"foobaz")
def test_scrape_strip_tag_in_comment(self):
text = u"""foo<!--<bar>-->qux"""
self.assertEqual(lyrics._scrape_strip_cruft(text, True),
"fooqux")
def test_scrape_merge_paragraphs(self):
text = u"one</p> <p class='myclass'>two</p><p>three"
self.assertEqual(lyrics._scrape_merge_paragraphs(text),
"one\ntwo\nthree")
def test_missing_lyrics(self):
self.assertFalse(google.is_lyrics(LYRICS_TEXTS['missing_texts']))
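# Map a lyrics URL to the path of a local HTML fixture so the scraping tests can run without network access.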
def url_to_filename(url):
url = re.sub(r'https?://|www\.', '', url)
fn = "".join(x for x in url if (x.isalnum() or x == '/'))
fn = fn.split('/')
fn = os.path.join(LYRICS_ROOT_DIR,
bytestring_path(fn[0]),
bytestring_path(fn[-1] + '.txt'))
return fn
class MockFetchUrl(object):
def __init__(self, pathval='fetched_path'):
self.pathval = pathval
self.fetched = None
def __call__(self, url, filename=None):
self.fetched = url
fn = url_to_filename(url)
with open(fn, 'r') as f:
content = f.read()
return content
def is_lyrics_content_ok(title, text):
"""Compare lyrics text to expected lyrics for given title."""
if not text:
return False
keywords = set(LYRICS_TEXTS[google.slugify(title)].split())
words = set(x.strip(".?, ") for x in text.lower().split())
return keywords <= words
LYRICS_ROOT_DIR = os.path.join(_common.RSRC, b'lyrics')
yaml_path = os.path.join(_common.RSRC, b'lyricstext.yaml')
LYRICS_TEXTS = confuse.load_yaml(yaml_path)
class LyricsGoogleBaseTest(unittest.TestCase):
def setUp(self):
"""Set up configuration."""
try:
__import__('bs4')
except ImportError:
self.skipTest('Beautiful Soup 4 not available')
if sys.version_info[:3] < (2, 7, 3):
self.skipTest("Python's built-in HTML parser is not good enough")
class LyricsPluginSourcesTest(LyricsGoogleBaseTest):
"""Check that beets google custom search engine sources are correctly
scraped.
"""
DEFAULT_SONG = dict(artist=u'The Beatles', title=u'Lady Madonna')
DEFAULT_SOURCES = [
dict(DEFAULT_SONG, backend=lyrics.LyricsWiki),
dict(artist=u'Santana', title=u'Black magic woman',
backend=lyrics.MusiXmatch),
dict(DEFAULT_SONG, backend=lyrics.Genius),
]
GOOGLE_SOURCES = [
dict(DEFAULT_SONG,
url=u'http://www.absolutelyrics.com',
path=u'/lyrics/view/the_beatles/lady_madonna'),
dict(DEFAULT_SONG,
url=u'http://www.azlyrics.com',
path=u'/lyrics/beatles/ladymadonna.html'),
dict(DEFAULT_SONG,
url=u'http://www.chartlyrics.com',
path=u'/_LsLsZ7P4EK-F-LD4dJgDQ/Lady+Madonna.aspx'),
dict(DEFAULT_SONG,
url=u'http://www.elyricsworld.com',
path=u'/lady_madonna_lyrics_beatles.html'),
dict(url=u'http://www.lacoccinelle.net',
artist=u'Jacques Brel', title=u"Amsterdam",
path=u'/paroles-officielles/275679.html'),
dict(DEFAULT_SONG,
url=u'http://letras.mus.br/', path=u'the-beatles/275/'),
dict(DEFAULT_SONG,
url='http://www.lyricsmania.com/',
path='lady_madonna_lyrics_the_beatles.html'),
dict(DEFAULT_SONG, url=u'http://lyrics.wikia.com/',
path=u'The_Beatles:Lady_Madonna'),
dict(DEFAULT_SONG,
url=u'http://www.lyricsmode.com',
path=u'/lyrics/b/beatles/lady_madonna.html'),
dict(url=u'http://www.lyricsontop.com',
artist=u'Amy Winehouse', title=u"Jazz'n'blues",
path=u'/amy-winehouse-songs/jazz-n-blues-lyrics.html'),
dict(DEFAULT_SONG,
url='http://www.metrolyrics.com/',
path='lady-madonna-lyrics-beatles.html'),
dict(url='http://www.musica.com/', path='letras.asp?letra=2738',
artist=u'Santana', title=u'Black magic woman'),
dict(url=u'http://www.paroles.net/',
artist=u'Lilly Wood & the prick', title=u"Hey it's ok",
path=u'lilly-wood-the-prick/paroles-hey-it-s-ok'),
dict(DEFAULT_SONG,
url='http://www.songlyrics.com',
path=u'/the-beatles/lady-madonna-lyrics'),
dict(DEFAULT_SONG,
url=u'http://www.sweetslyrics.com',
path=u'/761696.The%20Beatles%20-%20Lady%20Madonna.html')
]
def setUp(self):
LyricsGoogleBaseTest.setUp(self)
self.plugin = lyrics.LyricsPlugin()
@unittest.skipUnless(os.environ.get(
'BEETS_TEST_LYRICS_SOURCES', '0') == '1',
'lyrics sources testing not enabled')
def test_backend_sources_ok(self):
"""Test default backends with songs known to exist in respective databases.
"""
errors = []
for s in self.DEFAULT_SOURCES:
res = s['backend'](self.plugin.config, self.plugin._log).fetch(
s['artist'], s['title'])
if not is_lyrics_content_ok(s['title'], res):
errors.append(s['backend'].__name__)
self.assertFalse(errors)
@unittest.skipUnless(os.environ.get(
'BEETS_TEST_LYRICS_SOURCES', '0') == '1',
'lyrics sources testing not enabled')
def test_google_sources_ok(self):
"""Test if lyrics present on websites registered in beets google custom
search engine are correctly scraped.
"""
for s in self.GOOGLE_SOURCES:
url = s['url'] + s['path']
res = lyrics.scrape_lyrics_from_html(
raw_backend.fetch_url(url))
self.assertTrue(google.is_lyrics(res), url)
self.assertTrue(is_lyrics_content_ok(s['title'], res), url)
class LyricsGooglePluginMachineryTest(LyricsGoogleBaseTest):
"""Test scraping heuristics on a fake html page.
"""
source = dict(url=u'http://www.example.com', artist=u'John Doe',
title=u'Beets song', path=u'/lyrics/beetssong')
def setUp(self):
"""Set up configuration"""
LyricsGoogleBaseTest.setUp(self)
self.plugin = lyrics.LyricsPlugin()
@patch.object(lyrics.Backend, 'fetch_url', MockFetchUrl())
def test_mocked_source_ok(self):
"""Test that lyrics of the mocked page are correctly scraped"""
url = self.source['url'] + self.source['path']
res = lyrics.scrape_lyrics_from_html(raw_backend.fetch_url(url))
self.assertTrue(google.is_lyrics(res), url)
self.assertTrue(is_lyrics_content_ok(self.source['title'], res),
url)
@patch.object(lyrics.Backend, 'fetch_url', MockFetchUrl())
def test_is_page_candidate_exact_match(self):
"""Test matching html page title with song infos -- when song infos are
present in the title.
"""
from bs4 import SoupStrainer, BeautifulSoup
s = self.source
url = six.text_type(s['url'] + s['path'])
html = raw_backend.fetch_url(url)
soup = BeautifulSoup(html, "html.parser",
parse_only=SoupStrainer('title'))
self.assertEqual(
google.is_page_candidate(url, soup.title.string,
s['title'], s['artist']), True, url)
def test_is_page_candidate_fuzzy_match(self):
"""Test matching html page title with song infos -- when song infos are
not present in the title.
"""
s = self.source
url = s['url'] + s['path']
url_title = u'example.com | Beats song by John doe'
# very small differences (typos) are OK, e.g. 'beats' vs 'beets' with the same artist
self.assertEqual(google.is_page_candidate(url, url_title, s['title'],
s['artist']), True, url)
# reject different title
url_title = u'example.com | seets bong lyrics by John doe'
self.assertEqual(google.is_page_candidate(url, url_title, s['title'],
s['artist']), False, url)
def test_is_page_candidate_special_chars(self):
"""Ensure that `is_page_candidate` doesn't crash when the artist
and such contain special regular expression characters.
"""
# https://github.com/beetbox/beets/issues/1673
s = self.source
url = s['url'] + s['path']
url_title = u'foo'
google.is_page_candidate(url, url_title, s['title'], u'Sunn O)))')
class SlugTests(unittest.TestCase):
def test_slug(self):
# plain ascii passthrough
text = u"test"
self.assertEqual(lyrics.slug(text), 'test')
# german unicode and capitals
text = u"Mørdag"
self.assertEqual(lyrics.slug(text), 'mordag')
# more accents and quotes
text = u"l'été c'est fait pour jouer"
self.assertEqual(lyrics.slug(text), 'l-ete-c-est-fait-pour-jouer')
# accents, parens and spaces
text = u"\xe7afe au lait (boisson)"
self.assertEqual(lyrics.slug(text), 'cafe-au-lait-boisson')
text = u"Multiple spaces -- and symbols! -- merged"
self.assertEqual(lyrics.slug(text),
'multiple-spaces-and-symbols-merged')
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
[] |
[] |
[
"BEETS_TEST_LYRICS_SOURCES"
] |
[]
|
["BEETS_TEST_LYRICS_SOURCES"]
|
python
| 1 | 0 | |
tests/tests.py
|
#!/usr/bin/env python
import ipaddress
import os
import select
import socket
import sys
import unittest
import pycares
FIXTURES_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), 'fixtures'))
class DNSTest(unittest.TestCase):
def setUp(self):
self.channel = pycares.Channel(timeout=5.0, tries=1)
def tearDown(self):
self.channel = None
def wait(self):
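# Drive the c-ares event loop: poll the channel's sockets with select() and dispatch readable/writable fds until no queries remain.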
while True:
read_fds, write_fds = self.channel.getsock()
if not read_fds and not write_fds:
break
timeout = self.channel.timeout()
if timeout == 0.0:
self.channel.process_fd(pycares.ARES_SOCKET_BAD, pycares.ARES_SOCKET_BAD)
continue
rlist, wlist, xlist = select.select(read_fds, write_fds, [], timeout)
for fd in rlist:
self.channel.process_fd(fd, pycares.ARES_SOCKET_BAD)
for fd in wlist:
self.channel.process_fd(pycares.ARES_SOCKET_BAD, fd)
def assertNoError(self, errorno):
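# On CI (AppVeyor/Travis) a timeout is treated as a skipped test rather than a failure.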
if errorno == pycares.errno.ARES_ETIMEOUT and (os.environ.get('APPVEYOR') or os.environ.get('TRAVIS')):
raise unittest.SkipTest('timeout')
self.assertEqual(errorno, None)
@unittest.skipIf(sys.platform == 'win32', 'skipped on Windows')
def test_gethostbyaddr(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.gethostbyaddr('127.0.0.1', cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_host_result)
@unittest.skipIf(sys.platform == 'win32', 'skipped on Windows')
@unittest.skipIf(os.environ.get('TRAVIS') is not None, 'skipped on Travis')
def test_gethostbyaddr6(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.gethostbyaddr('::1', cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_host_result)
@unittest.skipIf(sys.platform == 'win32', 'skipped on Windows')
def test_gethostbyname(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.gethostbyname('localhost', socket.AF_INET, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_host_result)
@unittest.skipIf(sys.platform == 'win32', 'skipped on Windows')
def test_gethostbyname_small_timeout(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel = pycares.Channel(timeout=0.5, tries=1)
self.channel.gethostbyname('localhost', socket.AF_INET, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_host_result)
@unittest.skipIf(sys.platform == 'win32', 'skipped on Windows')
def test_getnameinfo(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.getnameinfo(('127.0.0.1', 80), pycares.ARES_NI_LOOKUPHOST|pycares.ARES_NI_LOOKUPSERVICE, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_nameinfo_result)
self.assertIn(self.result.node, ('localhost.localdomain', 'localhost'))
self.assertEqual(self.result.service, 'http')
@unittest.skipIf(sys.platform == 'win32', 'skipped on Windows')
@unittest.expectedFailure # c-ares is broken (does not return numeric service if asked) and unconditionally adds zero scope
def test_getnameinfo_ipv6(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.getnameinfo(('fd01:dec0:0:1::2020', 80, 0, 0), pycares.ARES_NI_NUMERICHOST|pycares.ARES_NI_NUMERICSERV, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_nameinfo_result)
self.assertEqual(self.result.node, 'fd01:dec0:0:1::2020')
self.assertEqual(self.result.service, '80')
@unittest.skipIf(sys.platform == 'win32', 'skipped on Windows')
@unittest.expectedFailure # c-ares is broken (does not return numeric service if asked)
def test_getnameinfo_ipv6_ll(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.getnameinfo(('fe80::5abd:fee7:4177:60c0', 80, 0, 666), pycares.ARES_NI_NUMERICHOST|pycares.ARES_NI_NUMERICSERV|pycares.ARES_NI_NUMERICSCOPE, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_nameinfo_result)
self.assertEqual(self.result.node, 'fe80::5abd:fee7:4177:60c0%666')
self.assertEqual(self.result.service, '80')
def test_query_a(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('google.com', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_a_result)
self.assertNotEqual(r.host, None)
self.assertTrue(r.ttl >= 0)
def test_query_a_bad(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('hgf8g2od29hdohid.com', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertEqual(self.result, None)
self.assertEqual(self.errorno, pycares.errno.ARES_ENOTFOUND)
def test_query_a_rotate(self):
self.result, self.errorno = None, None
self.errorno_count, self.count = 0, 0
def cb(result, errorno):
self.result, self.errorno = result, errorno
if errorno:
self.errorno_count += 1
self.count += 1
self.channel = pycares.Channel(timeout=1.0, tries=1, rotate=True)
self.channel.query('google.com', pycares.QUERY_TYPE_A, cb)
self.channel.query('google.com', pycares.QUERY_TYPE_A, cb)
self.channel.query('google.com', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertEqual(self.count, 3)
self.assertEqual(self.errorno_count, 0)
def test_query_aaaa(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('ipv6.google.com', pycares.QUERY_TYPE_AAAA, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_aaaa_result)
self.assertNotEqual(r.host, None)
self.assertTrue(r.ttl >= 0)
def test_query_cname(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('www.amazon.com', pycares.QUERY_TYPE_CNAME, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_query_cname_result)
def test_query_mx(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('google.com', pycares.QUERY_TYPE_MX, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_mx_result)
self.assertTrue(r.ttl >= 0)
def test_query_ns(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('google.com', pycares.QUERY_TYPE_NS, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_ns_result)
def test_query_txt(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('google.com', pycares.QUERY_TYPE_TXT, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_txt_result)
self.assertTrue(r.ttl >= 0)
def test_query_txt_chunked(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('jobscoutdaily.com', pycares.QUERY_TYPE_TXT, cb)
self.wait()
self.assertNoError(self.errorno)
# If the chunks are aggregated, only one TXT record should be visible. Three would show if they are not properly merged.
# jobscoutdaily.com. 21600 IN TXT "v=spf1 " "include:emailcampaigns.net include:spf.dynect.net include:ccsend.com include:_spf.elasticemail.com ip4:67.200.116.86 ip4:67.200.116.90 ip4:67.200.116.97 ip4:67.200.116.111 ip4:74.199.198.2 " " ~all"
self.assertEqual(len(self.result), 1)
self.assertEqual(self.result[0].text, 'v=spf1 include:emailcampaigns.net include:spf.dynect.net include:ccsend.com include:_spf.elasticemail.com ip4:67.200.116.86 ip4:67.200.116.90 ip4:67.200.116.97 ip4:67.200.116.111 ip4:74.199.198.2 ~all')
def test_query_txt_multiple_chunked(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('s-pulse.co.jp', pycares.QUERY_TYPE_TXT, cb)
self.wait()
self.assertNoError(self.errorno)
# s-pulse.co.jp. 3600 IN TXT "MS=ms18955624"
# s-pulse.co.jp. 3600 IN TXT "amazonses:lOgEcA9DwKFkIusIbgjpvZ2kCxaVADMlaxq9hSO3k4o="
# s-pulse.co.jp. 3600 IN TXT "v=spf1 " "include:spf-bma.mpme.jp ip4:202.248.11.9 ip4:202.248.11.10 " "ip4:218.223.68.132 ip4:218.223.68.77 ip4:210.254.139.121 " "ip4:211.128.73.121 ip4:210.254.139.122 ip4:211.128.73.122 " "ip4:210.254.139.123 ip4:211.128.73.123 ip4:210.254.139.124 " "ip4:211.128.73.124 ip4:210.254.139.13 ip4:211.128.73.13 " "ip4:52.68.199.198 include:spf.betrend.com " "include:spf.protection.outlook.com include:crmstyle.com " "~all"
self.assertEqual(len(self.result), 3)
def test_query_txt_bytes1(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('google.com', pycares.QUERY_TYPE_TXT, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_txt_result)
self.assertIsInstance(r.text, str) # it's ASCII
self.assertTrue(r.ttl >= 0)
def test_query_txt_bytes2(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('wide.com.es', pycares.QUERY_TYPE_TXT, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_txt_result)
self.assertIsInstance(r.text, bytes)
self.assertTrue(r.ttl >= 0)
def test_query_txt_multiple_chunked_with_non_ascii_content(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('txt-non-ascii.dns-test.hmnid.ru', pycares.QUERY_TYPE_TXT, cb)
self.wait()
self.assertNoError(self.errorno)
# txt-non-ascii.dns-test.hmnid.ru. IN TXT "ascii string" "some\208misc\208stuff"
self.assertEqual(len(self.result), 1)
r = self.result[0]
self.assertEqual(type(r), pycares.ares_query_txt_result)
self.assertIsInstance(r.text, bytes)
self.assertTrue(r.ttl >= 0)
def test_query_soa(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('google.com', pycares.QUERY_TYPE_SOA, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_query_soa_result)
self.assertTrue(self.result.ttl >= 0)
def test_query_srv(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('_xmpp-server._tcp.google.com', pycares.QUERY_TYPE_SRV, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_srv_result)
self.assertTrue(r.ttl >= 0)
def test_query_naptr(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('sip2sip.info', pycares.QUERY_TYPE_NAPTR, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_naptr_result)
self.assertTrue(r.ttl >= 0)
def test_query_ptr(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
ip = '8.8.8.8'
self.channel.query(ipaddress.ip_address(ip).reverse_pointer, pycares.QUERY_TYPE_PTR, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_query_ptr_result)
self.assertIsInstance(self.result.ttl, int)
self.assertGreaterEqual(self.result.ttl, 0)
self.assertLessEqual(self.result.ttl, 2**31-1)
self.assertEqual(type(self.result.aliases), list)
def test_query_ptr_ipv6(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
ip = '2001:4860:4860::8888'
self.channel.query(ipaddress.ip_address(ip).reverse_pointer, pycares.QUERY_TYPE_PTR, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_query_ptr_result)
self.assertIsInstance(self.result.ttl, int)
self.assertGreaterEqual(self.result.ttl, 0)
self.assertLessEqual(self.result.ttl, 2**31-1)
self.assertEqual(type(self.result.aliases), list)
def test_query_any(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('google.com', pycares.QUERY_TYPE_ANY, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertTrue(len(self.result) > 1)
def test_query_cancelled(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('google.com', pycares.QUERY_TYPE_NS, cb)
self.channel.cancel()
self.wait()
self.assertEqual(self.result, None)
self.assertEqual(self.errorno, pycares.errno.ARES_ECANCELLED)
def test_query_bad_type(self):
self.assertRaises(ValueError, self.channel.query, 'google.com', 667, lambda *x: None)
self.wait()
def test_query_timeout(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.servers = ['1.2.3.4']
self.channel.query('google.com', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertEqual(self.result, None)
self.assertEqual(self.errorno, pycares.errno.ARES_ETIMEOUT)
def test_query_onion(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('foo.onion', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertEqual(self.result, None)
self.assertEqual(self.errorno, pycares.errno.ARES_ENOTFOUND)
def test_channel_nameservers(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel = pycares.Channel(timeout=5.0, tries=1, servers=['8.8.8.8'])
self.channel.query('google.com', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertNoError(self.errorno)
def test_channel_nameservers2(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.servers = ['8.8.8.8']
self.channel.query('google.com', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertNoError(self.errorno)
def test_channel_nameservers3(self):
servers = ['8.8.8.8', '8.8.4.4']
self.channel.servers = servers
servers2 = self.channel.servers
self.assertEqual(servers, servers2)
def test_channel_local_ip(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel = pycares.Channel(timeout=5.0, tries=1, servers=['8.8.8.8'], local_ip='127.0.0.1')
self.channel.query('google.com', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertEqual(self.result, None)
self.assertEqual(self.errorno, pycares.errno.ARES_ECONNREFUSED)
def test_channel_local_ip2(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.servers = ['8.8.8.8']
self.channel.set_local_ip('127.0.0.1')
self.channel.query('google.com', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertEqual(self.result, None)
self.assertEqual(self.errorno, pycares.errno.ARES_ECONNREFUSED)
self.assertRaises(ValueError, self.channel.set_local_ip, 'an invalid ip')
def test_channel_timeout(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel = pycares.Channel(timeout=0.5, tries=1)
self.channel.gethostbyname('google.com', socket.AF_INET, cb)
timeout = self.channel.timeout()
self.assertTrue(timeout > 0.0)
self.channel.cancel()
self.wait()
self.assertEqual(self.result, None)
self.assertEqual(self.errorno, pycares.errno.ARES_ECANCELLED)
def test_import_errno(self):
from pycares.errno import ARES_SUCCESS
self.assertTrue(True)
def test_result_not_ascii(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.query('ayesas.com', pycares.QUERY_TYPE_SOA, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_query_soa_result)
self.assertIsInstance(self.result.hostmaster, bytes) # it's not ASCII
self.assertTrue(self.result.ttl >= 0)
def test_idna_encoding(self):
host = 'españa.icom.museum'
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
# try encoding it as utf-8
self.channel.gethostbyname(host.encode(), socket.AF_INET, cb)
self.wait()
self.assertEqual(self.errorno, pycares.errno.ARES_ENOTFOUND)
self.assertEqual(self.result, None)
# use it as is (it's IDNA encoded internally)
self.channel.gethostbyname(host, socket.AF_INET, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_host_result)
def test_idna_encoding_query_a(self):
host = 'españa.icom.museum'
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
# try encoding it as utf-8
self.channel.query(host.encode(), pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertEqual(self.errorno, pycares.errno.ARES_ENOTFOUND)
self.assertEqual(self.result, None)
# use it as is (it's IDNA encoded internally)
self.channel.query(host, pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_a_result)
self.assertNotEqual(r.host, None)
def test_idna2008_encoding(self):
try:
import idna
except ImportError:
raise unittest.SkipTest('idna module not installed')
host = 'straße.de'
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel.gethostbyname(host, socket.AF_INET, cb)
self.wait()
self.assertNoError(self.errorno)
self.assertEqual(type(self.result), pycares.ares_host_result)
self.assertTrue('81.169.145.78' in self.result.addresses)
@unittest.skipIf(sys.platform == 'win32', 'skipped on Windows')
def test_custom_resolvconf(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel = pycares.Channel(tries=1, timeout=2.0, resolvconf_path=os.path.join(FIXTURES_PATH, 'badresolv.conf'))
self.channel.query('google.com', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertEqual(self.result, None)
self.assertEqual(self.errorno, pycares.errno.ARES_ETIMEOUT)
def test_errorcode_dict(self):
for err in ('ARES_SUCCESS', 'ARES_ENODATA', 'ARES_ECANCELLED'):
val = getattr(pycares.errno, err)
self.assertEqual(pycares.errno.errorcode[val], err)
def test_search(self):
self.result, self.errorno = None, None
def cb(result, errorno):
self.result, self.errorno = result, errorno
self.channel = pycares.Channel(timeout=5.0, tries=1, domains=['google.com'])
self.channel.search('cloud', pycares.QUERY_TYPE_A, cb)
self.wait()
self.assertNoError(self.errorno)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_a_result)
self.assertNotEqual(r.host, None)
def test_lookup(self):
self.channel = pycares.Channel(
lookups="b",
timeout=1,
tries=1,
socket_receive_buffer_size=4096,
servers=["8.8.8.8", "8.8.4.4"],
tcp_port=53,
udp_port=53,
rotate=True,
)
def on_result(result, errorno):
self.result, self.errorno = result, errorno
for domain in [
"google.com",
"microsoft.com",
"apple.com",
"amazon.com",
"baidu.com",
"alipay.com",
"tencent.com",
]:
self.result, self.errorno = None, None
self.channel.query(domain, pycares.QUERY_TYPE_A, on_result)
self.wait()
self.assertNoError(self.errorno)
self.assertTrue(self.result is not None and len(self.result) > 0)
for r in self.result:
self.assertEqual(type(r), pycares.ares_query_a_result)
self.assertNotEqual(r.host, None)
self.assertTrue(r.type == 'A')
self.assertTrue(r.ttl >= 0)
def test_strerror_str(self):
for key in pycares.errno.errorcode:
self.assertTrue(type(pycares.errno.strerror(key)), str)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[] |
[] |
[
"TRAVIS",
"APPVEYOR"
] |
[]
|
["TRAVIS", "APPVEYOR"]
|
python
| 2 | 0 | |
node.go
|
package node
// Consider for data structures: http://arslan.io/thread-safe-set-data-structure-in-go
import (
"fmt"
"github.com/garyburd/redigo/redis"
"os"
"strconv"
"sync"
"time"
)
type Node interface {
Accept(Peer) error
Listen(*Session)
Close() error
GetLocalPeer(URI, map[string]interface{}) (Peer, error)
SupportFrozenSessions() bool
}
type node struct {
closing bool
closeLock sync.Mutex
Authen
Broker
Dealer
Agent
agent *Client
sessions map[ID]*Session
sessionLock sync.Mutex
stats *NodeStats
PermMode string
Config *NodeConfig
RedisPool *redis.Pool
}
// NewNode creates a very basic WAMP node.
func NewNode(config *NodeConfig) Node {
node := &node{
sessions: make(map[ID]*Session),
Agent: NewAgent(),
stats: NewNodeStats(),
PermMode: os.Getenv("EXIS_PERMISSIONS"),
Config: config,
RedisPool: NewRedisPool(config.RedisServer, config.RedisPassword),
}
if config.RedisServer == "" {
out.Debug("Redis: DISABLED")
node.Broker = NewDefaultBroker(node)
node.Dealer = NewDefaultDealer(node)
} else {
out.Debug("Redis: %s", config.RedisServer)
ClearTransientSessions(node.RedisPool)
node.Broker = NewRedisBroker(node)
node.Dealer = NewRedisDealer(node)
}
// Open a file for logging messages.
// Note: this must come before we set up the local agent.
if config.MessageLogFile != "" {
node.stats.OpenMessageLog(config.MessageLogFile, config.MessageLogMaxLines)
}
// For the startup phase, we will hold calls without a registered procedure.
if config.HoldCalls > 0 {
go func() {
time.Sleep(time.Duration(config.HoldCalls) * time.Second)
node.Dealer.SetCallHolding(false)
}()
}
node.agent = node.localClient(config.Agent)
node.Authen = NewAuthen(node)
node.RegisterNodeMethods()
return node
}
func (node *node) Close() error {
node.closeLock.Lock()
if node.closing {
node.closeLock.Unlock()
return fmt.Errorf("already closed")
}
node.closing = true
node.closeLock.Unlock()
// Tell all sessions we're going down
// sessions must be locked before access
node.sessionLock.Lock()
for _, s := range node.sessions {
s.kill <- ErrSystemShutdown
}
node.sessions = make(map[ID]*Session)
node.sessionLock.Unlock()
return nil
}
func (node *node) Accept(client Peer) error {
sess, ok := node.Handshake(client)
node.stats.LogEvent("SessionAccept")
if ok != nil {
return ok
}
// Start listening on the session
// This will eventually move to the session
go node.Listen(&sess)
return nil
}
// Spin on a session, wait for messages to arrive. Method does not return
func (node *node) Listen(sess *Session) {
c := sess.Receive()
node.SendJoinNotification(sess)
limit := node.Config.GetRequestLimit(sess.authid)
limiter := NewBasicLimiter(limit)
out.Debug("Request rate limit for %s: %d/s", sess, limit)
for {
var open bool
var msg Message
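// Block until the per-session rate limiter grants a token before reading the next message.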
limiter.Acquire()
select {
case msg, open = <-c:
if !open {
//log.Println("lost session:", sess)
node.SessionClose(sess)
return
}
case reason := <-sess.kill:
logErr(sess.Send(&Goodbye{Reason: reason, Details: make(map[string]interface{})}))
//log.Printf("kill session %s: %v", sess, reason)
//NEW: Exit the session!
node.SessionClose(sess)
return
}
node.Handle(&msg, sess)
}
}
// Assign a session ID to a new session.
//
// authid: domain of agent according to validated credentials
// domain: domain requested by the agent, verified subdomain of authid
// authextra: extra data passed with Hello message
func (n *node) assignSessionID(authid string, domain string, authextra map[string]interface{}) (ID, error) {
// We must have been configured to connect to Redis in order to support
// persistent session IDs.
if n.Config.RedisServer == "" {
return NewID(), nil
}
sessionID, ok := authextra["sessionID"].(string)
if ok {
tmpID, err := strconv.ParseInt(sessionID, 0, 64)
if err != nil {
return ID(0), fmt.Errorf("Error parsing session ID (%s)", sessionID)
}
reclaimID := ID(tmpID)
err = ReclaimSessionID(n.RedisPool, reclaimID, authid, domain)
if err != nil {
return ID(0), err
}
return reclaimID, nil
}
newID, err := NewSessionID(n.RedisPool, domain)
return newID, err
}
func (n *node) handleExtraFields(sess *Session, extra map[string]interface{}) error {
_, hasGuardian := extra["guardianDomain"]
_, hasGuardianID := extra["guardianID"]
sess.canFreeze = hasGuardian && hasGuardianID
tmp, hasResume := extra["resumeFrom"].(string)
if hasResume {
tmpID, err := strconv.ParseInt(tmp, 0, 64)
if err != nil {
return fmt.Errorf("Error parsing resume ID (%s)", tmp)
}
resumeID := ID(tmpID)
if ResumeSessionPermitted(n.RedisPool, resumeID, sess.authid) {
sess.resumeFrom = resumeID
}
}
return nil
}
// Handle a new Peer, creating and returning a session
func (n *node) Handshake(client Peer) (Session, error) {
handled := NewHandledMessage("Hello")
sess := Session{
Peer: client,
messageCounts: make(map[string]int64),
kill: make(chan URI, 1),
}
// Don't accept new sessions if the node is going down
if n.closing {
logErr(client.Send(&Abort{Reason: ErrSystemShutdown}))
logErr(client.Close())
return sess, fmt.Errorf("Node is closing, no new connections are allowed")
}
msg, err := GetMessageTimeout(client, 5*time.Second)
if err != nil {
return sess, err
}
hello, msgOk := msg.(*Hello)
// Ensure the message is valid and well constructed
if !msgOk {
logErr(client.Send(&Abort{Reason: URI("wamp.error.protocol_violation")}))
logErr(client.Close())
return sess, fmt.Errorf("protocol violation: expected HELLO, received %s", msg.MessageType())
}
sess.pdid = hello.Realm
sess.authid = string(hello.Realm)
// Old implementation: the authentication must occur before fetching the realm
welcome, err := n.Authen.handleAuth(&sess, hello)
if err != nil {
abort := &Abort{
Reason: ErrAuthenticationFailed,
Details: map[string]interface{}{"error": err.Error()},
}
logErr(client.Send(abort))
logErr(client.Close())
return sess, AuthenticationError(err.Error())
}
authextra, _ := hello.Details["authextra"].(map[string]interface{})
welcome.Id, err = n.assignSessionID(sess.authid, string(sess.pdid), authextra)
if err != nil {
abort := &Abort{
Reason: ErrAuthenticationFailed,
Details: map[string]interface{}{"error": err.Error()},
}
logErr(client.Send(abort))
logErr(client.Close())
return sess, AuthenticationError(err.Error())
}
sess.Id = welcome.Id
n.handleExtraFields(&sess, authextra)
if n.SupportFrozenSessions() {
StoreSessionDetails(n.RedisPool, &sess, authextra)
}
if welcome.Details == nil {
welcome.Details = make(map[string]interface{})
}
// add default details to welcome message
for k, v := range defaultWelcomeDetails {
if _, ok := welcome.Details[k]; !ok {
welcome.Details[k] = v
}
}
if err := client.Send(welcome); err != nil {
return sess, err
}
out.Notice("Session open: %s", string(hello.Realm))
n.sessionLock.Lock()
n.sessions[sess.Id] = &sess
n.sessionLock.Unlock()
// Note: we are ignoring the CR exchange and just logging it as a
// Hello-Welcome exchange.
effect := NewMessageEffect("", "Welcome", sess.Id)
n.stats.LogMessage(&sess, handled, effect)
return sess, nil
}
// Called when a session is closed or closes itself
func (n *node) SessionClose(sess *Session) {
sess.Close()
out.Notice("Session close: %s", sess)
n.Dealer.lostSession(sess)
n.Broker.lostSession(sess)
// If this is an ordinary session, then clear all subscriptions and
// registrations now.
if !sess.canFreeze && n.Config.RedisServer != "" {
RedisRemoveSession(n.RedisPool, sess.Id)
}
n.stats.LogEvent("SessionClose")
n.SendLeaveNotification(sess)
n.sessionLock.Lock()
delete(n.sessions, sess.Id)
n.sessionLock.Unlock()
// We log a special _Close message in case there was no Goodbye message
// associated with this session closing.
handled := NewHandledMessage("_Close")
effect := NewMessageEffect("", "", sess.Id)
n.stats.LogMessage(sess, handled, effect)
}
// Publish a notification that a session joined.
// If "xs.a.b" joins, the message is published to "x.a/sessionJoined".
func (n *node) SendJoinNotification(sess *Session) {
args := []interface{}{}
kwargs := map[string]interface{}{
"id": sess.Id,
"agent": string(sess.pdid),
}
endpoint := popDomain(string(sess.pdid)) + "/sessionJoined"
// Note: we are not using the agent to publish these messages because the
// agent itself triggers a sessionJoined message.
msg := &Publish{
Request: NewID(),
Topic: URI(endpoint),
Arguments: args,
ArgumentsKw: kwargs,
}
n.Broker.Publish(nil, msg)
}
// Publish a notification that a session left.
// If "xs.a.b" leaves, the message is published to "x.a/sessionLeft".
func (n *node) SendLeaveNotification(sess *Session) {
args := []interface{}{
string(sess.pdid),
}
kwargs := map[string]interface{}{
"id": sess.Id,
"agent": string(sess.pdid),
}
endpoint := popDomain(string(sess.pdid)) + "/sessionLeft"
msg := &Publish{
Request: NewID(),
Topic: URI(endpoint),
Arguments: args,
ArgumentsKw: kwargs,
}
n.Broker.Publish(nil, msg)
}
func (n *node) LogMessage(msg *Message, sess *Session) {
// Extract the target domain from the message
target, err := destination(msg)
// Make errors nice and pretty. These are riffle error messages, not node errors
m := *msg
if m.MessageType() == ERROR {
out.Warning("%s from %s: %v", m.MessageType(), *sess, *msg)
} else if err == nil {
out.Debug("%s %s from %s", m.MessageType(), string(target), *sess)
} else {
out.Debug("%s from %s", m.MessageType(), *sess)
}
typeName := messageTypeString(*msg)
n.stats.LogEvent(typeName)
sess.messageCounts[typeName]++
}
// Handle a new message
func (n *node) Handle(msg *Message, sess *Session) {
// NOTE: there is a serious shortcoming here: How do we deal with WAMP messages with an
// implicit destination? Many of them refer to sessions, but do we want to store the session
// IDs with the ultimate PDID target, or just change the protocol?
handled := NewHandledMessage(messageTypeString(*msg))
var effect *MessageEffect
n.LogMessage(msg, sess)
// Extract the target domain from the message
target, err := destination(msg)
if err == nil {
// Ensure the construction of the message is valid, extract the endpoint, domain, and action
_, _, err := breakdownEndpoint(string(target))
// Return a WAMP error to the user indicating a poorly constructed endpoint
if err != nil {
out.Error("Misconstructed endpoint: %s", msg)
m := *msg
err := &Error{
Type: m.MessageType(),
Request: requestID(msg),
Details: map[string]interface{}{"Invalid Endpoint": "Poorly constructed endpoint."},
Error: ErrInvalidUri,
}
sess.Peer.Send(err)
effect = NewErrorMessageEffect("", ErrInvalidUri, 0)
n.stats.LogMessage(sess, handled, effect)
return
}
verb, ok := GetMessageVerb(*msg)
// Downward domain action? That is, endpoint is a subdomain of the current agent?
if !ok || !n.Permitted(target, sess, verb) {
out.Warning("Action not allowed: %s:%s", sess.pdid, target)
m := *msg
err := &Error{
Type: m.MessageType(),
Request: requestID(msg),
Details: map[string]interface{}{"Not Permitted": "Action not permitted."},
Error: ErrNotAuthorized,
}
sess.Peer.Send(err)
effect = NewErrorMessageEffect("", ErrNotAuthorized, 0)
n.stats.LogMessage(sess, handled, effect)
return
}
}
switch msg := (*msg).(type) {
case *Goodbye:
logErr(sess.Send(&Goodbye{Reason: ErrGoodbyeAndOut, Details: make(map[string]interface{})}))
effect = NewMessageEffect("", "Goodbye", sess.Id)
// log.Printf("[%s] leaving: %v", sess, msg.Reason)
// Broker messages
case *Publish:
effect = n.Broker.Publish(sess, msg)
case *Subscribe:
effect = n.Broker.Subscribe(sess, msg)
case *Unsubscribe:
effect = n.Broker.Unsubscribe(sess, msg)
// Dealer messages
case *Register:
effect = n.Dealer.Register(sess, msg)
case *Unregister:
effect = n.Dealer.Unregister(sess, msg)
case *Call:
effect = n.Dealer.Call(sess, msg)
case *Yield:
effect = n.Dealer.Yield(sess, msg)
// Error messages
case *Error:
if msg.Type == INVOCATION {
// the only type of ERROR message the Node should receive
effect = n.Dealer.Error(sess, msg)
} else {
out.Critical("invalid ERROR message received: %v", msg)
}
default:
out.Critical("Unhandled message:", msg.MessageType())
}
// effect is nil in the case of messages we don't know how to handle.
if effect != nil {
n.stats.LogMessage(sess, handled, effect)
}
}
// Return true or false based on the message and the session which sent the message
func (n *node) Permitted(endpoint URI, sess *Session, verb string) bool {
// Permissions checking is turned off---only for testing, please!
if n.PermMode == "off" {
return true
}
// The node is always permitted to perform any action
if sess.isLocal() {
return true
}
// Always allow downward actions.
if subdomain(string(sess.authid), string(endpoint)) {
return true
}
// TODO: check a permissions cache first (allow on hit) and store permitted actions in it
// Otherwise defer to the bouncer; AskBouncer returns false when no bouncer approves the action.
return n.AskBouncer(string(sess.authid), string(endpoint), verb)
}
func (n *node) AskBouncer(authid string, target string, verb string) bool {
// Check with bouncer(s) on permissions check.
// At least one bouncer needs to approve a non-downward action.
if n.Config.Bouncer == "" {
return false
}
checkPerm := n.Config.Bouncer + "/checkPerm"
bouncerActive := n.Dealer.hasRegistration(URI(checkPerm))
if !bouncerActive {
out.Warning("Bouncer (%s) not registered", checkPerm)
return false
}
args := []interface{}{authid, target, verb}
ret, err := n.agent.Call(checkPerm, args, nil)
if err != nil {
out.Critical("Error, returning false: %s", err)
return false
}
permitted, ok := ret.Arguments[0].(bool)
return ok && permitted
}
// Route returns the pdid of the next hop on the path for the given message
func (n *node) Route(msg *Message) string {
// Is target a tenant?
// Is target in forwarding tables?
// Ask map for next hop
return ""
}
func (node *node) EvictDomain(domain string) int {
count := 0
node.sessionLock.Lock()
defer node.sessionLock.Unlock()
for _, sess := range node.sessions {
if subdomain(domain, string(sess.pdid)) {
sess.kill <- ErrSessionEvicted
count++
}
}
return count
}
// GetLocalPeer returns an internal peer connected to the specified realm.
func (r *node) GetLocalPeer(realmURI URI, details map[string]interface{}) (Peer, error) {
peerA, peerB := localPipe()
sess := Session{Peer: peerA, Id: NewID(), kill: make(chan URI, 1)}
out.Notice("Established internal session:", sess.Id)
if details == nil {
details = make(map[string]interface{})
}
go r.Listen(&sess)
return peerB, nil
}
func (r *node) getTestPeer() Peer {
peerA, peerB := localPipe()
go r.Accept(peerA)
return peerB
}
var defaultWelcomeDetails = map[string]interface{}{
"roles": map[string]struct{}{
"broker": {},
"dealer": {},
},
}
// Is support for frozen sessions enabled?
// Requires external persistence of session information.
func (r *node) SupportFrozenSessions() bool {
return r.Config.RedisServer != ""
}
func (r *node) GetSession(session ID) (*Session, bool) {
r.sessionLock.Lock()
defer r.sessionLock.Unlock()
s, ok := r.sessions[session]
return s, ok
}
////////////////////////////////////////
// Misc and old
////////////////////////////////////////
func (n *node) localClient(s string) *Client {
p := n.getTestPeer()
client := NewClient(p)
client.ReceiveTimeout = 60 * time.Second
if _, err := client.JoinRealm(s, nil); err != nil {
out.Error("Error when creating new client: ", err)
}
client.pdid = URI(s)
return client
}
|
[
"\"EXIS_PERMISSIONS\""
] |
[] |
[
"EXIS_PERMISSIONS"
] |
[]
|
["EXIS_PERMISSIONS"]
|
go
| 1 | 0 | |
src/main.py
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, InlineQueryHandler
import telegram as tg
import requests
import json
import os
import io
import time
import logging
from datetime import timedelta
import translate
import random
import praw
REDDIT_BOT_ID = os.environ['REDDIT_BOT_ID']
REDDIT_BOT_SECRET = os.environ['REDDIT_BOT_SECRET']
REDDIT_USER_AGENT = os.environ['REDDIT_USER_AGENT']
USER_AGENT_BROWSER = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'
royalTitles = ["Lé", "Baron", "König", "Archlord", "Genius", "Ritter", "Curry", "Burger", "Mc", "Doktor", "Gentoomaster", "Chef", "Lead Developer"]
firstFrag = ["Schm", "J", "Hans-J", "K", "G", "Gr", "B", "Str", "Kr", "Rask"]
secondFrag = ["oerg", "öck", "öhhhrk", "öhrp", "egor", "oeg", "ock"]
thirdFrag = ["inger", "erino", "aroni", "us", "sell", "topus", "thulu", "tain", "rid", "odil", "ette", "nikov"]
nobleAnnex = ["I.", "II.", "III.", "Royale", "dem Allmächtigen", "dem Weisen", "dem hochgradig Intelligenten", "dem Unendlichen", "dem Allwissenden", "dem Gentoobändiger", "dem Meisterinformatiker"]
wisdoms = ["Linux ist voll doof!", "Ich stehe immer um 7.00 Uhr auf!", "Tut schön viel Frischkäse in die Nudelsoße!", "Mensen um 11.00 Uhr ist eine super Sache!", "Ich habe WinRar gekauft!", "Für einen längeren XP-Supportzeitraum!", "Fasst meinen Laptopbildschirm an!", "Natürlich code ich dieses Feature für euch, ganz ohne Pull Request!", "Maxime ist ein toller Papa!", "Hirtenkäsepizza ist die beste!", "Sauerkraut ist doch ekelhaft!", "Mein Lieblingsbrowser ist ja der Internet Explorer!", "Rechtschreibfehler in Kommentaren? Voll okay!", "Party? Warum nicht bei mir zu Hause?", "Irgendwas mit dynamisch Parameter injecten!", "Wie war das mit den Speisezeiten?", "Ich kaufe nur bei Nvidia!", "Wer braucht schon Open Source...", "KöckOS? Kommt noch diese Woche raus!", "Die besten Witze sind Deine-Mutter-Witze!", "Mein Lieblings-OS ist iOS!", "Ein Halloumiburger ist eine eigenständige Mahlzeit!", "Ich kaufe mir ein MacBook!", "Ich fange wieder mit Medieninformatik an!", "Ich liebe Ubuntu!", "Verschlüsselung ist doch Unsinn!", "Machen wir alle ne gemeinsame WG auf?"]
haes = ["HÄ?", "VALORANT?", "WIE", "WANN", "WO", "Geller muss erst noch zu Ende essen!", "???", "*Random Katzenbild*", "Erstmal Valorant!", "ICH HASSE EUCH ALLE", "HÄÄÄ", "ICH ARBEITE", "ICH HASSE DEN", "FUCK YOU", "WIRKLICH", "BITTE", "Natürlich ist das gelb!", "Es gibt Kuchen!", "Wir haben wieder viel zu viel Lasagne!", "Oke", "WAS", "WAS MEINST DU", "WAS WILLST DU DENN JETZT SCHON WIEDER", "Alter", "Wirst schon sehen", "Denk nach du Schwamm", "Stop", "NICHT COOL", "TROLL NICHT RUM", "Uff", "AAAAARGH", "Kann den jemand kicken?", "DU HAST NUR ANGST VOR MIR", "EKELHAFT", "ICH HASSE ALLES", "WOFÜR", "ICH BIN IMMER SO", "KUCHEN", "LASAGNE", "SCHANDE", "WARUM ICH", "ICH LIEBE ARBEITEN", "ICH HASSE UNPÜNKTLICHKEIT", "IDIOT", "HEY", "WO SEID IHR", "WAS SONST", "KIBA", "HAHA", "VERSTEHT IHR DAS NICHT", "SEID IHR DUMM ODER WAS", "WTF", "RED DEUTSCH MIT MIR", "OMG", "LOL", ":)", "MIR IST LANGWEILIG", "ALS OB IHR ALLE SCHON SCHLAFT", "HALLO", "WEIß ICH NICHT", "WER DENKT SICH DAS AUS", "ICH SPRING LIEBER AUS DEM FENSTER", "NE"]
class NotifyUserException(Exception):
"""Raised whenever an error needs to be propagated to the user"""
pass
def start(update, context):
context.bot.send_message(chat_id=update.message.chat_id, text="Reichenbach is never an option!")
def echoText(update, context):
context.bot.send_message(chat_id=update.message.chat_id, text=update.message.text)
def echoSticker(update, context):
sticker = update.message.sticker
context.bot.send_sticker(chat_id=update.message.chat_id, sticker=sticker)
def mensa(update, context):
params = context.args
if len(params) < 1:
daysToAdd = 0
else:
try:
daysToAdd = int(params[0])
except ValueError:
context.bot.send_message(chat_id=update.message.chat_id, text="The first and only parameter has to be an integer value. Aborting.")
return
day = update.message.date.date() + timedelta(days=daysToAdd)
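# Query the OpenMensa API for canteen id 79 on the requested day; vegetarian/vegan meals are sent in bold, the rest in italics.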
url = "https://openmensa.org/api/v2/canteens/79/days/" + day.strftime("%Y-%m-%d") + "/meals"
resp = requests.get(url)
if not resp.ok:
context.bot.send_message(chat_id=update.message.chat_id, text="I failed miserably. Disgrace!")
return
jsonData = json.loads(resp.content)
for elem in jsonData:
mealNotes = elem["notes"]
if "vegetarisch" in mealNotes or "vegan" in mealNotes:
context.bot.send_message(chat_id=update.message.chat_id, text="*" + elem["name"] + "*", parse_mode="Markdown")
else:
context.bot.send_message(chat_id=update.message.chat_id, text="_" + elem["name"] + "_", parse_mode="Markdown")
def andre(update, context):
context.bot.send_message(chat_id=update.message.chat_id, text="Höhöhö Reichenbach!")
def leon(update, context):
joke = dadJoke()
context.bot.send_message(chat_id=update.message.chat_id, text=joke)
def loen(update, context):
joke = dadJoke()
translator = translate.Translator(from_lang='en', to_lang='de')
translatedJoke = translator.translate(joke)
context.bot.send_message(chat_id=update.message.chat_id, text=translatedJoke)
def dadJoke():
headers = {'Accept': 'text/plain'}
resp = requests.get("https://icanhazdadjoke.com/", headers=headers)
if not resp.ok:
return "I failed miserably. Disgrace!"
return resp.text
def georg(update, context):
context.bot.send_message(chat_id=update.message.chat_id, text="https://wiki.archlinux.org/index.php/Installation_guide")
def maxime(update, context):
context.bot.send_sticker(chat_id=update.message.chat_id, sticker="CAADBQADfAMAAukKyAPfAAFRgAuYdNoWBA")
def andrey(update, context):
context.bot.send_message(chat_id=update.message.chat_id, text="11.00 Bois. Yeef!")
def steffuu(update, context):
context.bot.send_message(chat_id=update.message.chat_id, text=random.choice(haes))
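# Resolve an xkcd comic: fetch the latest comic's metadata first to learn the highest valid id, then fetch the requested (or a random) one.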
def getXkcd(id, rand):
resp = requests.get("https://xkcd.com/info.0.json")
if not resp.ok:
raise NotifyUserException("I failed miserably. Disgrace!")
jsonData = json.loads(resp.content)
upperLimit = jsonData["num"]
if rand:
id = random.randint(1, upperLimit)
elif id > upperLimit:
raise NotifyUserException("Id not in range. Maximum id currently is " + str(upperLimit) + ".")
resp = requests.get("https://xkcd.com/" + str(id) + "/info.0.json")
if not resp.ok:
raise NotifyUserException("I failed miserably. Disgrace!")
jsonData = json.loads(resp.content)
return (id, jsonData["img"], jsonData["title"])
def xkcd(update, context):
params = context.args
rand = False
id = 0
if len(params) < 1:
rand = True
else:
try:
id = int(params[0])
except ValueError:
context.bot.send_message(chat_id=update.message.chat_id, text="The first and only parameter has to be a positive integer value greater than 0. Aborting.")
return
if id < 1:
context.bot.send_message(chat_id=update.message.chat_id, text="The first and only parameter has to be a positive integer value greater than 0. Aborting.")
return
try:
xkcd = getXkcd(id, rand)
except NotifyUserException as error:
context.bot.send_message(chat_id=update.message.chat_id, text=str(error))
return
context.bot.send_photo(chat_id=update.message.chat_id, photo=xkcd[1], caption=str(xkcd[0]) + " - " + xkcd[2])
def decision(update, context):
headers = {'Accept': 'text/plain'}
resp = requests.get("https://yesno.wtf/api/", headers=headers)
if not resp.ok:
raise NotifyUserException("oof")
data = json.loads(resp.text)
context.bot.send_animation(chat_id=update.message.chat_id, animation=data["image"], caption=data["answer"])
def subredditImg(subreddit, offset=0, count=5):
imageFileEndings = [".png", ".jpg", ".jpeg", ".webp", ".gif"]
reddit = praw.Reddit(client_id=REDDIT_BOT_ID, client_secret=REDDIT_BOT_SECRET, user_agent=REDDIT_USER_AGENT)
images = []
# honour the offset parameter: fetch offset + count posts and skip the first `offset`
for post in list(reddit.subreddit(subreddit).hot(limit=count + offset))[offset:]:
for ending in imageFileEndings:
if str(post.url).endswith(ending):
images.append(post.url)
return images
def r(update, context):
params = context.args
offset = 0
if len(params) < 1:
context.bot.send_message(chat_id=update.message.chat_id, text="The first parameter has to be a string identifying the requested subreddit. Aborting.")
return
subreddit = params[0]
if len(params) > 1:
try:
offset = int(params[1])
except ValueError:
context.bot.send_message(chat_id=update.message.chat_id, text="The second parameter has to be a positive integer value. Aborting.")
return
if offset < 0:
context.bot.send_message(chat_id=update.message.chat_id, text="The second parameter has to be a positive integer value. Aborting.")
return
try:
images = subredditImg(subreddit, offset=offset)
except Exception:
context.bot.send_message(chat_id=update.message.chat_id, text="Something went wrong internally. I am deeply sorry.")
return
if len(images) == 0:
context.bot.send_message(chat_id=update.message.chat_id, text="There are no images in the top 5 posts.")
return
for image in images:
context.bot.send_photo(chat_id=update.message.chat_id, photo=image)
def cat(update, context):
context.bot.send_photo(
chat_id=update.message.chat_id,
photo="https://thiscatdoesnotexist.com?time=" + str(time.time()) + str(random.randint(1, 1024))
)
def horse(update, context):
context.bot.send_photo(
chat_id=update.message.chat_id,
photo="https://thishorsedoesnotexist.com?time=" + str(time.time()) + str(random.randint(1, 1024))
)
def person(update, context):
resp = requests.get("https://thispersondoesnotexist.com/image?time=" + str(time.time()) + str(random.randint(1, 1024)), headers={'User-Agent': 'USER_AGENT_BROWSER'})
if not resp.ok:
context.bot.send_message(chat_id=update.message.chat_id, text="Something went wrong internally. I am deeply sorry.")
return
with io.BytesIO(resp.content) as buf:
context.bot.send_photo(chat_id=update.message.chat_id, photo=buf)
def wisdom(update, context):
wisdom = createWisdomString()
context.bot.send_message(chat_id=update.message.chat_id, text=wisdom)
def createWisdomString():
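# Assemble a random "wisdom" sentence: optional royal title, name fragments, optional noble suffix, then a random quote.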
optionalNoble = None
optionalThird = None
optionalAnnex = None
if bool(random.getrandbits(1)):
optionalNoble = random.choice(royalTitles)
if bool(random.getrandbits(1)):
optionalThird = random.choice(thirdFrag)
if bool(random.getrandbits(1)):
optionalAnnex = random.choice(nobleAnnex)
mainBody = random.choice(firstFrag) + random.choice(secondFrag)
output = "Die heutige Weisheit von "
if optionalNoble:
output += optionalNoble + " " + mainBody
else:
output += mainBody
if optionalThird:
output += optionalThird
if optionalAnnex:
output += " " + optionalAnnex
output += ": " + random.choice(wisdoms)
return output
def choose(update, context):
params = context.args
if len(params) < 1:
context.bot.send_message(chat_id=update.message.chat_id, text="You know, I can't choose if there is nothing to choose from. Wise words!")
return
elif len(params) == 1:
context.bot.send_message(chat_id=update.message.chat_id, text="How the hell am I supposed to choose when only value is entered? Gosh.")
return
else:
context.bot.send_message(chat_id=update.message.chat_id, text=random.choice(params) + " shall be my answer!")
def inlineR(update, context):
query = update.inline_query.query
results = []
try:
images = subredditImg(query, count=40)
except Exception:
results.append(tg.InlineQueryResultArticle(0, "No", tg.InputTextMessageContent("No!")))
else:
if len(images) == 0:
results.append(tg.InlineQueryResultArticle(0, "No", "No!", ))
else:
for img in images:
results.append(tg.InlineQueryResultPhoto(img, img, img))
finally:
update.inline_query.answer(results)
def main():
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
API_TOKEN = os.environ['TELEGRAM_APITOKEN']
APP_ADDR = os.environ['APP_ADDRESS']
PORT = int(os.environ.get('PORT', '8443'))
updater = Updater(token=API_TOKEN, use_context=True)
startHandler = CommandHandler('start', start)
updater.dispatcher.add_handler(startHandler)
mensaHandler = CommandHandler('mensa', mensa)
updater.dispatcher.add_handler(mensaHandler)
andreHandler = CommandHandler('andre', andre)
updater.dispatcher.add_handler(andreHandler)
leonHandler = CommandHandler('leon', leon)
updater.dispatcher.add_handler(leonHandler)
georgHandler = CommandHandler('georg', georg)
updater.dispatcher.add_handler(georgHandler)
loenHandler = CommandHandler('loen', loen)
updater.dispatcher.add_handler(loenHandler)
maximeHandler = CommandHandler('maxime', maxime)
updater.dispatcher.add_handler(maximeHandler)
andreyHandler = CommandHandler('andrey', andrey)
updater.dispatcher.add_handler(andreyHandler)
steffuuHandler = CommandHandler('steffuu', steffuu)
updater.dispatcher.add_handler(steffuuHandler)
xkcdHandler = CommandHandler('xkcd', xkcd)
updater.dispatcher.add_handler(xkcdHandler)
decisionHandler = CommandHandler('decision', decision)
updater.dispatcher.add_handler(decisionHandler)
redditImgHandler = CommandHandler('r', r)
updater.dispatcher.add_handler(redditImgHandler)
echoHandlerText = MessageHandler(Filters.text, echoText)
updater.dispatcher.add_handler(echoHandlerText)
echoHandlerSticker = MessageHandler(Filters.sticker, echoSticker)
updater.dispatcher.add_handler(echoHandlerSticker)
catHandler = CommandHandler('cat', cat)
updater.dispatcher.add_handler(catHandler)
horseHandler = CommandHandler('horse', horse)
updater.dispatcher.add_handler(horseHandler)
personHandler = CommandHandler('person', person)
updater.dispatcher.add_handler(personHandler)
wisdomHandler = CommandHandler('wisdom', wisdom)
updater.dispatcher.add_handler(wisdomHandler)
chooseHandler = CommandHandler('choose', choose)
updater.dispatcher.add_handler(chooseHandler)
inlineRedditHandler = InlineQueryHandler(inlineR)
updater.dispatcher.add_handler(inlineRedditHandler)
updater.start_webhook(listen="0.0.0.0", port=PORT, url_path=API_TOKEN)
updater.bot.set_webhook(APP_ADDR + API_TOKEN)
updater.idle()
if __name__ == "__main__":
main()
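# --- Added illustration (not part of the original bot) ----------------------
# A minimal local-testing sketch: the webhook setup in main() needs a publicly
# reachable APP_ADDRESS, so for development the same handlers could be run with
# long polling instead. The function name and the handler subset below are
# assumptions for illustration only; the function is defined but never called.
def main_polling():
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
    updater = Updater(token=os.environ['TELEGRAM_APITOKEN'], use_context=True)
    updater.dispatcher.add_handler(CommandHandler('start', start))
    updater.dispatcher.add_handler(CommandHandler('wisdom', wisdom))
    updater.start_polling()
    updater.idle()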
|
[] |
[] |
[
"PORT",
"REDDIT_USER_AGENT",
"REDDIT_BOT_ID",
"APP_ADDRESS",
"TELEGRAM_APITOKEN",
"REDDIT_BOT_SECRET"
] |
[]
|
["PORT", "REDDIT_USER_AGENT", "REDDIT_BOT_ID", "APP_ADDRESS", "TELEGRAM_APITOKEN", "REDDIT_BOT_SECRET"]
|
python
| 6 | 0 | |
suncasa/suncasatasks/gotasks/ptclean6.py
|
##################### generated by xml-casa (v2) from ptclean6.xml ##################
##################### 6a89d05724a14fedd7b8ceb75d841936 ##############################
from __future__ import absolute_import
from casashell.private.stack_manip import find_local as __sf__
from casashell.private.stack_manip import find_frame as _find_frame
from casatools.typecheck import validator as _pc
from casatools.coercetype import coerce as _coerce
from suncasatasks import ptclean6 as _ptclean6_t
from collections import OrderedDict
import numpy
import sys
import os
import shutil
def static_var(varname, value):
def decorate(func):
setattr(func, varname, value)
return func
return decorate
class _ptclean6:
"""
ptclean6 ---- Parallelized tclean in consecutive time steps
Parallelized clean in consecutive time steps, built on top of the CASA 6 tclean task.
--------- parameter descriptions ---------------------------------------------
vis Name(s) of input visibility file(s)
default: none;
example: vis='ngc5921.ms'
vis=['ngc5921a.ms','ngc5921b.ms']; multiple MSes
imageprefix Prefix of output image names (usually useful in defining the output path)
imagesuffix Suffix of output image names (usually useful in specifying the image type, version, etc.)
ncpu Number of cpu cores to use
twidth Number of time pixels to average
doreg True to use vla_prep to register the image
usephacenter True to use the phase center information from the measurement set (e.g., VLA); False to assume the phase center is at the solar disk center (EOVSA)
reftime Reference time of the J2000 coordinates associated with the ephemeris target, e.g., "2012/03/03/12:00". This is used by helioimage2fits.py to find the solar x, y offset in order to register the image. If not set, the actual timerange of the image is used (default)
toTb True to convert to brightness temperature
sclfactor Factor by which to scale the brightness temperature up
subregion The name of a CASA region string
The name of a CASA image or region file, or a region string. Only locations within the region will
be output to the FITS file.
If the specified region falls completely outside of the image, ptclean6 will throw an error.
Manual mask options/examples :
subregion='box[[224pix,224pix],[288pix,288pix]]' : A CASA region string.
docompress True to compress the output fits files
overwrite True to overwrite the existing image
selectdata Enable data selection parameters.
field to image or mosaic. Use field id(s) or name(s).
['go listobs' to obtain the list id's or names]
default: ''= all fields
If field string is a non-negative integer, it is assumed to
be a field index otherwise, it is assumed to be a
field name
field='0~2'; field ids 0,1,2
field='0,4,5~7'; field ids 0,4,5,6,7
field='3C286,3C295'; field named 3C286 and 3C295
field = '3,4C*'; field id 3, all names starting with 4C
For multiple MS input, a list of field strings can be used:
field = ['0~2','0~4']; field ids 0-2 for the first MS and 0-4
for the second
field = '0~2'; field ids 0-2 for all input MSes
spw Select spectral window/channels
NOTE: channels de-selected here will contain all zeros if
selected by the parameter mode subparameters.
default: ''=all spectral windows and channels
spw='0~2,4'; spectral windows 0,1,2,4 (all channels)
spw='0:5~61'; spw 0, channels 5 to 61
spw='<2'; spectral windows less than 2 (i.e. 0,1)
spw='0,10,3:3~45'; spw 0,10 all channels, spw 3,
channels 3 to 45.
spw='0~2:2~6'; spw 0,1,2 with channels 2 through 6 in each.
For multiple MS input, a list of spw strings can be used:
spw=['0','0~3']; spw ids 0 for the first MS and 0-3 for the second
spw='0~3' spw ids 0-3 for all input MS
spw='3:10~20;50~60' for multiple channel ranges within spw id 3
spw='3:10~20;50~60,4:0~30' for different channel ranges for spw ids 3 and 4
spw='0:0~10,1:20~30,2:1;2;3'; spw 0, channels 0-10,
spw 1, channels 20-30, and spw 2, channels, 1,2 and 3
spw='1~4;6:15~48' for channels 15 through 48 for spw ids 1,2,3,4 and 6
timerange Range of time to select from data
default: '' (all); examples,
timerange = 'YYYY/MM/DD/hh:mm:ss~YYYY/MM/DD/hh:mm:ss'
Note: if YYYY/MM/DD is missing date defaults to first
day in data set
timerange='09:14:0~09:54:0' picks 40 min on first day
timerange='25:00:00~27:30:00' picks 1 hr to 3 hr
30min on NEXT day
timerange='09:44:00' pick data within one integration
of time
timerange='> 10:24:00' data after this time
For multiple MS input, a list of timerange strings can be
used:
timerange=['09:14:0~09:54:0','> 10:24:00']
timerange='09:14:0~09:54:0'; apply the same timerange for
all input MSes
uvrange Select data within uvrange (default unit is meters)
default: '' (all); example:
uvrange='0~1000klambda'; uvrange from 0-1000 kilo-lambda
uvrange='> 4klambda';uvranges greater than 4 kilo lambda
For multiple MS input, a list of uvrange strings can be
used:
uvrange=['0~1000klambda','100~1000klambda']
uvrange='0~1000klambda'; apply 0-1000 kilo-lambda for all
input MSes
antenna Select data based on antenna/baseline
default: '' (all)
If antenna string is a non-negative integer, it is
assumed to be an antenna index, otherwise, it is
considered an antenna name.
antenna='5\&6'; baseline between antenna index 5 and
index 6.
antenna='VA05\&VA06'; baseline between VLA antenna 5
and 6.
antenna='5\&6;7\&8'; baselines 5-6 and 7-8
antenna='5'; all baselines with antenna index 5
antenna='05'; all baselines with antenna number 05
(VLA old name)
antenna='5,6,9'; all baselines with antennas 5,6,9
index number
For multiple MS input, a list of antenna strings can be
used:
antenna=['5','5\&6'];
antenna='5'; antenna index 5 for all input MSes
antenna='!DV14'; use all antennas except DV14
scan Scan number range
default: '' (all)
example: scan='1~5'
For multiple MS input, a list of scan strings can be used:
scan=['0~100','10~200']
scan='0~100'; scan ids 0-100 for all input MSes
observation Observation ID range
default: '' (all)
example: observation='1~5'
intent Scan Intent(s)
default: '' (all)
example: intent='TARGET_SOURCE'
example: intent='TARGET_SOURCE1,TARGET_SOURCE2'
example: intent='TARGET_POINTING*'
datacolumn Data column to image (data or observed, corrected)
default:'corrected'
( If 'corrected' does not exist, it will use 'data' instead )
imagename Pre-name of output images
example : imagename='try'
Output images will be (a subset of) :
try.psf - Point spread function
try.residual - Residual image
try.image - Restored image
try.model - Model image (contains only flux components)
try.sumwt - Single pixel image containing sum-of-weights.
(for natural weighting, sensitivity=1/sqrt(sumwt))
try.pb - Primary beam model (values depend on the gridder used)
Widefield projection algorithms (gridder=mosaic,awproject) will
compute the following images too.
try.weight - FT of gridded weights or the
un-normalized sum of PB-square (for all pointings)
Here, PB = sqrt(weight) normalized to a maximum of 1.0
For multi-term wideband imaging, all relevant images above will
have additional .tt0,.tt1, etc suffixes to indicate Taylor terms,
plus the following extra output images.
try.alpha - spectral index
try.alpha.error - estimate of error on spectral index
try.beta - spectral curvature (if nterms \> 2)
Tip : Include a directory name in 'imagename' for all
output images to be sent there instead of the
current working directory : imagename='mydir/try'
Tip : Restarting an imaging run without changing 'imagename'
implies continuation from the existing model image on disk.
- If 'startmodel' was initially specified it needs to be set to ""
for the restart run (or tclean will exit with an error message).
- By default, the residual image and psf will be recomputed
but if no changes were made to relevant parameters between
the runs, set calcres=False, calcpsf=False to resume directly from
the minor cycle without the (unnecessary) first major cycle.
To automatically change 'imagename' with a numerical
increment, set restart=False (see tclean docs for 'restart').
Note : All imaging runs will by default produce restored images.
For a niter=0 run, this will be redundant and can optionally
be turned off via the 'restoration=T/F' parameter.
imsize Number of pixels
example : imsize = [350,250]
imsize = 500 is equivalent to [500,500]
To take proper advantage of internal optimized FFT routines, the
number of pixels must be even and factorizable by 2,3,5,7 only.
cell Cell size
example: cell=['0.5arcsec','0.5arcsec'] or
cell=['1arcmin', '1arcmin']
cell = '1arcsec' is equivalent to ['1arcsec','1arcsec']
phasecenter Phase center of the image (string or field id); if the phasecenter is the name of a known major solar system object ('MERCURY', 'VENUS', 'MARS', 'JUPITER', 'SATURN', 'URANUS', 'NEPTUNE', 'PLUTO', 'SUN', 'MOON') or is an ephemerides table then that source is tracked and the background sources get smeared. There is a special case, when phasecenter='TRACKFIELD', which will use the ephemerides or polynomial phasecenter in the FIELD table of the MS's as the source center to track.
example: phasecenter=6
phasecenter='J2000 19h30m00 -40d00m00'
phasecenter='J2000 292.5deg -40.0deg'
phasecenter='J2000 5.105rad -0.698rad'
phasecenter='ICRS 13:05:27.2780 -049.28.04.458'
phasecenter='myComet_ephem.tab'
phasecenter='MOON'
phasecenter='TRACKFIELD'
stokes Stokes Planes to make
default='I'; example: stokes='IQUV';
Options: 'I','Q','U','V','IV','QU','IQ','UV','IQUV','RR','LL','XX','YY','RRLL','XXYY','pseudoI'
Note : Due to current internal code constraints, if any correlation pair
is flagged, by default, no data for that row in the MS will be used.
So, in an MS with XX,YY, if only YY is flagged, neither a
Stokes I image nor an XX image can be made from those data points.
In such a situation, please split out only the unflagged correlation into
a separate MS.
Note : The 'pseudoI' option is a partial solution, allowing Stokes I imaging
when either of the parallel-hand correlations are unflagged.
The remaining constraints shall be removed (where logical) in a future release.
projection Coordinate projection
Examples : SIN, NCP
A list of supported (but untested) projections can be found here :
http://casa.nrao.edu/active/docs/doxygen/html/classcasa_1_1Projection.html#a3d5f9ec787e4eabdce57ab5edaf7c0cd
startmodel Name of starting model image
The contents of the supplied starting model image will be
copied to the imagename.model before the run begins.
example : startmodel = 'singledish.im'
For deconvolver='mtmfs', one image per Taylor term must be provided.
example : startmodel = ['try.model.tt0', 'try.model.tt1']
startmodel = ['try.model.tt0'] will use a starting model only
for the zeroth order term.
startmodel = ['','try.model.tt1'] will use a starting model only
for the first order term.
This starting model can be of a different image shape and size from
what is currently being imaged. If so, an image regrid is first triggered
to resample the input image onto the target coordinate system.
A common usage is to set this parameter equal to a single dish image
Negative components in the model image will be included as is.
[ Note : If an error occurs during image resampling/regridding,
please try using task imregrid to resample the starting model
image onto a CASA image with the target shape and
coordinate system before supplying it via startmodel ]
specmode Spectral definition mode (mfs,cube,cubedata, cubesource)
mode='mfs' : Continuum imaging with only one output image channel.
(mode='cont' can also be used here)
mode='cube' : Spectral line imaging with one or more channels
Parameters start, width,and nchan define the spectral
coordinate system and can be specified either in terms
of channel numbers, frequency or velocity in whatever
spectral frame is specified in 'outframe'.
All internal and output images are made with outframe as the
base spectral frame. However imaging code internally uses the fixed
spectral frame, LSRK for automatic internal software
Doppler tracking so that a spectral line observed over an
extended time range will line up appropriately.
Therefore the output images have an additional spectral frame conversion
layer in LSRK on top of the base frame.
(Note : Even if the input parameters are specified in a frame
other than LSRK, the viewer still displays spectral
axis in LSRK by default because of the conversion frame
layer mentioned above. The viewer can be used to relabel
the spectral axis in any desired frame - via the spectral
reference option under axis label properties in the
data display options window.)
mode='cubedata' : Spectral line imaging with one or more channels
There is no internal software Doppler tracking so
a spectral line observed over an extended time range
may be smeared out in frequency. There is strictly
no valid spectral frame with which to label the output
images, but they will list the frame defined in the MS.
mode='cubesource': Spectral line imaging while
tracking moving source (near field or solar system
objects). The velocity of the source is accounted for
and the frequency reported is in the source frame.
As there is no SOURCE frame defined,
the frame reported will be REST (note that the emission
region may not truly be at rest; it may be moving
w.r.t. the systemic velocity frame)
reffreq Reference frequency of the output image coordinate system
Example : reffreq='1.5GHz' as a string with units.
By default, it is calculated as the middle of the selected frequency range.
For deconvolver='mtmfs' the Taylor expansion is also done about
this specified reference frequency.
nchan Number of channels in the output image
For default (=-1), the number of channels will be automatically determined
based on data selected by 'spw' with 'start' and 'width'.
It is often easiest to leave nchan at the default value.
example: nchan=100
start First channel (e.g. start=3,start=\'1.1GHz\',start=\'15343km/s\')
of output cube images specified by data channel number (integer),
velocity (string with a unit), or frequency (string with a unit).
Default:''; The first channel is automatically determined based on
the 'spw' channel selection and 'width'.
When the channel number is used along with the channel selection
in 'spw' (e.g. spw='0:6~100'),
'start' channel number is RELATIVE (zero-based) to the selected
channels in 'spw'. So for the above example,
start=1 means that the first image channel is the second selected
data channel, which is channel 7.
For specmode='cube', when velocity or frequency is used it is
interpreted with the frame defined in outframe. [The parameters of
the desired output cube can be estimated by using the 'transform'
functionality of 'plotms']
examples: start='5.0km/s'; 1st channel, 5.0km/s in outframe
start='22.3GHz'; 1st channel, 22.3GHz in outframe
width Channel width (e.g. width=2,width=\'0.1MHz\',width=\'10km/s\') of output cube images
specified by data channel number (integer), velocity (string with a unit),
or frequency (string with a unit).
Default:''; data channel width
The sign of width defines the direction of the channels to be incremented.
For width specified in velocity or frequency with '-' in front gives image channels in
decreasing velocity or frequency, respectively.
For specmode='cube', when velocity or frequency is used it is interpreted with
the reference frame defined in outframe.
examples: width='2.0km/s'; results in channels with increasing velocity
width='-2.0km/s'; results in channels with decreasing velocity
width='40kHz'; results in channels with increasing frequency
width=-2; results in channels averaged over 2 data channels incremented from
high to low channel numbers
outframe Spectral reference frame in which to interpret \'start\' and \'width\'
Options: '','LSRK','LSRD','BARY','GEO','TOPO','GALACTO','LGROUP','CMB'
example: outframe='bary' for Barycentric frame
REST -- Rest frequency
LSRD -- Local Standard of Rest (J2000)
-- as the dynamical definition (IAU, [9,12,7] km/s in galactic coordinates)
LSRK -- LSR as a kinematical (radio) definition
-- 20.0 km/s in direction ra,dec = [270,+30] deg (B1900.0)
BARY -- Barycentric (J2000)
GEO --- Geocentric
TOPO -- Topocentric
GALACTO -- Galactocentric (with rotation of 220 km/s in direction l,b = [90,0] deg).
LGROUP -- Local group velocity -- 308km/s towards l,b = [105,-7] deg (F. Ghigo)
CMB -- CMB velocity -- 369.5km/s towards l,b = [264.4, 48.4] deg (F. Ghigo)
DEFAULT = LSRK
veltype Velocity type (radio, z, ratio, beta, gamma, optical)
For start and/or width specified in velocity, specifies the velocity definition
Options: 'radio','optical','z','ratio','beta','gamma'
NOTE: the viewer always defaults to displaying the 'radio' frame,
but that can be changed in the position tracking pull down.
The different types (with F = f/f0, the frequency ratio), are:
Z = (-1 + 1/F)
RATIO = (F) *
RADIO = (1 - F)
OPTICAL == Z
BETA = ((1 - F^2)/(1 + F^2))
GAMMA = ((1 + F^2)/2F) *
RELATIVISTIC == BETA (== v/c)
DEFAULT == RADIO
Note that the ones with an '*' have no real interpretation
(although the calculation will proceed) if given as a velocity.
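As an unofficial illustration of the definitions above (not part of this task's code), for the types that do have a velocity interpretation the velocity in km/s is simply the fraction multiplied by c, e.g. in Python:
    c = 299792.458                                              # speed of light, km/s
    def radio_velocity(f, f0): return c * (1.0 - f / f0)        # RADIO definition
    def optical_velocity(f, f0): return c * (f0 / f - 1.0)      # OPTICAL / Z definition
    # e.g. radio_velocity(1.419, 1.420) is about 211 km/s for a line near a 1.420 rest frequency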
restfreq List of rest frequencies or a rest frequency in a string.
Specify rest frequency to use for output image.
*Currently it uses the first rest frequency in the list for translation of
velocities. The list will be stored in the output images.
Default: []; look for the rest frequency stored in the MS, if not available,
use center frequency of the selected channels
examples: restfreq=['1.42GHz']
restfreq='1.42GHz'
interpolation Spectral interpolation (nearest,linear,cubic)
Interpolation rules to use when binning data channels onto image channels
and evaluating visibility values at the centers of image channels.
Note : 'linear' and 'cubic' interpolation requires data points on both sides of
each image frequency. Errors are therefore possible at edge channels, or near
flagged data channels. When the image channel width is much larger than the data
channel width there is little to be gained from linear or cubic interpolation,
so the extra computation involved is not worth it.
perchanweightdensity When calculating weight density for Briggs
style weighting in a cube, this parameter
determines whether to calculate the weight
density for each channel independently
(the default, True)
or a common weight density for all of the selected
data. This parameter has no
meaning for continuum (specmode='mfs') imaging
or for natural and radial weighting schemes.
For cube imaging
perchanweightdensity=True is a recommended
option that provides more uniform
sensitivity per channel for cubes, but with
generally larger psfs than the
perchanweightdensity=False (prior behavior)
option. When using Briggs style weight with
perchanweightdensity=True, the imaging weight
density calculations use only the weights of
data that contribute specifically to that
channel. On the other hand, when
perchanweightdensity=False, the imaging
weight density calculations sum all of the
weights from all of the data channels
selected whose (u,v) falls in a given uv cell
on the weight density grid. Since the
aggregated weights, in any given uv cell,
will change depending on the number of
channels included when imaging, the psf
calculated for a given frequency channel will
also necessarily change, resulting in
variability in the psf for a given frequency
channel when perchanweightdensity=False. In
general, perchanweightdensity=False results
in smaller psfs for the same value of
robustness compared to
perchanweightdensity=True, but the rms noise
as a function of channel varies and increases
toward the edge channels;
perchanweightdensity=True provides more
uniform sensitivity per channel for
cubes. This may make it harder to find
estimates of continuum when
perchanweightdensity=False. If you intend to
image a large cube in many smaller subcubes
and subsequently concatenate, it is advisable
to use perchanweightdensity=True to avoid
surprisingly varying sensitivity and psfs
across the concatenated cube.
gridder Gridding options (standard, wproject, widefield, mosaic, awproject)
The following options choose different gridding convolution
functions for the process of convolutional resampling of the measured
visibilities onto a regular uv-grid prior to an inverse FFT.
Model prediction (degridding) also uses these same functions.
Several wide-field effects can be accounted for via careful choices of
convolution functions. Gridding (degridding) runtime will rise in
proportion to the support size of these convolution functions (in uv-pixels).
standard : Prolate Spheroid with 7x7 uv pixel support size
[ This mode can also be invoked using 'ft' or 'gridft' ]
wproject : W-Projection algorithm to correct for the widefield
non-coplanar baseline effect. [Cornwell et al. 2008]
wprojplanes is the number of distinct w-values at
which to compute and use different gridding convolution
functions (see help for wprojplanes).
Convolution function support size can range
from 5x5 to few 100 x few 100.
[ This mode can also be invoked using 'wprojectft' ]
widefield : Facetted imaging with or without W-Projection per facet.
A set of facets x facets subregions of the specified image
are gridded separately using their respective phase centers
(to minimize max W). Deconvolution is done on the joint
full size image, using a PSF from the first subregion.
wprojplanes=1 : standard prolate spheroid gridder per facet.
wprojplanes > 1 : W-Projection gridder per facet.
nfacets=1, wprojplanes > 1 : Pure W-Projection and no facetting
nfacets=1, wprojplanes=1 : Same as standard,ft,gridft
A combination of facetting and W-Projection is relevant only for
very large fields of view. (In our current version of tclean, this
combination runs only with parallel=False.)
mosaic : A-Projection with azimuthally symmetric beams without
sidelobes, beam rotation or squint correction.
Gridding convolution functions per visibility are computed
from FTs of PB models per antenna.
This gridder can be run on single fields as well as mosaics.
VLA : PB polynomial fit model (Napier and Rots, 1982)
EVLA : PB polynomial fit model (Perley, 2015)
ALMA : Airy disks for a 10.7m dish (for 12m dishes) and
6.25m dish (for 7m dishes) each with 0.75m
blockages (Hunter/Brogan 2011). Joint mosaic
imaging supports heterogeneous arrays for ALMA.
Typical gridding convolution function support sizes are
between 7 and 50 depending on the desired
accuracy (given by the uv cell size or image field of view).
[ This mode can also be invoked using 'mosaicft' or 'ftmosaic' ]
awproject : A-Projection with azimuthally asymmetric beams and
including beam rotation, squint correction,
conjugate frequency beams and W-projection.
[Bhatnagar et al., 2008]
Gridding convolution functions are computed from
aperture illumination models per antenna and optionally
combined with W-Projection kernels and a prolate spheroid.
This gridder can be run on single fields as well as mosaics.
VLA : Uses ray traced model (VLA and EVLA) including feed
leg and subreflector shadows, off-axis feed location
(for beam squint and other polarization effects), and
a Gaussian fit for the feed beams (Ref: Brisken 2009)
ALMA : Similar ray-traced model as above (but the correctness
of its polarization properties remains un-verified).
Typical gridding convolution function support sizes are
between 7 and 50 depending on the desired
accuracy (given by the uv cell size or image field of view).
When combined with W-Projection they can be significantly larger.
[ This mode can also be invoked using 'awprojectft' ]
imagemosaic : (untested implementation)
Grid and iFT each pointing separately and combine the
images as a linear mosaic (weighted by a PB model) in
the image domain before a joint minor cycle.
VLA/ALMA PB models are same as for gridder='mosaicft'
------ Notes on PB models :
(1) Several different sources of PB models are used in the modes
listed above. This is partly for reasons of algorithmic flexibility
and partly due to the current lack of a common beam model
repository or consensus on what beam models are most appropriate.
(2) For ALMA and gridder='mosaic', ray-traced (TICRA) beams
are also available via the vpmanager tool.
For example, call the following before the tclean run.
vp.setpbimage(telescope="ALMA",
compleximage='/home/casa/data/trunk/alma/responses/ALMA_0_DV__0_0_360_0_45_90_348.5_373_373_GHz_ticra2007_VP.im',
antnames=['DV'+'%02d'%k for k in range(25)])
vp.saveastable('mypb.tab')
Then, supply vptable='mypb.tab' to tclean.
( Currently this will work only for non-parallel runs )
------ Note on PB masks :
In tclean, A-Projection gridders (mosaic and awproject) produce a
.pb image and use the 'pblimit' subparameter to decide normalization
cutoffs and construct an internal T/F mask in the .pb and .image images.
However, this T/F mask cannot directly be used during deconvolution
(which needs a 1/0 mask). There are two options for making a pb based
deconvolution mask.
-- Run tclean with niter=0 to produce the .pb, construct a 1/0 image
with the desired threshold (using ia.open('newmask.im');
ia.calc('iif("xxx.pb">0.3,1.0,0.0)');ia.close() for example),
and supply it via the 'mask' parameter in a subsequent run
(with calcres=F and calcpsf=F to restart directly from the minor cycle).
-- Run tclean with usemask='pb' for it to automatically construct
a 1/0 mask from the internal T/F mask from .pb at a fixed 0.2 threshold.
----- Making PBs for gridders other than mosaic,awproject
After the PSF generation, a PB is constructed using the same
models used in gridder='mosaic' but just evaluated in the image
domain without consideration to weights.
facets Number of facets on a side
A set of (facets x facets) subregions of the specified image
are gridded separately using their respective phase centers
(to minimize max W). Deconvolution is done on the joint
full size image, using a PSF from the first subregion/facet.
In our current version of tclean, facets>1 may be used only
with parallel=False.
psfphasecenter For mosaic use psf centered on this
optional direction. You may need to use
this if for example the mosaic does not
have any pointing in the center of the
image. Another reason: as the psf is
approximate for a mosaic, this may help
to deconvolve a non-central bright source
well and quickly.
example:
psfphasecenter=6 #center psf on field 6
psfphasecenter='J2000 19h30m00 -40d00m00'
psfphasecenter='J2000 292.5deg -40.0deg'
psfphasecenter='J2000 5.105rad -0.698rad'
psfphasecenter='ICRS 13:05:27.2780 -049.28.04.458'
wprojplanes Number of distinct w-values at which to compute and use different
gridding convolution functions for W-Projection
An appropriate value of wprojplanes depends on the presence/absence
of a bright source far from the phase center, the desired dynamic
range of an image in the presence of a bright far out source,
the maximum w-value in the measurements, and the desired trade off
between accuracy and computing cost.
As a (rough) guide, VLA L-Band D-config may require a
value of 128 for a source 30arcmin away from the phase
center. A-config may require 1024 or more. To converge to an
appropriate value, try starting with 128 and then increasing
it if artifacts persist. W-term artifacts (for the VLA) typically look
like arc-shaped smears in a synthesis image or a shift in source
position between images made at different times. These artifacts
are more pronounced the further the source is from the phase center.
There is no harm in simply always choosing a large value (say, 1024)
but there will be a significant performance cost to doing so, especially
for gridder='awproject' where it is combined with A-Projection.
wprojplanes=-1 is an option for gridder='widefield' or 'wproject'
in which the number of planes is automatically computed.
vptable Name of a Voltage Pattern (VP) table, as saved via the vpmanager
vptable="" : Choose default beams for different telescopes
ALMA : Airy disks
EVLA : old VLA models.
Other primary beam models can be chosen via the vpmanager tool.
Step 1 : Set up the vpmanager tool and save its state in a table
vp.setpbpoly(telescope='EVLA', coeff=[1.0, -1.529e-3, 8.69e-7, -1.88e-10])
vp.saveastable('myvp.tab')
Step 2 : Supply the name of that table in tclean.
tclean(....., vptable='myvp.tab',....)
Please see the documentation for the vpmanager for more details on how to
choose different beam models. Work is in progress to update the defaults
for EVLA and ALMA.
Note : AWProjection currently does not use this mechanism to choose
beam models. It instead uses ray-traced beams computed from
parameterized aperture illumination functions, which are not
available via the vpmanager. So, gridder='awproject' does not allow
the user to set this parameter.
mosweight When doing Briggs style weighting (including uniform), if True the weight density calculation is performed for each field independently. If False, the weight density is calculated from the average uv distribution of all the fields.
aterm Use aperture illumination functions during gridding
This parameter turns on the A-term of the AW-Projection gridder.
Gridding convolution functions are constructed from aperture illumination
function models of each antenna.
psterm Include the Prolate Spheroidal (PS) function as the anti-aliasing
operator in the gridding convolution functions used for gridding.
Setting this parameter to true is necessary when aterm is set to
false. It can be set to false when aterm is set to true, though
with this setting aliasing effects may be present in the image,
particularly near the edges.
When set to true, the .pb images will contain the Fourier transform
of the PS function. The table below enumerates the functional
effects of the psterm, aterm and wprojplanes settings. PB refers to
the Primary Beam and FT() refers to the Fourier transform operation.
Operation aterm psterm wprojplanes Contents of the .pb image
----------------------------------------------------------------------
AW-Projection True True >1 FT(PS) x PB
False PB
A-Projection True True 1 FT(PS) x PB
False PB
W-Projection False True >1 FT(PS)
Standard False True 1 FT(PS)
wbawp Use frequency dependent A-terms
Scale aperture illumination functions appropriately with frequency
when gridding and combining data from multiple channels.
conjbeams Use conjugate frequency for wideband A-terms
While gridding data from one frequency channel, choose a convolution
function from a 'conjugate' frequency such that the resulting baseline
primary beam is approximately constant across frequency. For a system in
which the primary beam scales with frequency, this step will eliminate
instrumental spectral structure from the measured data and leave only the
sky spectrum for the minor cycle to model and reconstruct [Bhatnagar et al., ApJ, 2013].
As a rough guideline for when this is relevant, a source at the half power
point of the PB at the center frequency will see an artificial spectral
index of -1.4 due to the frequency dependence of the PB [Sault and Wieringa, 1994].
If left uncorrected during gridding, this spectral structure must be modeled
in the minor cycle (using the mtmfs algorithm) to avoid dynamic range limits
(of a few hundred for a 2:1 bandwidth).
This works for specmode='mfs' and its value is ignored for cubes
cfcache Convolution function cache directory name
Name of a directory in which to store gridding convolution functions.
This cache is filled at the beginning of an imaging run. This step can be time
consuming but the cache can be reused across multiple imaging runs that
use the same image parameters (cell size, image size , spectral data
selections, wprojplanes, wbawp, psterm, aterm). The effect of the wbawp,
psterm and aterm settings is frozen-in in the cfcache. Using an existing cfcache
made with a different setting of these parameters will not reflect the current
settings.
In a parallel execution, the construction of the cfcache is also parallelized
and the time to compute scales close to linearly with the number of compute
cores used. With the re-computation of Convolution Functions (CF) due to PA
rotation turned-off (the computepastep parameter), the total number of in the
cfcache can be computed as [No. of wprojplanes x No. of selected spectral windows x 4]
By default, cfcache = imagename + '.cf'
usepointing The usepointing flag informs the gridder that it should utilize the pointing table
to use the correct direction in which the antenna is pointing with respect to the pointing phasecenter.
computepastep Parallactic angle interval after which the AIFs are recomputed (deg)
This parameter controls the accuracy of the aperture illumination function
used with AProjection for alt-az mount dishes where the AIF rotates on the
sky as the synthesis image is built up. Once the PA in the data changes by
the given interval, AIFs are re-computed at the new PA.
A value of 360.0 deg (the default) implies no re-computation due to PA rotation.
AIFs are computed for the PA value of the first valid data received and used for
all of the data.
rotatepastep Parallactic angle interval after which the nearest AIF is rotated (deg)
Instead of recomputing the AIF for every timestep's parallactic angle,
the nearest existing AIF is used and rotated
after the PA has changed by the rotatepastep value.
A value of 360.0 deg (the default) disables rotation of the AIF.
For example, computepastep=360.0 and rotatepastep=5.0 will compute
the AIFs at only the starting parallactic angle and all other timesteps will
use a rotated version of that AIF at the nearest 5.0 degree point.
pointingoffsetsigdev Corrections for heterogeneous and time-dependent pointing
offsets via AWProjection are controlled by this parameter.
It is a vector of 2 ints or doubles each of which is interpreted
in units of arcsec. Based on the first threshold, a clustering
algorithm is applied to entries from the POINTING subtable
of the MS to determine the number of distinct antenna groups for which
the pointing offset must be computed separately. The second
number controls how much a pointing change across time can
be ignored and after which an antenna rebinning is required.
Note : The default value of this parameter is [], due to a programmatic constraint.
If run with this value, it will internally pick [600,600] and exercise the
option of using large tolerances (10arcmin) on both axes. Please choose
a setting explicitly for runs that need to use this parameter.
Note : This option is available only for gridder='awproject' and usepointing=True and
and has been validated primarily with VLASS on-the-fly mosaic data
where POINTING subtables have been modified after the data are recorded.
Examples of parameter usage :
[100.0,100.0] : Pointing offsets of 100 arcsec or less are considered
small enough to be ignored. Using large values for both
indicates a homogeneous array.
[10.0, 100.0] : Based on entries in the POINTING subtable, antennas
are grouped into clusters based on a 10arcsec bin size.
All antennas in a bin are given a pointing offset calculated
as the average of the offsets of all antennas in the bin.
On the time axis, offset changes up to 100 arcsec will be ignored.
[10.0,10.0] : Calculate separate pointing offsets for each antenna group
(with a 10 arcsec bin size). As a function of time, recalculate
the antenna binning if the POINTING table entries change by
more than 10 arcsec w.r.t. the previously computed binning.
[1.0, 1.0] : Tight tolerances will imply a fully heterogeneous situation where
each antenna gets its own pointing offset. Also, time-dependent
offset changes greater than 1 arcsec will trigger recomputes of
the phase gradients. This is the most general situation and is also
the most expensive option as it constructs and uses separate
phase gradients for all baselines and timesteps.
For VLASS 1.1 data with two kinds of pointing offsets, the recommended
setting is [ 30.0, 30.0 ].
For VLASS 1.2 data with only the time-dependent pointing offsets, the
recommended setting is [ 300.0, 30.0 ] to turn off the antenna grouping
but to retain the time dependent corrections required from one timestep
to the next.
pblimit PB gain level at which to cut off normalizations
Divisions by .pb during normalizations have a cut off at a .pb gain
level given by pblimit. Outside this limit, image values are set to zero.
Additionally, by default, an internal T/F mask is applied to the .pb, .image and
.residual images to mask out (T) all invalid pixels outside the pblimit area.
Note : This internal T/F mask cannot be used as a deconvolution mask.
To do so, please follow the steps listed above in the Notes for the
'gridder' parameter.
Note : To prevent the internal T/F mask from appearing in anything other
than the .pb and .image.pbcor images, 'pblimit' can be set to a
negative number. The absolute value will still be used as a valid 'pblimit'.
A tclean restart using existing output images on disk that already
have this T/F mask in the .residual and .image but only pblimit set
to a negative value, will remove this mask after the next major cycle.
normtype Normalization type (flatnoise, flatsky, pbsquare)
Gridded (and FT'd) images represent the PB-weighted sky image.
Qualitatively it can be approximated as two instances of the PB
applied to the sky image (one naturally present in the data
and one introduced during gridding via the convolution functions).
xxx.weight : Weight image approximately equal to sum ( square ( pb ) )
xxx.pb : Primary beam calculated as sqrt ( xxx.weight )
normtype='flatnoise' : Divide the raw image by sqrt(.weight) so that
the input to the minor cycle represents the
product of the sky and PB. The noise is 'flat'
across the region covered by each PB.
normtype='flatsky' : Divide the raw image by .weight so that the input
to the minor cycle represents only the sky.
The noise is higher in the outer regions of the
primary beam where the sensitivity is low.
normtype='pbsquare' : No normalization after gridding and FFT.
The minor cycle sees the sky times pb square
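As a rough, unofficial sketch of the three normalization choices above (illustration only, not the internal tclean code), in Python:
    import numpy as np
    def normalize(raw, weight, normtype='flatnoise'):
        # raw : gridded + FFT'd image array ; weight ~ sum(pb^2) per pixel
        if normtype == 'flatnoise':
            return raw / np.sqrt(weight)   # minor cycle sees sky x PB
        if normtype == 'flatsky':
            return raw / weight            # minor cycle sees the sky only
        return raw                         # 'pbsquare' : no normalization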
deconvolver Name of minor cycle algorithm (hogbom,clark,multiscale,mtmfs,mem,clarkstokes)
Each of the following algorithms operate on residual images and psfs
from the gridder and produce output model and restored images.
Minor cycles stop and a major cycle is triggered when cyclethreshold
or cycleniter are reached. For all methods, components are picked from
the entire extent of the image or (if specified) within a mask.
hogbom : An adapted version of Hogbom Clean [Hogbom, 1974]
- Find the location of the peak residual
- Add this delta function component to the model image
- Subtract a scaled and shifted PSF of the same size as the image
from regions of the residual image where the two overlap.
- Repeat
clark : An adapted version of Clark Clean [Clark, 1980]
- Find the location of max(I^2+Q^2+U^2+V^2)
- Add delta functions to each stokes plane of the model image
- Subtract a scaled and shifted PSF within a small patch size
from regions of the residual image where the two overlap.
- After several iterations trigger a Clark major cycle to subtract
components from the visibility domain, but without de-gridding.
- Repeat
( Note : 'clark' maps to imagermode='' in the old clean task.
'clark_exp' is another implementation that maps to
imagermode='mosaic' or 'csclean' in the old clean task
but the behavior is not identical. For now, please
use deconvolver='hogbom' if you encounter problems. )
clarkstokes : Clark Clean operating separately per Stokes plane
(Note : 'clarkstokes_exp' is an alternate version. See above.)
multiscale : MultiScale Clean [Cornwell, 2008]
- Smooth the residual image to multiple scale sizes
- Find the location and scale at which the peak occurs
- Add this multiscale component to the model image
- Subtract a scaled,smoothed,shifted PSF (within a small
patch size per scale) from all residual images
- Repeat from step 2
mtmfs : Multi-term (Multi Scale) Multi-Frequency Synthesis [Rau and Cornwell, 2011]
- Smooth each Taylor residual image to multiple scale sizes
- Solve a NTxNT system of equations per scale size to compute
Taylor coefficients for components at all locations
- Compute gradient chi-square and pick the Taylor coefficients
and scale size at the location with maximum reduction in
chi-square
- Add multi-scale components to each Taylor-coefficient
model image
- Subtract scaled,smoothed,shifted PSF (within a small patch size
per scale) from all smoothed Taylor residual images
- Repeat from step 2
mem : Maximum Entropy Method [Cornwell and Evans, 1985]
- Iteratively solve for values at all individual pixels via the
MEM method. It minimizes an objective function of
chi-square plus entropy (here, a measure of difference
between the current model and a flat prior model).
(Note : This MEM implementation is not very robust.
Improvements will be made in the future.)
scales List of scale sizes (in pixels) for multi-scale and mtmfs algorithms.
--> scales=[0,6,20]
This set of scale sizes should represent the sizes
(diameters in units of number of pixels)
of dominant features in the image being reconstructed.
The smallest scale size is recommended to be 0 (point source),
the second the size of the synthesized beam and the third 3-5
times the synthesized beam, etc. For example, if the synthesized
beam is 10" FWHM and cell=2",try scales = [0,5,15].
For numerical stability, the largest scale must be
smaller than the image (or mask) size and smaller than or
comparable to the scale corresponding to the lowest measured
spatial frequency (as a scale size much larger than what the
instrument is sensitive to is unconstrained by the data making
it harder to recover from errors during the minor cycle).
nterms Number of Taylor coefficients in the spectral model
- nterms=1 : Assume flat spectrum source
- nterms=2 : Spectrum is a straight line with a slope
- nterms=N : A polynomial of order N-1
From a Taylor expansion of the expression of a power law, the
spectral index is derived as alpha = taylorcoeff_1 / taylorcoeff_0
Spectral curvature is similarly derived when possible.
The optimal number of Taylor terms depends on the available
signal to noise ratio, bandwidth ratio, and spectral shape of the
source as seen by the telescope (sky spectrum x PB spectrum).
nterms=2 is a good starting point for wideband EVLA imaging
and the lower frequency bands of ALMA (when fractional bandwidth
is greater than 10%) and if there is at least one bright source for
which a dynamic range of greater than few 100 is desired.
Spectral artifacts for the VLA often look like spokes radiating out from
a bright source (i.e. in the image made with standard mfs imaging).
If increasing the number of terms does not eliminate these artifacts,
check the data for inadequate bandpass calibration. If the source is away
from the pointing center, consider including wide-field corrections too.
(Note : In addition to output Taylor coefficient images .tt0,.tt1,etc
images of spectral index (.alpha), an estimate of error on
spectral index (.alpha.error) and spectral curvature (.beta,
if nterms is greater than 2) are produced.
- These alpha, alpha.error and beta images contain
internal T/F masks based on a threshold computed
as peakresidual/10. Additional masking based on
.alpha/.alpha.error may be desirable.
- .alpha.error is a purely empirical estimate derived
from the propagation of error during the division of
two noisy numbers (alpha = xx.tt1/xx.tt0) where the
'error' on tt1 and tt0 are simply the values picked from
the corresponding residual images. The absolute value
of the error is not always accurate and it is best to interpret
the errors across the image only in a relative sense.)
smallscalebias A numerical control to bias the scales when using multi-scale or mtmfs algorithms.
The peak from each scale's smoothed residual is
multiplied by ( 1 - smallscalebias * scale/maxscale )
to increase or decrease the amplitude relative to other scales,
before the scale with the largest peak is chosen.
Smallscalebias can be varied between -1.0 and 1.0.
A score of 0.0 gives all scales equal weight (default).
A score larger than 0.0 will bias the solution towards smaller scales.
A score smaller than 0.0 will bias the solution towards larger scales.
The effect of smallscalebias is more pronounced when using multi-scale relative to mtmfs.
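A small, unofficial sketch of the scale-selection bias described above (illustration only, with made-up peak values):
    def pick_scale(peaks, scales, smallscalebias=0.0):
        maxscale = float(max(scales))
        biased = [p * (1.0 - smallscalebias * s / maxscale)
                  for p, s in zip(peaks, scales)]
        return scales[biased.index(max(biased))]
    # pick_scale([1.0, 1.1, 1.2], [0, 6, 20]) returns 20;
    # pick_scale([1.0, 1.1, 1.2], [0, 6, 20], smallscalebias=0.6) returns 0.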
restoration Do restoration steps (or not)
Construct a restored image : imagename.image by convolving the model
image with a clean beam and adding the residual image to the result.
If a restoringbeam is specified, the residual image is also
smoothed to that target resolution before adding it in.
If a .model does not exist, it will make an empty one and create
the restored image from the residuals ( with additional smoothing if needed ).
With algorithm='mtmfs', this will construct Taylor coefficient maps from
the residuals and compute .alpha and .alpha.error.
restoringbeam Restoring beam shape/size to use.
- restoringbeam='' or ['']
A Gaussian fitted to the PSF main lobe (separately per image plane).
- restoringbeam='10.0arcsec'
Use a circular Gaussian of this width for all planes
- restoringbeam=['8.0arcsec','10.0arcsec','45deg']
Use this elliptical Gaussian for all planes
- restoringbeam='common'
Automatically estimate a common beam shape/size appropriate for
all planes.
Note : For any restoring beam different from the native resolution
the model image is convolved with the beam and added to
residuals that have been convolved to the same target resolution.
pbcor Apply PB correction on the output restored image
A new image with extension .image.pbcor will be created from
the evaluation of .image / .pb for all pixels above the specified pblimit.
Note : Stand-alone PB-correction can be triggered by re-running
tclean with the appropriate imagename and with
niter=0, calcpsf=False, calcres=False, pbcor=True, vptable='vp.tab'
( where vp.tab is the name of the vpmanager file.
See the inline help for the 'vptable' parameter )
Note : Multi-term PB correction that includes a correction for the
spectral index of the PB has not been enabled for the 4.7 release.
Please use the widebandpbcor task instead.
( Wideband PB corrections are required when the amplitude of the
brightest source is known accurately enough to be sensitive
to the difference in the PB gain between the upper and lower
end of the band at its location. As a guideline, the artificial spectral
index due to the PB is -1.4 at the 0.5 gain level and less than -0.2
at the 0.9 gain level at the middle frequency )
outlierfile Name of outlier-field image definitions
A text file containing sets of parameter=value pairs,
one set per outlier field.
Example : outlierfile='outs.txt'
Contents of outs.txt :
imagename=tst1
nchan=1
imsize=[80,80]
cell=[8.0arcsec,8.0arcsec]
phasecenter=J2000 19:58:40.895 +40.55.58.543
mask=circle[[40pix,40pix],10pix]
imagename=tst2
nchan=1
imsize=[100,100]
cell=[8.0arcsec,8.0arcsec]
phasecenter=J2000 19:58:40.895 +40.56.00.000
mask=circle[[60pix,60pix],20pix]
The following parameters are currently allowed to be different between
the main field and the outlier fields (i.e. they will be recognized if found
in the outlier text file). If a parameter is not listed, the value is picked from
what is defined in the main task input.
imagename, imsize, cell, phasecenter, startmodel, mask
specmode, nchan, start, width, nterms, reffreq,
gridder, deconvolver, wprojplanes
Note : 'specmode' is an option, so combinations of mfs and cube
for different image fields, for example, are supported.
'deconvolver' and 'gridder' are also options that allow different
imaging or deconvolution algorithm per image field.
For example, multiscale with wprojection and 16 w-term planes
on the main field and mtmfs with nterms=3 and wprojection
with 64 planes on a bright outlier source for which the frequency
dependence of the primary beam produces a strong effect that
must be modeled. The traditional alternative to this approach is
to first image the outlier, subtract it out of the data (uvsub) and
then image the main field.
Note : If you encounter a use-case where some other parameter needs
to be allowed in the outlier file (and it is logical to do so), please
send us feedback. The above is an initial list.
weighting Weighting scheme (natural,uniform,briggs,superuniform,radial, briggsabs, briggsbwtaper)
During gridding of the dirty or residual image, each visibility value is
multiplied by a weight before it is accumulated on the uv-grid.
The PSF's uv-grid is generated by gridding only the weights (weightgrid).
weighting='natural' : Gridding weights are identical to the data weights
from the MS. For visibilities with similar data weights,
the weightgrid will follow the sample density
pattern on the uv-plane. This weighting scheme
provides the maximum imaging sensitivity at the
expense of a possibly fat PSF with high sidelobes.
It is most appropriate for detection experiments
where sensitivity is most important.
weighting='uniform' : Gridding weights per visibility data point are the
original data weights divided by the total weight of
all data points that map to the same uv grid cell :
' data_weight / total_wt_per_cell '.
The weightgrid is as close to flat as possible resulting
in a PSF with a narrow main lobe and suppressed
sidelobes. However, since heavily sampled areas of
the uv-plane get down-weighted, the imaging
sensitivity is not as high as with natural weighting.
It is most appropriate for imaging experiments where
a well behaved PSF can help the reconstruction.
weighting='briggs' : Gridding weights per visibility data point are given by
'data_weight / ( A *total_wt_per_cell + B ) ' where
A and B vary according to the 'robust' parameter.
robust = -2.0 maps to A=1,B=0 or uniform weighting.
robust = +2.0 maps to natural weighting.
(robust=0.5 is equivalent to robust=0.0 in AIPS IMAGR.)
Robust/Briggs weighting generates a PSF that can
vary smoothly between 'natural' and 'uniform' and
allow customized trade-offs between PSF shape and
imaging sensitivity.
weighting='briggsabs' : Experimental option.
Same as Briggs except the formula is different: A =
robust*robust and B depends on the
estimated noise per visibility. Giving noise='0Jy'
is not a reasonable option.
In this mode (or formula) robust values
from -2.0 to 0.0 only make sense (2.0 and
-2.0 will get the same weighting)
weighting='superuniform' : This is similar to uniform weighting except that
the total_wt_per_cell is replaced by the
total_wt_within_NxN_cells around the uv cell of
interest. ( N = subparameter 'npixels' )
This method tends to give a PSF with inner
sidelobes that are suppressed as in uniform
weighting but with far-out sidelobes closer to
natural weighting. The peak sensitivity is also
closer to natural weighting.
weighting='radial' : Gridding weights are given by ' data_weight * uvdistance '
This method approximately minimizes rms sidelobes
for an east-west synthesis array.
weighting='briggsbwtaper' : A modified version of Briggs weighting for cubes where an inverse uv taper,
which is proportional to the fractional bandwidth of the entire cube,
is applied per channel. The objective is to modify cube (perchanweightdensity = True)
imaging weights to have a similar density to that of the continuum imaging weights.
This is currently an experimental weighting scheme being developed for ALMA.
For more details on weighting please see Chapter 3
of Dan Briggs' thesis (http://www.aoc.nrao.edu/dissertations/dbriggs)
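A minimal, unofficial sketch of the per-visibility gridding weight for the 'briggs' family described above (illustration only; the actual mapping from 'robust' to A and B is internal to the imager):
    def briggs_weight(data_weight, total_wt_per_cell, A, B):
        return data_weight / (A * total_wt_per_cell + B)
    # A=1, B=0 reproduces uniform weighting ; A=0, B=1 reproduces natural weighting.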
robust Robustness parameter for Briggs weighting.
robust = -2.0 maps to uniform weighting.
robust = +2.0 maps to natural weighting.
(robust=0.5 is equivalent to robust=0.0 in AIPS IMAGR.)
noise Noise parameter for briggsabs mode weighting
npixels Number of pixels to determine uv-cell size for super-uniform weighting
(0 defaults to -/+ 3 pixels)
npixels -- uv-box used for weight calculation
a box going from -npixel/2 to +npixel/2 on each side
around a point is used to calculate weight density.
npixels=2 goes from -1 to +1 and covers 3 pixels on a side.
npixels=0 implies a single pixel, which does not make sense for
superuniform weighting. Therefore, if npixels=0 it will
be forced to 6 (or a box of -3pixels to +3pixels) to cover
7 pixels on a side.
uvtaper uv-taper on outer baselines in uv-plane
Apply a Gaussian taper in addition to the weighting scheme specified
via the 'weighting' parameter. Higher spatial frequencies are weighted
down relative to lower spatial frequencies to suppress artifacts
arising from poorly sampled areas of the uv-plane. It is equivalent to
smoothing the PSF obtained by other weighting schemes and can be
specified either as a Gaussian in uv-space (eg. units of lambda)
or as a Gaussian in the image domain (eg. angular units like arcsec).
uvtaper = [bmaj, bmin, bpa]
NOTE: the on-sky FWHM in arcsec is roughly the uv taper/200 (klambda).
default: uvtaper=[]; no Gaussian taper applied
example: uvtaper=['5klambda'] circular taper
FWHM=5 kilo-lambda
uvtaper=['5klambda','3klambda','45.0deg']
uvtaper=['10arcsec'] on-sky FWHM 10 arcseconds
uvtaper=['300.0'] default units are lambda
in aperture plane
niter Maximum number of iterations
A stopping criterion based on total iteration count.
Currently the parameter type is defined as an integer, so integer values
larger than 2147483647 will not be set properly as they cause an overflow.
Iterations are typically defined as selecting one flux component
and partially subtracting it out from the residual image.
niter=0 : Do only the initial major cycle (make dirty image, psf, pb, etc)
niter larger than zero : Run major and minor cycles.
Note : Global stopping criteria vs major-cycle triggers
In addition to global stopping criteria, the following rules are
used to determine when to terminate a set of minor cycle iterations
and trigger major cycles [derived from Cotton-Schwab Clean, 1984]
'cycleniter' : controls the maximum number of iterations per image
plane before triggering a major cycle.
'cyclethreshold' : Automatically computed threshold related to the
max sidelobe level of the PSF and peak residual.
Divergence, detected as an increase of 10% in peak residual from the
minimum so far (during minor cycle iterations)
The first criterion to be satisfied takes precedence.
Note : Iteration counts for cubes or multi-field images :
For images with multiple planes (or image fields) on which the
deconvolver operates in sequence, iterations are counted across
all planes (or image fields). The iteration count is compared with
'niter' only after all channels/planes/fields have completed their
minor cycles and exited either due to 'cycleniter' or 'cyclethreshold'.
Therefore, the actual number of iterations reported in the logger
can sometimes be larger than the user specified value in 'niter'.
For example, with niter=100, cycleniter=20,nchan=10,threshold=0,
a total of 200 iterations will be done in the first set of minor cycles
before the total is compared with niter=100 and it exits.
Note : Additional global stopping criteria include
- no change in peak residual across two major cycles
- a 50% or more increase in peak residual across one major cycle
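For illustration only (not part of the task interface), a minimal Python sketch of how
the iteration count in the example above accumulates across channels before being
compared to niter:

    # numbers taken from the example above (niter=100, cycleniter=20, nchan=10)
    niter, cycleniter, nchan = 100, 20, 10
    total = 0
    while total < niter:                  # the global comparison happens only here
        total += cycleniter * nchan       # one set of minor cycles over all channels
    print(total)                          # prints 200 : more than niter=100, as noted above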
gain Loop gain
Fraction of the source flux to subtract out of the residual image
for the CLEAN algorithm and its variants.
A low value (0.2 or less) is recommended when the sky brightness
distribution is not well represented by the basis functions used by
the chosen deconvolution algorithm. A higher value can be tried when
there is a good match between the true sky brightness structure and
the basis function shapes. For example, for extended emission,
multiscale clean with an appropriate set of scale sizes will tolerate
a higher loop gain than Clark clean.
threshold Stopping threshold (number in units of Jy, or string)
A global stopping threshold that the peak residual (within clean mask)
across all image planes is compared to.
threshold = 0.005 : 5mJy
threshold = '5.0mJy'
Note : A 'cyclethreshold' is internally computed and used as a major cycle
trigger. It is related to what fraction of the PSF can be reliably
used during minor cycle updates of the residual image. By default
the minor cycle iterations terminate once the peak residual reaches
the first sidelobe level of the brightest source.
'cyclethreshold' is computed as follows using the settings in
parameters 'cyclefactor','minpsffraction','maxpsffraction','threshold' :
psf_fraction = max_psf_sidelobe_level * 'cyclefactor'
psf_fraction = max(psf_fraction, 'minpsffraction');
psf_fraction = min(psf_fraction, 'maxpsffraction');
cyclethreshold = peak_residual * psf_fraction
cyclethreshold = max( cyclethreshold, 'threshold' )
If nsigma is set (>0.0), the N-sigma threshold is calculated (see
the description under nsigma), then cyclethreshold is further modified as,
cyclethreshold = max( cyclethreshold, nsigma_threshold )
'cyclethreshold' is made visible and editable only in the
interactive GUI when tclean is run with interactive=True.
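For illustration only, a minimal Python sketch of the 'cyclethreshold' rule quoted above
(this is a direct reading of the formulas, not the task's own implementation):

    def compute_cyclethreshold(peak_residual, max_psf_sidelobe, cyclefactor,
                               minpsffraction, maxpsffraction, threshold,
                               nsigma_threshold=0.0):
        psf_fraction = max_psf_sidelobe * cyclefactor
        psf_fraction = max(psf_fraction, minpsffraction)
        psf_fraction = min(psf_fraction, maxpsffraction)
        cyclethreshold = peak_residual * psf_fraction
        cyclethreshold = max(cyclethreshold, threshold)
        if nsigma_threshold > 0.0:
            cyclethreshold = max(cyclethreshold, nsigma_threshold)
        return cyclethreshold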
nsigma Multiplicative factor for rms-based threshold stopping
N-sigma threshold is calculated as nsigma * rms, where the rms value per image plane is
determined from robust statistics. For nsigma > 0.0, in a minor cycle, the maximum of the two values,
the N-sigma threshold and cyclethreshold, is used to trigger a major cycle
(see also the description under 'threshold').
Set nsigma=0.0 to preserve the previous tclean behavior without this feature.
The top-level parameter 'fastnoise' is relevant for the rms noise calculation that is used
to determine this threshold.
The parameter 'nsigma' may be an int, a float, or a double.
cycleniter Maximum number of minor-cycle iterations (per plane) before triggering
a major cycle
For example, for a single plane image, if niter=100 and cycleniter=20,
there will be 5 major cycles after the initial one (assuming there is no
threshold based stopping criterion). At each major cycle boundary, if
the number of iterations left over (to reach niter) is less than cycleniter,
it is set to the difference.
Note : cycleniter applies per image plane, even if cycleniter x nplanes
gives a total number of iterations greater than 'niter'. This is to
preserve consistency across image planes within one set of minor
cycle iterations.
cyclefactor Scaling on PSF sidelobe level to compute the minor-cycle stopping threshold.
Please refer to the Note under the documentation for 'threshold', which
discusses the calculation of 'cyclethreshold'.
cyclefactor=1.0 results in a cyclethreshold at the first sidelobe level of
the brightest source in the residual image before the minor cycle starts.
cyclefactor=0.5 allows the minor cycle to go deeper.
cyclefactor=2.0 triggers a major cycle sooner.
minpsffraction PSF fraction that marks the max depth of cleaning in the minor cycle
Please refer to the Note under the documentation for 'threshold', which
discusses the calculation of 'cyclethreshold'.
For example, minpsffraction=0.5 will stop cleaning at half the height of
the peak residual and trigger a major cycle earlier.
maxpsffraction PSF fraction that marks the minimum depth of cleaning in the minor cycle
Please refer to the Note under the documentation for 'threshold', which
discusses the calculation of 'cyclethreshold'.
For example, maxpsffraction=0.8 will ensure that at least the top 20
percent of the source will be subtracted out in the minor cycle even if
the first PSF sidelobe is at the 0.9 level (an extreme example), or if the
cyclefactor is set too high for anything to get cleaned.
interactive Modify masks and parameters at runtime
interactive=True will trigger an interactive GUI at every major cycle
boundary (after the major cycle and before the minor cycle).
The interactive mode is currently not available for parallel cube imaging (please also
refer to the Note under the documentation for 'parallel' below).
Options for runtime parameter modification are :
Interactive clean mask : Draw a 1/0 mask (appears as a contour) by hand.
If a mask is supplied at the task interface or if
automasking is invoked, the current mask is
displayed in the GUI and is available for manual
editing.
Note : If a mask contour is not visible, please
check the cursor display at the bottom of
GUI to see which parts of the mask image
have ones and zeros. If the entire mask=1
no contours will be visible.
Operation buttons : -- Stop execution now (restore current model and exit)
-- Continue on until global stopping criteria are reached
without stopping for any more interaction
-- Continue with minor cycles and return for interaction
after the next major cycle.
Iteration control : -- max cycleniter : Trigger for the next major cycle
The display begins with
[ min( cycleniter, niter - itercount ) ]
and can be edited by hand.
-- iterations left : The display begins with [niter-itercount ]
and can be edited to increase or
decrease the total allowed niter.
-- threshold : Edit global stopping threshold
-- cyclethreshold : The display begins with the
automatically computed value
(see Note in help for 'threshold'),
and can be edited by hand.
All edits will be reflected in the log messages that appear
once minor cycles begin.
[ For scripting purposes, replacing True/False with 1/0 will get tclean to
return an imaging summary dictionary to python ]
usemask Type of mask(s) to be used for deconvolution
user: (default) mask image(s) or user specified region file(s) or string CRTF expression(s)
subparameters: mask, pbmask
pb: primary beam mask
subparameter: pbmask
Example: usemask="pb", pbmask=0.2
Construct a mask at the 0.2 pb gain level.
(Currently, this option will work only with
gridders that produce .pb (i.e. mosaic and awproject)
or if an externally produced .pb image exists on disk)
auto-multithresh : auto-masking by multiple thresholds for deconvolution
subparameters : sidelobethreshold, noisethreshold, lownoisethreshold, negativethreshold, smoothfactor,
minbeamfrac, cutthreshold, pbmask, growiterations, dogrowprune, minpercentchange, verbose
Additional top level parameter relevant to auto-multithresh: fastnoise
if pbmask is >0.0, the region outside the specified pb gain level is excluded from
image statistics in determination of the threshold.
Note: By default the intermediate mask generated by automask at each deconvolution cycle
is over-written in the next cycle but one can save them by setting
the environment variable, SAVE_ALL_AUTOMASKS="true".
(e.g. in the CASA prompt, os.environ['SAVE_ALL_AUTOMASKS']="true" )
The saved CASA mask image name will be imagename.mask.autothresh#, where
# is the iteration cycle number.
mask Mask (a list of image name(s), region file(s), or region string(s))
The name of a CASA image or region file or region string that specifies
a 1/0 mask to be used for deconvolution. Only locations with value 1 will
be considered for the centers of flux components in the minor cycle.
If regions specified fall completely outside of the image, tclean will throw an error.
Manual mask options/examples :
mask='xxx.mask' : Use this CASA image named xxx.mask and containing
ones and zeros as the mask.
If the mask is only different in spatial coordinates from what is being made
it will be resampled to the target coordinate system before being used.
The mask has to have the same shape in velocity and Stokes planes
as the output image. Exceptions are single velocity and/or single
Stokes plane masks. They will be expanded to cover all velocity and/or
Stokes planes of the output cube.
[ Note : If an error occurs during image resampling or
if the expected mask does not appear, please try
using tasks 'imregrid' or 'makemask' to resample
the mask image onto a CASA image with the target
shape and coordinates and supply it via the 'mask'
parameter. ]
mask='xxx.crtf' : A text file with region strings and the following on the first line
( #CRTFv0 CASA Region Text Format version 0 )
This is the format of a file created via the viewer's region
tool when saved in CASA region file format.
mask='circle[[40pix,40pix],10pix]' : A CASA region string.
mask=['xxx.mask','xxx.crtf', 'circle[[40pix,40pix],10pix]'] : a list of masks
Note : Mask images for deconvolution must contain 1 or 0 in each pixel.
Such a mask is different from an internal T/F mask that can be
held within each CASA image. These two types of masks are not
automatically interchangeable, so please use the makemask task
to copy between them if you need to construct a 1/0 based mask
from a T/F one.
Note : Work is in progress to generate more flexible masking options and
enable more controls.
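For illustration only, a minimal Python sketch that writes a one-region CRTF file using
just the format details quoted above (the file name 'xxx.crtf' is arbitrary):

    with open('xxx.crtf', 'w') as f:
        print('#CRTFv0 CASA Region Text Format version 0', file=f)
        print('circle[[40pix,40pix],10pix]', file=f)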
pbmask Sub-parameter for usemask='auto-multithresh': primary beam mask
Examples : pbmask=0.0 (default, no pb mask)
pbmask=0.2 (construct a mask at the 0.2 pb gain level)
sidelobethreshold Sub-parameter for "auto-multithresh": mask threshold based on sidelobe levels: sidelobethreshold * max_sidelobe_level * peak residual
noisethreshold Sub-parameter for "auto-multithresh": mask threshold based on the noise level: noisethreshold * rms + location (=median)
The rms is calculated from MAD with rms = 1.4826*MAD.
lownoisethreshold Sub-parameter for "auto-multithresh": mask threshold to grow previously masked regions via binary dilation: lownoisethreshold * rms in residual image + location (=median)
The rms is calculated from MAD with rms = 1.4826*MAD.
negativethreshold Sub-parameter for "auto-multithresh": mask threshold for negative features: -1.0* negativethreshold * rms + location(=median)
The rms is calculated from MAD with rms = 1.4826*MAD.
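For illustration only, a minimal numpy sketch of the threshold formulas quoted above
(a reading of the formulas with rms = 1.4826*MAD and location = median; not the task's code,
and 'peak' here is simply taken as the maximum of the residual):

    import numpy as np
    def automask_thresholds(residual, max_sidelobe_level, sidelobethreshold,
                            noisethreshold, lownoisethreshold, negativethreshold):
        location = np.median(residual)
        rms = 1.4826 * np.median(np.abs(residual - location))
        peak = np.max(residual)
        return {'sidelobe': sidelobethreshold * max_sidelobe_level * peak,
                'noise': noisethreshold * rms + location,
                'lownoise': lownoisethreshold * rms + location,
                'negative': -1.0 * negativethreshold * rms + location}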
smoothfactor Sub-parameter for "auto-multithresh": smoothing factor in units of the beam
minbeamfrac Sub-parameter for "auto-multithresh": minimum beam fraction in size to prune masks smaller than minbeamfrac * beam
<=0.0 : No pruning
cutthreshold Sub-parameter for "auto-multithresh": threshold to cut the smoothed mask to create a final mask: cutthreshold * peak of the smoothed mask
growiterations Sub-parameter for "auto-multithresh": Maximum number of iterations to perform using binary dilation for growing the mask
dogrowprune Experimental sub-parameter for "auto-multithresh": Do pruning on the grow mask
minpercentchange If the change in the mask size in a particular channel is less than minpercentchange, stop masking that channel in subsequent cycles. This check is only applied when noise based threshold is used and when the previous clean major cycle had a cyclethreshold value equal to the clean threshold. Values equal to -1.0 (or any value less than 0.0) will turn off this check (the default). Automask will still stop masking if the current channel mask is an empty mask and the noise threshold was used to determine the mask.
verbose If set to True, the summary of automasking at the end of each automasking process
is printed in the logger. The following information per channel will be listed in the summary.
chan: channel number
masking?: F - stop updating automask for the subsequent iteration cycles
RMS: robust rms noise
peak: peak in residual image
thresh_type: type of threshold used (noise or sidelobe)
thresh_value: the value of threshold used
N_reg: number of the automask regions
N_pruned: number of the automask regions removed by pruning
N_grow: number of the grow mask regions
N_grow_pruned: number of the grow mask regions removed by pruning
N_neg_pix: number of pixels for negative mask regions
Note that for a large cube, extra logging may slow down the process.
fastnoise Only relevant when automasking (usemask='auto-multithresh') and/or the n-sigma stopping threshold (nsigma>0.0) are/is used. If it is set to True, a simpler but faster noise calculation is used.
In this case, the threshold values are determined based on classic statistics (using all
unmasked pixels for the calculations).
If it is set to False, the new noise calculation
method is used, based on the pre-existing mask.
Case 1: no existing mask
Calculate image statistics using Chauvenet algorithm
Case 2: there is an existing mask
Calculate image statistics by classical method on the region
outside the mask and inside the primary beam mask.
In all cases above RMS noise is calculated from MAD.
restart Re-use existing images (and start from an existing model image)
or automatically increment the image name and make a new image set.
True : Re-use existing images. If imagename.model exists the subsequent
run will start from this model (i.e. predicting it using current gridder
settings and starting from the residual image). Care must be taken
when combining this option with startmodel. Currently, only one or
the other can be used.
startmodel='', imagename.model exists :
- Start from imagename.model
startmodel='xxx', imagename.model does not exist :
- Start from startmodel
startmodel='xxx', imagename.model exists :
- Exit with an error message requesting the user to pick
only one model. This situation can arise when doing one
run with startmodel='xxx' to produce an output
imagename.model that includes the content of startmodel,
and wanting to restart a second run to continue deconvolution.
Startmodel should be set to '' before continuing.
If any change in the shape or coordinate system of the image is
desired during the restart, please change the image name and
use the startmodel (and mask) parameter(s) so that the old model
(and mask) can be regridded to the new coordinate system before starting.
False : A convenience feature to increment imagename with '_1', '_2',
etc as suffixes so that all runs of tclean are fresh starts (without
having to change the imagename parameter or delete images).
This mode will search the current directory for all existing
imagename extensions, pick the maximum, and add 1.
For imagename='try' it will make try.psf, try_2.psf, try_3.psf, etc.
This also works if you specify a directory name in the path :
imagename='outdir/try'. If './outdir' does not exist, it will create it.
Then it will search for existing filenames inside that directory.
If outlier fields are specified, the incrementing happens for each
of them (since each has its own 'imagename'). The counters are
synchronized across imagefields, to make it easier to match up sets
of output images. It adds 1 to the 'max id' from all outlier names
on disk. So, if you do two runs with only the main field
(imagename='try'), and in the third run you add an outlier with
imagename='outtry', you will get the following image names
for the third run : 'try_3' and 'outtry_3' even though
'outtry' and 'outtry_2' have not been used.
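For illustration only, a minimal Python sketch of the name-increment rule described above
(not the task's own code; the helper name next_imagename is hypothetical):

    import glob, os
    def next_imagename(base):                    # e.g. base = 'try' or 'outdir/try'
        ids = [1] if glob.glob(base + '.*') else [0]
        for path in glob.glob(base + '_*.*'):
            tail = os.path.basename(path)[len(os.path.basename(base)) + 1:]
            num = tail.split('.')[0]             # e.g. 'try_2.psf' -> '2'
            if num.isdigit():
                ids.append(int(num))
        n = max(ids) + 1
        return base if n == 1 else base + '_' + str(n)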
savemodel Options to save model visibilities (none, virtual, modelcolumn)
Often, model visibilities must be created and saved in the MS
to be later used for self-calibration (or to just plot and view them).
none : Do not save any model visibilities in the MS. The MS is opened
in readonly mode.
Model visibilities can be predicted in a separate step by
restarting tclean with niter=0,savemodel=virtual or modelcolumn
and not changing any image names so that it finds the .model on
disk (or by changing imagename and setting startmodel to the
original imagename).
virtual : In the last major cycle, save the image model and state of the
gridder used during imaging within the SOURCE subtable of the
MS. Images required for de-gridding will also be stored internally.
All future references to model visibilities will activate the
(de)gridder to compute them on-the-fly. This mode is useful
when the dataset is large enough that an additional model data
column on disk may be too much extra disk I/O, or when the
gridder is simple enough that on-the-fly recomputation of the
model visibilities is quicker than disk I/O.
Note, for example, that gridder='awproject' does not support the virtual model.
modelcolumn : In the last major cycle, save predicted model visibilities
in the MODEL_DATA column of the MS. This mode is useful when
the de-gridding cost to produce the model visibilities is higher
than the I/O required to read the model visibilities from disk.
This mode is currently required for gridder='awproject'.
This mode is also required for the ability to later pull out
model visibilities from the MS into a python array for custom
processing.
Note 1 : The imagename.model image on disk will always be constructed
if the minor cycle runs. This savemodel parameter applies only to
model visibilities created by de-gridding the model image.
Note 2 : It is possible for an MS to have both a virtual model
as well as a model_data column, but under normal operation,
the last used mode will get triggered. Use the delmod task to
clear out existing models from an MS if confusion arises.
Note 3: When parallel=True, use savemodel='none'; other options are not yet ready
for use in parallel. If model visibilities need to be saved (virtual or modelcolumn),
please run tclean in serial mode with niter=0 after the parallel run.
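For illustration only, a sketch of such a serial follow-up run (the MS and image names
are placeholders; parameter names are those documented above, and re-using calcres=False /
calcpsf=False assumes the .residual and .psf images already exist on disk):

    tclean(vis='my.ms', imagename='try', niter=0, restart=True,
           calcres=False, calcpsf=False,
           savemodel='modelcolumn', parallel=False)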
calcres Calculate initial residual image
This parameter controls what the first major cycle does.
calcres=False with niter greater than 0 will assume that
a .residual image already exists and that the minor cycle can
begin without recomputing it.
calcres=False with niter=0 implies that only the PSF will be made
and no data will be gridded.
calcres=True requires that calcpsf=True or that the .psf and .sumwt
images already exist on disk (for normalization purposes).
Usage example : For large runs (or a pipeline scripts) it may be
useful to first run tclean with niter=0 to create
an initial .residual to look at and perhaps make
a custom mask for. Imaging can be resumed
without recomputing it.
calcpsf Calculate PSF
This parameter controls what the first major cycle does.
calcpsf=False will assume that a .psf image already exists
and that the minor cycle can begin without recomputing it.
psfcutoff When the .psf image is created, a 2-dimensional Gaussian is fitted to the main lobe of the PSF.
Which pixels in the PSF are fitted is determined by psfcutoff.
The default value of psfcutoff is 0.35, and it can be varied from 0.01 to 0.99.
Fitting algorithm:
- A region of 41 x 41 pixels around the peak of the PSF is compared against the psfcutoff.
Sidelobes are ignored by radially searching from the PSF peak.
- Calculate the bottom left corner (blc) and top right corner (trc) from the points. Expand blc and trc with a number of pixels (5).
- Create a new sub-matrix from blc and trc.
- Interpolate matrix to a target number of points (3001) using CUBIC spline.
- All the non-sidelobe points, in the interpolated matrix, that are above the psfcutoff are used to fit a Gaussian.
A Levenberg-Marquardt algorithm is used.
- If the fitting fails, the algorithm is repeated with psfcutoff decreased (psfcutoff = psfcutoff/1.5).
A message will appear in the log if the fitting fails, along with the new value of psfcutoff.
This is repeated up to 50 times if the fitting keeps failing.
This Gaussian beam is defined by a major axis, minor axis, and position angle.
During the restoration process, this Gaussian beam is used as the Clean beam.
Varying psfcutoff might be useful for producing a better fit for highly non-Gaussian PSFs, however, the resulting fits should be carefully checked.
This parameter should rarely be changed.
(This is not the support size for clark clean.)
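For illustration only, a minimal Python sketch of the retry rule described above
(fit_func is a caller-supplied, hypothetical fitting routine that returns None on failure):

    def fit_beam_with_retries(psf, fit_func, psfcutoff=0.35, max_tries=50):
        for _ in range(max_tries):
            beam = fit_func(psf, psfcutoff)   # attempt the Gaussian fit at this cutoff
            if beam is not None:
                return beam, psfcutoff
            psfcutoff = psfcutoff / 1.5       # lower the cutoff and try again
        return None, psfcutoff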
parallel Run major cycles in parallel (this feature is experimental)
Parallel tclean will run only if casa has already been started using mpirun.
Please refer to HPC documentation for details on how to start this on your system.
Example : mpirun -n 3 -xterm 0 `which casa`
Continuum Imaging :
- Data are partitioned (in time) into NProc pieces
- Gridding/iFT is done separately per partition
- Images (and weights) are gathered and then normalized
- One non-parallel minor cycle is run
- Model image is scattered to all processes
- Major cycle is done in parallel per partition
Cube Imaging :
- Data and Image coordinates are partitioned (in freq) into NProc pieces
- Each partition is processed independently (major and minor cycles)
- All processes are synchronized at major cycle boundaries for convergence checks
- At the end, cubes from all partitions are concatenated along the spectral axis
Note 1 : Iteration control for cube imaging is independent per partition.
- There is currently no communication between them to synchronize
information such as peak residual and cyclethreshold. Therefore,
different chunks may trigger major cycles at different levels.
- For cube imaging in parallel, there is currently no interactive masking.
(Proper synchronization of iteration control is work in progress.)
RETURNS void
--------- examples -----------------------------------------------------------
This is the first release of our refactored imager code. Although most features have
been used and validated, there are many details that have not been thoroughly tested.
Feedback will be much appreciated.
Usage Examples :
-----------------------
(A) A suite of test programs that demo all usable modes of tclean on small test datasets
https://svn.cv.nrao.edu/svn/casa/branches/release-4_5/gcwrap/python/scripts/tests/test_refimager.py
(B) A set of demo examples for ALMA imaging
https://casaguides.nrao.edu/index.php/TCLEAN_and_ALMA
"""
_info_group_ = """imaging"""
_info_desc_ = """Parallelized tclean in consecutive time steps"""
__schema = {'vis': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'imageprefix': {'type': 'cStr', 'coerce': _coerce.to_str}, 'imagesuffix': {'type': 'cStr', 'coerce': _coerce.to_str}, 'ncpu': {'type': 'cInt'}, 'twidth': {'type': 'cInt'}, 'doreg': {'type': 'cBool'}, 'usephacenter': {'type': 'cBool'}, 'reftime': {'type': 'cStr', 'coerce': _coerce.to_str}, 'toTb': {'type': 'cBool'}, 'sclfactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'subregion': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'docompress': {'type': 'cBool'}, 'overwrite': {'type': 'cBool'}, 'selectdata': {'type': 'cBool'}, 'field': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'spw': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'timerange': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'uvrange': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'antenna': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'scan': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'observation': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cInt'}]}, 'intent': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'datacolumn': {'type': 'cStr', 'coerce': _coerce.to_str}, 'imagename': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'imsize': {'anyof': [{'type': 'cInt'}, {'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}]}, 'cell': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cFloat', 'coerce': _coerce.to_float}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}, {'type': 'cInt'}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'phasecenter': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}]}, 'stokes': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'I', 'IQUV', 'UV', 'RRLL', 'IQ', 'V', 'pseudoI', 'QU', 'YY', 'RR', 'Q', 'U', 'IV', 'XX', 'XXYY', 'LL' ]}, 'projection': {'type': 'cStr', 'coerce': _coerce.to_str}, 'startmodel': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'specmode': {'type': 'cVariant', 'coerce': [_coerce.to_variant] # <allowed> IS NOT ALLOWED FOR A PARAMETER OF TYPE any
}, 'reffreq': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'nchan': {'type': 'cInt'}, 'start': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'width': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'outframe': {'type': 'cStr', 'coerce': _coerce.to_str}, 'veltype': {'type': 'cStr', 'coerce': _coerce.to_str}, 'restfreq': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'interpolation': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'nearest', 'linear', 'cubic' ]}, 'perchanweightdensity': {'type': 'cBool'}, 'gridder': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'widefield', 'wproject', 'imagemosaic', 'standard', 'awproject', 'wprojectft', 'mosaicft', 'ft', 'ftmosaic', 'mosaic', 'awprojectft', 'gridft' ]}, 'facets': {'type': 'cInt'}, 'psfphasecenter': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}]}, 'wprojplanes': {'type': 'cInt'}, 'vptable': {'type': 'cStr', 'coerce': _coerce.to_str}, 'mosweight': {'type': 'cBool'}, 'aterm': {'type': 'cBool'}, 'psterm': {'type': 'cBool'}, 'wbawp': {'type': 'cBool'}, 'conjbeams': {'type': 'cBool'}, 'cfcache': {'type': 'cStr', 'coerce': _coerce.to_str}, 'usepointing': {'type': 'cBool'}, 'computepastep': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'rotatepastep': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'pointingoffsetsigdev': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'pblimit': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'normtype': {'type': 'cStr', 'coerce': _coerce.to_str}, 'deconvolver': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'clarkstokes_exp', 'mtmfs', 'mem', 'clarkstokes', 'hogbom', 'clark_exp', 'clark', 'multiscale' ]}, 'scales': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'nterms': {'type': 'cInt'}, 'smallscalebias': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'restoration': {'type': 'cBool'}, 'restoringbeam': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'pbcor': {'type': 'cBool'}, 'outlierfile': {'type': 'cStr', 'coerce': _coerce.to_str}, 'weighting': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'briggsabs', 'briggs', 'briggsbwtaper', 'natural', 'radial', 'superuniform', 'uniform' ]}, 'robust': {'type': 'cFloat', 'coerce': _coerce.to_float, 'min': -2.0, 'max': 2.0}, 'noise': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'npixels': {'type': 'cInt'}, 'uvtaper': {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}, 'niter': {'type': 'cInt'}, 'gain': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'threshold': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'nsigma': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'cycleniter': {'type': 'cInt'}, 'cyclefactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'minpsffraction': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'maxpsffraction': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'interactive': {'anyof': [{'type': 'cBool'}, {'type': 'cInt'}]}, 'usemask': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'user', 'pb', 'auto-multithresh' ]}, 'mask': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'pbmask': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'sidelobethreshold': 
{'type': 'cFloat', 'coerce': _coerce.to_float}, 'noisethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'lownoisethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'negativethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'smoothfactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'minbeamfrac': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'cutthreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'growiterations': {'type': 'cInt'}, 'dogrowprune': {'type': 'cBool'}, 'minpercentchange': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'verbose': {'type': 'cBool'}, 'fastnoise': {'type': 'cBool'}, 'restart': {'type': 'cBool'}, 'savemodel': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'none', 'virtual', 'modelcolumn' ]}, 'calcres': {'type': 'cBool'}, 'calcpsf': {'type': 'cBool'}, 'psfcutoff': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'parallel': {'type': 'cBool'}}
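    # Note on the schema above (illustrative reading, added for documentation only):
    # each entry maps a task parameter to the coercion(s) and constraints applied by
    # __validate_ below. For example, 'robust' is coerced with _coerce.to_float and must
    # lie within [-2.0, 2.0], while 'anyof' entries such as 'vis' accept any one of the
    # listed alternatives (e.g. a single string or a vector of strings).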
def __init__(self):
self.__stdout = None
self.__stderr = None
self.__root_frame_ = None
def __globals_(self):
if self.__root_frame_ is None:
self.__root_frame_ = _find_frame( )
assert self.__root_frame_ is not None, "could not find CASAshell global frame"
return self.__root_frame_
def __to_string_(self,value):
if type(value) is str:
return "'%s'" % value
else:
return str(value)
def __validate_(self,doc,schema):
return _pc.validate(doc,schema)
def __do_inp_output(self,param_prefix,description_str,formatting_chars):
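        ## Wrap 'description_str' into '#'-prefixed comment text, aligned after the
        ## fixed-width parameter/value columns (prefix_width characters) and kept within
        ## self.term_width characters per line (descriptive comment added for clarity).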
out = self.__stdout or sys.stdout
description = description_str.split( )
prefix_width = 23 + 23 + 4
output = [ ]
addon = ''
first_addon = True
while len(description) > 0:
## starting a new line.....................................................................
if len(output) == 0:
## for first line add parameter information............................................
if len(param_prefix)-formatting_chars > prefix_width - 1:
output.append(param_prefix)
continue
addon = param_prefix + ' #'
first_addon = True
addon_formatting = formatting_chars
else:
## for subsequent lines space over prefix width........................................
addon = (' ' * prefix_width) + '#'
first_addon = False
addon_formatting = 0
## if first word of description puts us over the screen width, bail........................
if len(addon + description[0]) - addon_formatting + 1 > self.term_width:
## if we're doing the first line make sure it's output.................................
if first_addon: output.append(addon)
break
while len(description) > 0:
## if the next description word puts us over break for the next line...................
if len(addon + description[0]) - addon_formatting + 1 > self.term_width: break
addon = addon + ' ' + description[0]
description.pop(0)
output.append(addon)
out.write('\n'.join(output) + '\n')
#--------- return nonsubparam values ----------------------------------------------
def __phasecenter_dflt( self, glb ):
return ''
def __phasecenter( self, glb ):
if 'phasecenter' in glb: return glb['phasecenter']
return ''
def __projection_dflt( self, glb ):
return 'SIN'
def __projection( self, glb ):
if 'projection' in glb: return glb['projection']
return 'SIN'
def __vis_dflt( self, glb ):
return ''
def __vis( self, glb ):
if 'vis' in glb: return glb['vis']
return ''
def __imagesuffix_dflt( self, glb ):
return ''
def __imagesuffix( self, glb ):
if 'imagesuffix' in glb: return glb['imagesuffix']
return ''
def __parallel_dflt( self, glb ):
return False
def __parallel( self, glb ):
if 'parallel' in glb: return glb['parallel']
return False
def __twidth_dflt( self, glb ):
return int(1)
def __twidth( self, glb ):
if 'twidth' in glb: return glb['twidth']
return int(1)
def __datacolumn_dflt( self, glb ):
return 'corrected'
def __datacolumn( self, glb ):
if 'datacolumn' in glb: return glb['datacolumn']
return 'corrected'
def __restart_dflt( self, glb ):
return True
def __restart( self, glb ):
if 'restart' in glb: return glb['restart']
return True
def __cell_dflt( self, glb ):
return [ ]
def __cell( self, glb ):
if 'cell' in glb: return glb['cell']
return [ ]
def __startmodel_dflt( self, glb ):
return ''
def __startmodel( self, glb ):
if 'startmodel' in glb: return glb['startmodel']
return ''
def __deconvolver_dflt( self, glb ):
return 'hogbom'
def __deconvolver( self, glb ):
if 'deconvolver' in glb: return glb['deconvolver']
return 'hogbom'
def __imsize_dflt( self, glb ):
return [ int(100) ]
def __imsize( self, glb ):
if 'imsize' in glb: return glb['imsize']
return [ int(100) ]
def __calcpsf_dflt( self, glb ):
return True
def __calcpsf( self, glb ):
if 'calcpsf' in glb: return glb['calcpsf']
return True
def __niter_dflt( self, glb ):
return int(0)
def __niter( self, glb ):
if 'niter' in glb: return glb['niter']
return int(0)
def __selectdata_dflt( self, glb ):
return True
def __selectdata( self, glb ):
if 'selectdata' in glb: return glb['selectdata']
return True
def __imageprefix_dflt( self, glb ):
return ''
def __imageprefix( self, glb ):
if 'imageprefix' in glb: return glb['imageprefix']
return ''
def __outlierfile_dflt( self, glb ):
return ''
def __outlierfile( self, glb ):
if 'outlierfile' in glb: return glb['outlierfile']
return ''
def __calcres_dflt( self, glb ):
return True
def __calcres( self, glb ):
if 'calcres' in glb: return glb['calcres']
return True
def __ncpu_dflt( self, glb ):
return int(8)
def __ncpu( self, glb ):
if 'ncpu' in glb: return glb['ncpu']
return int(8)
def __savemodel_dflt( self, glb ):
return 'none'
def __savemodel( self, glb ):
if 'savemodel' in glb: return glb['savemodel']
return 'none'
def __usemask_dflt( self, glb ):
return 'user'
def __usemask( self, glb ):
if 'usemask' in glb: return glb['usemask']
return 'user'
def __specmode_dflt( self, glb ):
return 'mfs'
def __specmode( self, glb ):
if 'specmode' in glb: return glb['specmode']
return 'mfs'
def __restoration_dflt( self, glb ):
return True
def __restoration( self, glb ):
if 'restoration' in glb: return glb['restoration']
return True
def __stokes_dflt( self, glb ):
return 'I'
def __stokes( self, glb ):
if 'stokes' in glb: return glb['stokes']
return 'I'
def __fastnoise_dflt( self, glb ):
return True
def __fastnoise( self, glb ):
if 'fastnoise' in glb: return glb['fastnoise']
return True
def __imagename_dflt( self, glb ):
return ''
def __imagename( self, glb ):
if 'imagename' in glb: return glb['imagename']
return ''
def __weighting_dflt( self, glb ):
return 'natural'
def __weighting( self, glb ):
if 'weighting' in glb: return glb['weighting']
return 'natural'
def __gridder_dflt( self, glb ):
return 'standard'
def __gridder( self, glb ):
if 'gridder' in glb: return glb['gridder']
return 'standard'
def __overwrite_dflt( self, glb ):
return False
def __overwrite( self, glb ):
if 'overwrite' in glb: return glb['overwrite']
return False
def __doreg_dflt( self, glb ):
return False
def __doreg( self, glb ):
if 'doreg' in glb: return glb['doreg']
return False
#--------- return inp/go default --------------------------------------------------
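    # Note (added comment): each *_dflt method below returns the context-dependent
    # default for a subparameter, or None when the subparameter is not active for the
    # current settings of its parent parameter (e.g. gridder, usemask, niter).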
def __antenna_dflt( self, glb ):
if self.__selectdata( glb ) == bool(True): return ""
return None
def __smoothfactor_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return float(1.0)
return None
def __negativethreshold_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return float(0.0)
return None
def __minbeamfrac_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return float(0.3)
return None
def __psfphasecenter_dflt( self, glb ):
if self.__gridder( glb ) == "mosaic": return ""
if self.__gridder( glb ) == "mosaicft": return ""
return None
def __mask_dflt( self, glb ):
if self.__usemask( glb ) == "user": return ""
return None
def __sclfactor_dflt( self, glb ):
if self.__doreg( glb ) == bool(True): return float(1.0)
return None
def __field_dflt( self, glb ):
if self.__selectdata( glb ) == bool(True): return ""
return None
def __cutthreshold_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return float(0.01)
return None
def __pblimit_dflt( self, glb ):
if self.__gridder( glb ) == "standard": return float(0.2)
if self.__gridder( glb ) == "widefield": return float(0.2)
if self.__gridder( glb ) == "wproject": return float(0.2)
if self.__gridder( glb ) == "wprojectft": return float(0.2)
if self.__gridder( glb ) == "mosaic": return float(0.2)
if self.__gridder( glb ) == "mosaicft": return float(0.2)
if self.__gridder( glb ) == "ftmosaic": return float(0.2)
if self.__gridder( glb ) == "imagemosaic": return float(0.2)
if self.__gridder( glb ) == "awproject": return float(0.2)
if self.__gridder( glb ) == "awprojectft": return float(0.2)
return None
def __smallscalebias_dflt( self, glb ):
if self.__deconvolver( glb ) == "multiscale": return float(0.0)
if self.__deconvolver( glb ) == "mtmfs": return float(0.0)
return None
def __maxpsffraction_dflt( self, glb ):
if self.__niter( glb ) != int(0): return float(0.8)
return None
def __verbose_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return bool(False)
return None
def __intent_dflt( self, glb ):
if self.__selectdata( glb ) == bool(True): return ""
return None
def __noise_dflt( self, glb ):
if self.__weighting( glb ) == "briggsabs": return "1.0Jy"
return None
def __interpolation_dflt( self, glb ):
if self.__specmode( glb ) == "cube": return "linear"
if self.__specmode( glb ) == "cubesource": return "linear"
if self.__specmode( glb ) == "cubedata": return "linear"
return None
def __subregion_dflt( self, glb ):
if self.__doreg( glb ) == bool(True): return ""
return None
def __nterms_dflt( self, glb ):
if self.__deconvolver( glb ) == "mtmfs": return int(2)
return None
def __pointingoffsetsigdev_dflt( self, glb ):
if self.__gridder( glb ) == "awproject": return []
if self.__gridder( glb ) == "awprojectft": return []
return None
def __nchan_dflt( self, glb ):
if self.__specmode( glb ) == "cube": return int(-1)
if self.__specmode( glb ) == "cubesource": return int(-1)
if self.__specmode( glb ) == "cubedata": return int(-1)
return None
def __reffreq_dflt( self, glb ):
if self.__specmode( glb ) == "mfs": return ""
return None
def __conjbeams_dflt( self, glb ):
if self.__gridder( glb ) == "mosaic": return bool(False)
if self.__gridder( glb ) == "mosaicft": return bool(False)
if self.__gridder( glb ) == "awproject": return bool(False)
if self.__gridder( glb ) == "awprojectft": return bool(False)
return None
def __restoringbeam_dflt( self, glb ):
if self.__restoration( glb ) == bool(True): return []
return None
def __sidelobethreshold_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return float(3.0)
return None
def __reftime_dflt( self, glb ):
if self.__doreg( glb ) == bool(True): return ""
return None
def __cycleniter_dflt( self, glb ):
if self.__niter( glb ) != int(0): return int(-1)
return None
def __minpsffraction_dflt( self, glb ):
if self.__niter( glb ) != int(0): return float(0.05)
return None
def __scan_dflt( self, glb ):
if self.__selectdata( glb ) == bool(True): return ""
return None
def __computepastep_dflt( self, glb ):
if self.__gridder( glb ) == "awproject": return float(360.0)
if self.__gridder( glb ) == "awprojectft": return float(360.0)
return None
def __minpercentchange_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return float(-1.0)
return None
def __wbawp_dflt( self, glb ):
if self.__gridder( glb ) == "awproject": return bool(True)
if self.__gridder( glb ) == "awprojectft": return bool(True)
return None
def __docompress_dflt( self, glb ):
if self.__doreg( glb ) == bool(True): return bool(False)
return None
def __interactive_dflt( self, glb ):
if self.__niter( glb ) != int(0): return False
return None
def __npixels_dflt( self, glb ):
if self.__weighting( glb ) == "briggs": return int(0)
if self.__weighting( glb ) == "briggsabs": return int(0)
return None
def __mosweight_dflt( self, glb ):
if self.__gridder( glb ) == "mosaic": return bool(True)
if self.__gridder( glb ) == "ftmosaic": return bool(True)
if self.__gridder( glb ) == "awproject": return bool(False)
if self.__gridder( glb ) == "awprojectft": return bool(False)
return None
def __pbcor_dflt( self, glb ):
if self.__restoration( glb ) == bool(True): return bool(False)
return None
def __normtype_dflt( self, glb ):
if self.__gridder( glb ) == "mosaic": return "flatnoise"
if self.__gridder( glb ) == "mosaicft": return "flatnoise"
if self.__gridder( glb ) == "ftmosaic": return "flatnoise"
if self.__gridder( glb ) == "imagemosaic": return "flatnoise"
if self.__gridder( glb ) == "awproject": return "flatnoise"
if self.__gridder( glb ) == "awprojectft": return "flatnoise"
return None
def __uvtaper_dflt( self, glb ):
if self.__weighting( glb ) == "natural": return []
if self.__weighting( glb ) == "briggs": return []
if self.__weighting( glb ) == "briggsabs": return []
if self.__weighting( glb ) == "briggsbwtaper": return []
return None
def __cyclefactor_dflt( self, glb ):
if self.__niter( glb ) != int(0): return float(1.0)
return None
def __toTb_dflt( self, glb ):
if self.__doreg( glb ) == bool(True): return bool(False)
return None
def __restfreq_dflt( self, glb ):
if self.__specmode( glb ) == "cube": return []
if self.__specmode( glb ) == "cubesource": return []
if self.__specmode( glb ) == "cubedata": return []
return None
def __pbmask_dflt( self, glb ):
if self.__usemask( glb ) == "user": return float(0.0)
if self.__usemask( glb ) == "pb": return float(0.2)
if self.__usemask( glb ) == "auto-multithresh": return float(0.2)
return None
def __growiterations_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return int(75)
return None
def __gain_dflt( self, glb ):
if self.__niter( glb ) != int(0): return float(0.1)
return None
def __scales_dflt( self, glb ):
if self.__deconvolver( glb ) == "multiscale": return []
if self.__deconvolver( glb ) == "mtmfs": return []
return None
def __psfcutoff_dflt( self, glb ):
if self.__calcpsf( glb ) == bool(True): return float(0.35)
return None
def __robust_dflt( self, glb ):
if self.__weighting( glb ) == "briggs": return float(0.5)
if self.__weighting( glb ) == "briggsabs": return float(0.5)
if self.__weighting( glb ) == "briggsbwtaper": return float(0.5)
return None
def __vptable_dflt( self, glb ):
if self.__gridder( glb ) == "standard": return ""
if self.__gridder( glb ) == "widefield": return ""
if self.__gridder( glb ) == "wproject": return ""
if self.__gridder( glb ) == "wprojectft": return ""
if self.__gridder( glb ) == "mosaic": return ""
if self.__gridder( glb ) == "mosaicft": return ""
if self.__gridder( glb ) == "ftmosaic": return ""
if self.__gridder( glb ) == "imagemosaic": return ""
return None
def __perchanweightdensity_dflt( self, glb ):
if self.__specmode( glb ) == "cube": return bool(True)
if self.__specmode( glb ) == "cubesource": return bool(True)
if self.__specmode( glb ) == "cubedata": return bool(False)
return None
def __aterm_dflt( self, glb ):
if self.__gridder( glb ) == "awproject": return bool(True)
if self.__gridder( glb ) == "awprojectft": return bool(True)
return None
def __usephacenter_dflt( self, glb ):
if self.__doreg( glb ) == bool(True): return bool(True)
return None
def __usepointing_dflt( self, glb ):
if self.__gridder( glb ) == "mosaic": return bool(False)
if self.__gridder( glb ) == "mosaicft": return bool(False)
if self.__gridder( glb ) == "ftmosaic": return bool(False)
if self.__gridder( glb ) == "awproject": return bool(False)
if self.__gridder( glb ) == "awprojectft": return bool(False)
return None
def __rotatepastep_dflt( self, glb ):
if self.__gridder( glb ) == "awproject": return float(360.0)
if self.__gridder( glb ) == "awprojectft": return float(360.0)
return None
def __threshold_dflt( self, glb ):
if self.__niter( glb ) != int(0): return 0.0
return None
def __veltype_dflt( self, glb ):
if self.__specmode( glb ) == "cube": return "radio"
if self.__specmode( glb ) == "cubesource": return "radio"
if self.__specmode( glb ) == "cubedata": return "radio"
return None
def __outframe_dflt( self, glb ):
if self.__specmode( glb ) == "cube": return ""
if self.__specmode( glb ) == "cubesource": return "REST"
return None
def __dogrowprune_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return bool(True)
return None
def __uvrange_dflt( self, glb ):
if self.__selectdata( glb ) == bool(True): return ""
return None
def __psterm_dflt( self, glb ):
if self.__gridder( glb ) == "awproject": return bool(False)
if self.__gridder( glb ) == "awprojectft": return bool(False)
return None
def __start_dflt( self, glb ):
if self.__specmode( glb ) == "cube": return ""
if self.__specmode( glb ) == "cubesource": return ""
if self.__specmode( glb ) == "cubedata": return ""
return None
def __observation_dflt( self, glb ):
if self.__selectdata( glb ) == bool(True): return ""
return None
def __lownoisethreshold_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return float(1.5)
return None
def __facets_dflt( self, glb ):
if self.__gridder( glb ) == "widefield": return int(1)
return None
def __noisethreshold_dflt( self, glb ):
if self.__usemask( glb ) == "auto-multithresh": return float(5.0)
return None
def __width_dflt( self, glb ):
if self.__specmode( glb ) == "cube": return ""
if self.__specmode( glb ) == "cubesource": return ""
if self.__specmode( glb ) == "cubedata": return ""
return None
def __spw_dflt( self, glb ):
if self.__selectdata( glb ) == bool(True): return ""
return None
def __timerange_dflt( self, glb ):
if self.__selectdata( glb ) == bool(True): return ""
return None
def __nsigma_dflt( self, glb ):
if self.__niter( glb ) != int(0): return float(0.0)
return None
def __cfcache_dflt( self, glb ):
if self.__gridder( glb ) == "awproject": return ""
if self.__gridder( glb ) == "awprojectft": return ""
return None
def __wprojplanes_dflt( self, glb ):
if self.__gridder( glb ) == "widefield": return int(1)
if self.__gridder( glb ) == "wproject": return int(1)
if self.__gridder( glb ) == "wprojectft": return int(1)
if self.__gridder( glb ) == "imagemosaic": return int(1)
if self.__gridder( glb ) == "awproject": return int(1)
if self.__gridder( glb ) == "awprojectft": return int(1)
return None
#--------- return subparam values -------------------------------------------------
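    # Note (added comment): the getters below resolve a value in this order:
    # (1) an explicit value in the CASAshell global frame, (2) the context-dependent
    # default from the corresponding *_dflt method, (3) a hard-coded fallback.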
def __usephacenter( self, glb ):
if 'usephacenter' in glb: return glb['usephacenter']
dflt = self.__usephacenter_dflt( glb )
if dflt is not None: return dflt
return True
def __reftime( self, glb ):
if 'reftime' in glb: return glb['reftime']
dflt = self.__reftime_dflt( glb )
if dflt is not None: return dflt
return ''
def __toTb( self, glb ):
if 'toTb' in glb: return glb['toTb']
dflt = self.__toTb_dflt( glb )
if dflt is not None: return dflt
return False
def __sclfactor( self, glb ):
if 'sclfactor' in glb: return glb['sclfactor']
dflt = self.__sclfactor_dflt( glb )
if dflt is not None: return dflt
return float(1.0)
def __subregion( self, glb ):
if 'subregion' in glb: return glb['subregion']
dflt = self.__subregion_dflt( glb )
if dflt is not None: return dflt
return ''
def __docompress( self, glb ):
if 'docompress' in glb: return glb['docompress']
dflt = self.__docompress_dflt( glb )
if dflt is not None: return dflt
return False
def __field( self, glb ):
if 'field' in glb: return glb['field']
dflt = self.__field_dflt( glb )
if dflt is not None: return dflt
return ''
def __spw( self, glb ):
if 'spw' in glb: return glb['spw']
dflt = self.__spw_dflt( glb )
if dflt is not None: return dflt
return ''
def __timerange( self, glb ):
if 'timerange' in glb: return glb['timerange']
dflt = self.__timerange_dflt( glb )
if dflt is not None: return dflt
return ''
def __uvrange( self, glb ):
if 'uvrange' in glb: return glb['uvrange']
dflt = self.__uvrange_dflt( glb )
if dflt is not None: return dflt
return ''
def __antenna( self, glb ):
if 'antenna' in glb: return glb['antenna']
dflt = self.__antenna_dflt( glb )
if dflt is not None: return dflt
return ''
def __scan( self, glb ):
if 'scan' in glb: return glb['scan']
dflt = self.__scan_dflt( glb )
if dflt is not None: return dflt
return ''
def __observation( self, glb ):
if 'observation' in glb: return glb['observation']
dflt = self.__observation_dflt( glb )
if dflt is not None: return dflt
return ''
def __intent( self, glb ):
if 'intent' in glb: return glb['intent']
dflt = self.__intent_dflt( glb )
if dflt is not None: return dflt
return ''
def __reffreq( self, glb ):
if 'reffreq' in glb: return glb['reffreq']
dflt = self.__reffreq_dflt( glb )
if dflt is not None: return dflt
return ''
def __nchan( self, glb ):
if 'nchan' in glb: return glb['nchan']
dflt = self.__nchan_dflt( glb )
if dflt is not None: return dflt
return int(-1)
def __start( self, glb ):
if 'start' in glb: return glb['start']
dflt = self.__start_dflt( glb )
if dflt is not None: return dflt
return ''
def __width( self, glb ):
if 'width' in glb: return glb['width']
dflt = self.__width_dflt( glb )
if dflt is not None: return dflt
return ''
def __outframe( self, glb ):
if 'outframe' in glb: return glb['outframe']
dflt = self.__outframe_dflt( glb )
if dflt is not None: return dflt
return 'LSRK'
def __veltype( self, glb ):
if 'veltype' in glb: return glb['veltype']
dflt = self.__veltype_dflt( glb )
if dflt is not None: return dflt
return 'radio'
def __restfreq( self, glb ):
if 'restfreq' in glb: return glb['restfreq']
dflt = self.__restfreq_dflt( glb )
if dflt is not None: return dflt
return [ ]
def __interpolation( self, glb ):
if 'interpolation' in glb: return glb['interpolation']
dflt = self.__interpolation_dflt( glb )
if dflt is not None: return dflt
return 'linear'
def __perchanweightdensity( self, glb ):
if 'perchanweightdensity' in glb: return glb['perchanweightdensity']
dflt = self.__perchanweightdensity_dflt( glb )
if dflt is not None: return dflt
return True
def __facets( self, glb ):
if 'facets' in glb: return glb['facets']
dflt = self.__facets_dflt( glb )
if dflt is not None: return dflt
return int(1)
def __psfphasecenter( self, glb ):
if 'psfphasecenter' in glb: return glb['psfphasecenter']
dflt = self.__psfphasecenter_dflt( glb )
if dflt is not None: return dflt
return ''
def __wprojplanes( self, glb ):
if 'wprojplanes' in glb: return glb['wprojplanes']
dflt = self.__wprojplanes_dflt( glb )
if dflt is not None: return dflt
return int(1)
def __vptable( self, glb ):
if 'vptable' in glb: return glb['vptable']
dflt = self.__vptable_dflt( glb )
if dflt is not None: return dflt
return ''
def __mosweight( self, glb ):
if 'mosweight' in glb: return glb['mosweight']
dflt = self.__mosweight_dflt( glb )
if dflt is not None: return dflt
return True
def __aterm( self, glb ):
if 'aterm' in glb: return glb['aterm']
dflt = self.__aterm_dflt( glb )
if dflt is not None: return dflt
return True
def __psterm( self, glb ):
if 'psterm' in glb: return glb['psterm']
dflt = self.__psterm_dflt( glb )
if dflt is not None: return dflt
return False
def __wbawp( self, glb ):
if 'wbawp' in glb: return glb['wbawp']
dflt = self.__wbawp_dflt( glb )
if dflt is not None: return dflt
return True
def __conjbeams( self, glb ):
if 'conjbeams' in glb: return glb['conjbeams']
dflt = self.__conjbeams_dflt( glb )
if dflt is not None: return dflt
return False
def __cfcache( self, glb ):
if 'cfcache' in glb: return glb['cfcache']
dflt = self.__cfcache_dflt( glb )
if dflt is not None: return dflt
return ''
def __usepointing( self, glb ):
if 'usepointing' in glb: return glb['usepointing']
dflt = self.__usepointing_dflt( glb )
if dflt is not None: return dflt
return False
def __computepastep( self, glb ):
if 'computepastep' in glb: return glb['computepastep']
dflt = self.__computepastep_dflt( glb )
if dflt is not None: return dflt
return float(360.0)
def __rotatepastep( self, glb ):
if 'rotatepastep' in glb: return glb['rotatepastep']
dflt = self.__rotatepastep_dflt( glb )
if dflt is not None: return dflt
return float(360.0)
def __pointingoffsetsigdev( self, glb ):
if 'pointingoffsetsigdev' in glb: return glb['pointingoffsetsigdev']
dflt = self.__pointingoffsetsigdev_dflt( glb )
if dflt is not None: return dflt
return [ ]
def __pblimit( self, glb ):
if 'pblimit' in glb: return glb['pblimit']
dflt = self.__pblimit_dflt( glb )
if dflt is not None: return dflt
return float(0.2)
def __normtype( self, glb ):
if 'normtype' in glb: return glb['normtype']
dflt = self.__normtype_dflt( glb )
if dflt is not None: return dflt
return 'flatnoise'
def __scales( self, glb ):
if 'scales' in glb: return glb['scales']
dflt = self.__scales_dflt( glb )
if dflt is not None: return dflt
return [ ]
def __nterms( self, glb ):
if 'nterms' in glb: return glb['nterms']
dflt = self.__nterms_dflt( glb )
if dflt is not None: return dflt
return int(2)
def __smallscalebias( self, glb ):
if 'smallscalebias' in glb: return glb['smallscalebias']
dflt = self.__smallscalebias_dflt( glb )
if dflt is not None: return dflt
return float(0.0)
def __restoringbeam( self, glb ):
if 'restoringbeam' in glb: return glb['restoringbeam']
dflt = self.__restoringbeam_dflt( glb )
if dflt is not None: return dflt
return [ ]
def __pbcor( self, glb ):
if 'pbcor' in glb: return glb['pbcor']
dflt = self.__pbcor_dflt( glb )
if dflt is not None: return dflt
return False
def __robust( self, glb ):
if 'robust' in glb: return glb['robust']
dflt = self.__robust_dflt( glb )
if dflt is not None: return dflt
return float(0.5)
def __noise( self, glb ):
if 'noise' in glb: return glb['noise']
dflt = self.__noise_dflt( glb )
if dflt is not None: return dflt
return '1.0Jy'
def __npixels( self, glb ):
if 'npixels' in glb: return glb['npixels']
dflt = self.__npixels_dflt( glb )
if dflt is not None: return dflt
return int(0)
def __uvtaper( self, glb ):
if 'uvtaper' in glb: return glb['uvtaper']
dflt = self.__uvtaper_dflt( glb )
if dflt is not None: return dflt
return [ '' ]
def __gain( self, glb ):
if 'gain' in glb: return glb['gain']
dflt = self.__gain_dflt( glb )
if dflt is not None: return dflt
return float(0.1)
def __threshold( self, glb ):
if 'threshold' in glb: return glb['threshold']
dflt = self.__threshold_dflt( glb )
if dflt is not None: return dflt
return float(0.0)
def __nsigma( self, glb ):
if 'nsigma' in glb: return glb['nsigma']
dflt = self.__nsigma_dflt( glb )
if dflt is not None: return dflt
return float(0.0)
def __cycleniter( self, glb ):
if 'cycleniter' in glb: return glb['cycleniter']
dflt = self.__cycleniter_dflt( glb )
if dflt is not None: return dflt
return int(-1)
def __cyclefactor( self, glb ):
if 'cyclefactor' in glb: return glb['cyclefactor']
dflt = self.__cyclefactor_dflt( glb )
if dflt is not None: return dflt
return float(1.0)
def __minpsffraction( self, glb ):
if 'minpsffraction' in glb: return glb['minpsffraction']
dflt = self.__minpsffraction_dflt( glb )
if dflt is not None: return dflt
return float(0.05)
def __maxpsffraction( self, glb ):
if 'maxpsffraction' in glb: return glb['maxpsffraction']
dflt = self.__maxpsffraction_dflt( glb )
if dflt is not None: return dflt
return float(0.8)
def __interactive( self, glb ):
if 'interactive' in glb: return glb['interactive']
dflt = self.__interactive_dflt( glb )
if dflt is not None: return dflt
return False
def __mask( self, glb ):
if 'mask' in glb: return glb['mask']
dflt = self.__mask_dflt( glb )
if dflt is not None: return dflt
return ''
def __pbmask( self, glb ):
if 'pbmask' in glb: return glb['pbmask']
dflt = self.__pbmask_dflt( glb )
if dflt is not None: return dflt
return float(0.0)
def __sidelobethreshold( self, glb ):
if 'sidelobethreshold' in glb: return glb['sidelobethreshold']
dflt = self.__sidelobethreshold_dflt( glb )
if dflt is not None: return dflt
return float(3.0)
def __noisethreshold( self, glb ):
if 'noisethreshold' in glb: return glb['noisethreshold']
dflt = self.__noisethreshold_dflt( glb )
if dflt is not None: return dflt
return float(5.0)
def __lownoisethreshold( self, glb ):
if 'lownoisethreshold' in glb: return glb['lownoisethreshold']
dflt = self.__lownoisethreshold_dflt( glb )
if dflt is not None: return dflt
return float(1.5)
def __negativethreshold( self, glb ):
if 'negativethreshold' in glb: return glb['negativethreshold']
dflt = self.__negativethreshold_dflt( glb )
if dflt is not None: return dflt
return float(0.0)
def __smoothfactor( self, glb ):
if 'smoothfactor' in glb: return glb['smoothfactor']
dflt = self.__smoothfactor_dflt( glb )
if dflt is not None: return dflt
return float(1.0)
def __minbeamfrac( self, glb ):
if 'minbeamfrac' in glb: return glb['minbeamfrac']
dflt = self.__minbeamfrac_dflt( glb )
if dflt is not None: return dflt
return float(0.3)
def __cutthreshold( self, glb ):
if 'cutthreshold' in glb: return glb['cutthreshold']
dflt = self.__cutthreshold_dflt( glb )
if dflt is not None: return dflt
return float(0.01)
def __growiterations( self, glb ):
if 'growiterations' in glb: return glb['growiterations']
dflt = self.__growiterations_dflt( glb )
if dflt is not None: return dflt
return int(75)
def __dogrowprune( self, glb ):
if 'dogrowprune' in glb: return glb['dogrowprune']
dflt = self.__dogrowprune_dflt( glb )
if dflt is not None: return dflt
return True
def __minpercentchange( self, glb ):
if 'minpercentchange' in glb: return glb['minpercentchange']
dflt = self.__minpercentchange_dflt( glb )
if dflt is not None: return dflt
return float(-1.0)
def __verbose( self, glb ):
if 'verbose' in glb: return glb['verbose']
dflt = self.__verbose_dflt( glb )
if dflt is not None: return dflt
return False
def __psfcutoff( self, glb ):
if 'psfcutoff' in glb: return glb['psfcutoff']
dflt = self.__psfcutoff_dflt( glb )
if dflt is not None: return dflt
return float(0.35)
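    # Note on the value getters above: each __<param>( glb ) helper resolves a
    # parameter in three steps -- an explicit entry in the supplied globals dict
    # wins, otherwise the context-dependent default from __<param>_dflt( glb ) is
    # used when it is not None (for subparameters this is typically only the case
    # while the parent parameter enables them), and otherwise the hard-coded
    # fallback at the end of the method is returned. Illustrative shape only, not
    # an additional method:
    #
    #     def __example( self, glb ):
    #         if 'example' in glb: return glb['example']   # user-set value
    #         dflt = self.__example_dflt( glb )            # contextual default
    #         if dflt is not None: return dflt
    #         return 0                                     # schema fallback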
#--------- subparam inp output ----------------------------------------------------
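    # The __<param>_inp helpers below each render one line of 'inp' output. The
    # ANSI escape sequences follow the usual CASA colouring conventions:
    # '\x1B[1m\x1B[47m' (bold on a light background) marks expandable parent
    # parameters such as doreg or selectdata, '\x1B[92m' (green) marks
    # subparameters that are printed only while their contextual default is not
    # None, and '\x1B[91m' ... '\x1B[0m' wrap a value in red when __validate_
    # rejects it against the corresponding schema entry.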
def __vis_inp(self):
description = 'Name of input visibility file(s)'
value = self.__vis( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'vis': value},{'vis': self.__schema['vis']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('vis',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __imageprefix_inp(self):
description = ''
value = self.__imageprefix( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'imageprefix': value},{'imageprefix': self.__schema['imageprefix']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imageprefix',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __imagesuffix_inp(self):
description = ''
value = self.__imagesuffix( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'imagesuffix': value},{'imagesuffix': self.__schema['imagesuffix']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imagesuffix',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __ncpu_inp(self):
description = ''
value = self.__ncpu( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'ncpu': value},{'ncpu': self.__schema['ncpu']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('ncpu',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __twidth_inp(self):
description = ''
value = self.__twidth( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'twidth': value},{'twidth': self.__schema['twidth']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('twidth',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __doreg_inp(self):
description = ''
value = self.__doreg( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'doreg': value},{'doreg': self.__schema['doreg']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('doreg',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
def __usephacenter_inp(self):
if self.__usephacenter_dflt( self.__globals_( ) ) is not None:
description = ''
value = self.__usephacenter( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'usephacenter': value},{'usephacenter': self.__schema['usephacenter']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('usephacenter',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __reftime_inp(self):
if self.__reftime_dflt( self.__globals_( ) ) is not None:
description = ''
value = self.__reftime( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'reftime': value},{'reftime': self.__schema['reftime']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('reftime',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __toTb_inp(self):
if self.__toTb_dflt( self.__globals_( ) ) is not None:
description = ''
value = self.__toTb( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'toTb': value},{'toTb': self.__schema['toTb']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('toTb',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __sclfactor_inp(self):
if self.__sclfactor_dflt( self.__globals_( ) ) is not None:
description = ''
value = self.__sclfactor( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'sclfactor': value},{'sclfactor': self.__schema['sclfactor']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('sclfactor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __subregion_inp(self):
if self.__subregion_dflt( self.__globals_( ) ) is not None:
description = 'The name of a CASA region string'
value = self.__subregion( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'subregion': value},{'subregion': self.__schema['subregion']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('subregion',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __docompress_inp(self):
if self.__docompress_dflt( self.__globals_( ) ) is not None:
description = ''
value = self.__docompress( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'docompress': value},{'docompress': self.__schema['docompress']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('docompress',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __overwrite_inp(self):
description = ''
value = self.__overwrite( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'overwrite': value},{'overwrite': self.__schema['overwrite']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('overwrite',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __selectdata_inp(self):
description = 'Enable data selection parameters'
value = self.__selectdata( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'selectdata': value},{'selectdata': self.__schema['selectdata']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('selectdata',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
def __field_inp(self):
if self.__field_dflt( self.__globals_( ) ) is not None:
description = 'field(s) to select'
value = self.__field( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'field': value},{'field': self.__schema['field']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('field',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __spw_inp(self):
if self.__spw_dflt( self.__globals_( ) ) is not None:
description = 'spw(s)/channels to select'
value = self.__spw( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'spw': value},{'spw': self.__schema['spw']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('spw',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __timerange_inp(self):
if self.__timerange_dflt( self.__globals_( ) ) is not None:
description = 'Range of time to select from data'
value = self.__timerange( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'timerange': value},{'timerange': self.__schema['timerange']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('timerange',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __uvrange_inp(self):
if self.__uvrange_dflt( self.__globals_( ) ) is not None:
description = 'Select data within uvrange'
value = self.__uvrange( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'uvrange': value},{'uvrange': self.__schema['uvrange']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('uvrange',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __antenna_inp(self):
if self.__antenna_dflt( self.__globals_( ) ) is not None:
description = 'Select data based on antenna/baseline'
value = self.__antenna( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'antenna': value},{'antenna': self.__schema['antenna']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('antenna',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __scan_inp(self):
if self.__scan_dflt( self.__globals_( ) ) is not None:
description = 'Scan number range'
value = self.__scan( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'scan': value},{'scan': self.__schema['scan']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('scan',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __observation_inp(self):
if self.__observation_dflt( self.__globals_( ) ) is not None:
description = 'Observation ID range'
value = self.__observation( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'observation': value},{'observation': self.__schema['observation']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('observation',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __intent_inp(self):
if self.__intent_dflt( self.__globals_( ) ) is not None:
description = 'Scan Intent(s)'
value = self.__intent( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'intent': value},{'intent': self.__schema['intent']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('intent',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __datacolumn_inp(self):
        description = 'Data column to image (data, corrected)'
value = self.__datacolumn( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'datacolumn': value},{'datacolumn': self.__schema['datacolumn']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('datacolumn',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __imagename_inp(self):
description = 'Pre-name of output images'
value = self.__imagename( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'imagename': value},{'imagename': self.__schema['imagename']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imagename',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __imsize_inp(self):
description = 'Number of pixels'
value = self.__imsize( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'imsize': value},{'imsize': self.__schema['imsize']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imsize',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __cell_inp(self):
description = 'Cell size'
value = self.__cell( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'cell': value},{'cell': self.__schema['cell']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('cell',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __phasecenter_inp(self):
description = 'Phase center of the image'
value = self.__phasecenter( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'phasecenter': value},{'phasecenter': self.__schema['phasecenter']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('phasecenter',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __stokes_inp(self):
description = 'Stokes Planes to make'
value = self.__stokes( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'stokes': value},{'stokes': self.__schema['stokes']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('stokes',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __projection_inp(self):
description = 'Coordinate projection'
value = self.__projection( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'projection': value},{'projection': self.__schema['projection']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('projection',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __startmodel_inp(self):
description = 'Name of starting model image'
value = self.__startmodel( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'startmodel': value},{'startmodel': self.__schema['startmodel']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('startmodel',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __specmode_inp(self):
description = 'Spectral definition mode (mfs,cube,cubedata, cubesource)'
value = self.__specmode( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'specmode': value},{'specmode': self.__schema['specmode']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('specmode',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
def __reffreq_inp(self):
if self.__reffreq_dflt( self.__globals_( ) ) is not None:
description = 'Reference frequency'
value = self.__reffreq( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'reffreq': value},{'reffreq': self.__schema['reffreq']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('reffreq',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __nchan_inp(self):
if self.__nchan_dflt( self.__globals_( ) ) is not None:
description = 'Number of channels in the output image'
value = self.__nchan( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'nchan': value},{'nchan': self.__schema['nchan']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('nchan',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __start_inp(self):
if self.__start_dflt( self.__globals_( ) ) is not None:
description = 'First channel (e.g. start=3,start=\'1.1GHz\',start=\'15343km/s\')'
value = self.__start( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'start': value},{'start': self.__schema['start']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('start',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __width_inp(self):
if self.__width_dflt( self.__globals_( ) ) is not None:
description = 'Channel width (e.g. width=2,width=\'0.1MHz\',width=\'10km/s\')'
value = self.__width( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'width': value},{'width': self.__schema['width']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('width',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __outframe_inp(self):
if self.__outframe_dflt( self.__globals_( ) ) is not None:
description = 'Spectral reference frame in which to interpret \'start\' and \'width\''
value = self.__outframe( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'outframe': value},{'outframe': self.__schema['outframe']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('outframe',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __veltype_inp(self):
if self.__veltype_dflt( self.__globals_( ) ) is not None:
description = 'Velocity type (radio, z, ratio, beta, gamma, optical)'
value = self.__veltype( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'veltype': value},{'veltype': self.__schema['veltype']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('veltype',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __restfreq_inp(self):
if self.__restfreq_dflt( self.__globals_( ) ) is not None:
description = 'List of rest frequencies'
value = self.__restfreq( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'restfreq': value},{'restfreq': self.__schema['restfreq']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('restfreq',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __interpolation_inp(self):
if self.__interpolation_dflt( self.__globals_( ) ) is not None:
description = 'Spectral interpolation (nearest,linear,cubic)'
value = self.__interpolation( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'interpolation': value},{'interpolation': self.__schema['interpolation']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('interpolation',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __perchanweightdensity_inp(self):
if self.__perchanweightdensity_dflt( self.__globals_( ) ) is not None:
description = 'whether to calculate weight density per channel in Briggs style weighting or not'
value = self.__perchanweightdensity( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'perchanweightdensity': value},{'perchanweightdensity': self.__schema['perchanweightdensity']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('perchanweightdensity',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __gridder_inp(self):
description = 'Gridding options (standard, wproject, widefield, mosaic, awproject)'
value = self.__gridder( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'gridder': value},{'gridder': self.__schema['gridder']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('gridder',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
def __facets_inp(self):
if self.__facets_dflt( self.__globals_( ) ) is not None:
description = 'Number of facets on a side'
value = self.__facets( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'facets': value},{'facets': self.__schema['facets']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('facets',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __psfphasecenter_inp(self):
if self.__psfphasecenter_dflt( self.__globals_( ) ) is not None:
description = 'optional direction to calculate psf for mosaic (default is image phasecenter)'
value = self.__psfphasecenter( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'psfphasecenter': value},{'psfphasecenter': self.__schema['psfphasecenter']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('psfphasecenter',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __wprojplanes_inp(self):
if self.__wprojplanes_dflt( self.__globals_( ) ) is not None:
description = 'Number of distinct w-values for convolution functions'
value = self.__wprojplanes( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'wprojplanes': value},{'wprojplanes': self.__schema['wprojplanes']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('wprojplanes',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __vptable_inp(self):
if self.__vptable_dflt( self.__globals_( ) ) is not None:
description = 'Name of Voltage Pattern table'
value = self.__vptable( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'vptable': value},{'vptable': self.__schema['vptable']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('vptable',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __mosweight_inp(self):
if self.__mosweight_dflt( self.__globals_( ) ) is not None:
            description = 'Independently weight each field in a mosaic'
value = self.__mosweight( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'mosweight': value},{'mosweight': self.__schema['mosweight']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('mosweight',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __aterm_inp(self):
if self.__aterm_dflt( self.__globals_( ) ) is not None:
description = 'Use aperture illumination functions during gridding'
value = self.__aterm( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'aterm': value},{'aterm': self.__schema['aterm']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('aterm',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __psterm_inp(self):
if self.__psterm_dflt( self.__globals_( ) ) is not None:
description = 'Use prolate spheroidal during gridding'
value = self.__psterm( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'psterm': value},{'psterm': self.__schema['psterm']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('psterm',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __wbawp_inp(self):
if self.__wbawp_dflt( self.__globals_( ) ) is not None:
description = 'Use wideband A-terms'
value = self.__wbawp( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'wbawp': value},{'wbawp': self.__schema['wbawp']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('wbawp',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __conjbeams_inp(self):
if self.__conjbeams_dflt( self.__globals_( ) ) is not None:
description = 'Use conjugate frequency for wideband A-terms'
value = self.__conjbeams( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'conjbeams': value},{'conjbeams': self.__schema['conjbeams']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('conjbeams',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __cfcache_inp(self):
if self.__cfcache_dflt( self.__globals_( ) ) is not None:
description = 'Convolution function cache directory name'
value = self.__cfcache( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'cfcache': value},{'cfcache': self.__schema['cfcache']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cfcache',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __usepointing_inp(self):
if self.__usepointing_dflt( self.__globals_( ) ) is not None:
description = 'The parameter makes the gridder utilize the pointing table phase directions while computing the residual image.'
value = self.__usepointing( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'usepointing': value},{'usepointing': self.__schema['usepointing']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('usepointing',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __computepastep_inp(self):
if self.__computepastep_dflt( self.__globals_( ) ) is not None:
            description = 'Parallactic angle interval after which the AIFs are recomputed (deg)'
value = self.__computepastep( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'computepastep': value},{'computepastep': self.__schema['computepastep']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('computepastep',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __rotatepastep_inp(self):
if self.__rotatepastep_dflt( self.__globals_( ) ) is not None:
description = 'Parallactic angle interval after which the nearest AIF is rotated (deg)'
value = self.__rotatepastep( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'rotatepastep': value},{'rotatepastep': self.__schema['rotatepastep']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('rotatepastep',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __pointingoffsetsigdev_inp(self):
if self.__pointingoffsetsigdev_dflt( self.__globals_( ) ) is not None:
description = 'Pointing offset threshold to determine heterogeneity of pointing corrections for the AWProject gridder'
value = self.__pointingoffsetsigdev( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'pointingoffsetsigdev': value},{'pointingoffsetsigdev': self.__schema['pointingoffsetsigdev']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pointingoffsetsigdev',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __pblimit_inp(self):
if self.__pblimit_dflt( self.__globals_( ) ) is not None:
description = 'PB gain level at which to cut off normalizations'
value = self.__pblimit( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'pblimit': value},{'pblimit': self.__schema['pblimit']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pblimit',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __normtype_inp(self):
if self.__normtype_dflt( self.__globals_( ) ) is not None:
description = 'Normalization type (flatnoise, flatsky,pbsquare)'
value = self.__normtype( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'normtype': value},{'normtype': self.__schema['normtype']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('normtype',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __deconvolver_inp(self):
description = 'Minor cycle algorithm (hogbom,clark,multiscale,mtmfs,mem,clarkstokes)'
value = self.__deconvolver( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'deconvolver': value},{'deconvolver': self.__schema['deconvolver']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('deconvolver',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
def __scales_inp(self):
if self.__scales_dflt( self.__globals_( ) ) is not None:
description = 'List of scale sizes (in pixels) for multi-scale algorithms'
value = self.__scales( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'scales': value},{'scales': self.__schema['scales']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('scales',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __nterms_inp(self):
if self.__nterms_dflt( self.__globals_( ) ) is not None:
description = 'Number of Taylor coefficients in the spectral model'
value = self.__nterms( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'nterms': value},{'nterms': self.__schema['nterms']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('nterms',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __smallscalebias_inp(self):
if self.__smallscalebias_dflt( self.__globals_( ) ) is not None:
description = 'Biases the scale selection when using multi-scale or mtmfs deconvolvers'
value = self.__smallscalebias( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'smallscalebias': value},{'smallscalebias': self.__schema['smallscalebias']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('smallscalebias',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __restoration_inp(self):
description = 'Do restoration steps (or not)'
value = self.__restoration( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'restoration': value},{'restoration': self.__schema['restoration']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('restoration',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
def __restoringbeam_inp(self):
if self.__restoringbeam_dflt( self.__globals_( ) ) is not None:
description = 'Restoring beam shape to use. Default is the PSF main lobe'
value = self.__restoringbeam( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'restoringbeam': value},{'restoringbeam': self.__schema['restoringbeam']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('restoringbeam',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __pbcor_inp(self):
if self.__pbcor_dflt( self.__globals_( ) ) is not None:
description = 'Apply PB correction on the output restored image'
value = self.__pbcor( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'pbcor': value},{'pbcor': self.__schema['pbcor']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pbcor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __outlierfile_inp(self):
description = 'Name of outlier-field image definitions'
value = self.__outlierfile( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'outlierfile': value},{'outlierfile': self.__schema['outlierfile']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('outlierfile',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __weighting_inp(self):
description = 'Weighting scheme (natural,uniform,briggs, briggsabs[experimental], briggsbwtaper[experimental])'
value = self.__weighting( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'weighting': value},{'weighting': self.__schema['weighting']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('weighting',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
def __robust_inp(self):
if self.__robust_dflt( self.__globals_( ) ) is not None:
description = 'Robustness parameter'
value = self.__robust( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'robust': value},{'robust': self.__schema['robust']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('robust',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __noise_inp(self):
if self.__noise_dflt( self.__globals_( ) ) is not None:
description = 'noise parameter for briggs abs mode weighting'
value = self.__noise( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'noise': value},{'noise': self.__schema['noise']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('noise',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __npixels_inp(self):
if self.__npixels_dflt( self.__globals_( ) ) is not None:
description = 'Number of pixels to determine uv-cell size'
value = self.__npixels( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'npixels': value},{'npixels': self.__schema['npixels']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('npixels',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __uvtaper_inp(self):
if self.__uvtaper_dflt( self.__globals_( ) ) is not None:
description = 'uv-taper on outer baselines in uv-plane'
value = self.__uvtaper( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'uvtaper': value},{'uvtaper': self.__schema['uvtaper']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('uvtaper',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __niter_inp(self):
description = 'Maximum number of iterations'
value = self.__niter( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'niter': value},{'niter': self.__schema['niter']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('niter',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
def __gain_inp(self):
if self.__gain_dflt( self.__globals_( ) ) is not None:
description = 'Loop gain'
value = self.__gain( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'gain': value},{'gain': self.__schema['gain']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('gain',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __threshold_inp(self):
if self.__threshold_dflt( self.__globals_( ) ) is not None:
description = 'Stopping threshold'
value = self.__threshold( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'threshold': value},{'threshold': self.__schema['threshold']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('threshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __nsigma_inp(self):
if self.__nsigma_dflt( self.__globals_( ) ) is not None:
description = 'Multiplicative factor for rms-based threshold stopping'
value = self.__nsigma( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'nsigma': value},{'nsigma': self.__schema['nsigma']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('nsigma',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __cycleniter_inp(self):
if self.__cycleniter_dflt( self.__globals_( ) ) is not None:
description = 'Maximum number of minor-cycle iterations'
value = self.__cycleniter( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'cycleniter': value},{'cycleniter': self.__schema['cycleniter']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cycleniter',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __cyclefactor_inp(self):
if self.__cyclefactor_dflt( self.__globals_( ) ) is not None:
description = 'Scaling on PSF sidelobe level to compute the minor-cycle stopping threshold.'
value = self.__cyclefactor( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'cyclefactor': value},{'cyclefactor': self.__schema['cyclefactor']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cyclefactor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __minpsffraction_inp(self):
if self.__minpsffraction_dflt( self.__globals_( ) ) is not None:
description = 'PSF fraction that marks the max depth of cleaning in the minor cycle'
value = self.__minpsffraction( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'minpsffraction': value},{'minpsffraction': self.__schema['minpsffraction']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('minpsffraction',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __maxpsffraction_inp(self):
if self.__maxpsffraction_dflt( self.__globals_( ) ) is not None:
description = 'PSF fraction that marks the minimum depth of cleaning in the minor cycle'
value = self.__maxpsffraction( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'maxpsffraction': value},{'maxpsffraction': self.__schema['maxpsffraction']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('maxpsffraction',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __interactive_inp(self):
if self.__interactive_dflt( self.__globals_( ) ) is not None:
description = 'Modify masks and parameters at runtime'
value = self.__interactive( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'interactive': value},{'interactive': self.__schema['interactive']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('interactive',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __usemask_inp(self):
description = 'Type of mask(s) for deconvolution: user, pb, or auto-multithresh'
value = self.__usemask( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'usemask': value},{'usemask': self.__schema['usemask']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('usemask',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
def __mask_inp(self):
if self.__mask_dflt( self.__globals_( ) ) is not None:
description = 'Mask (a list of image name(s) or region file(s) or region string(s) )'
value = self.__mask( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'mask': value},{'mask': self.__schema['mask']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('mask',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __pbmask_inp(self):
if self.__pbmask_dflt( self.__globals_( ) ) is not None:
description = 'primary beam mask'
value = self.__pbmask( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'pbmask': value},{'pbmask': self.__schema['pbmask']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pbmask',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __sidelobethreshold_inp(self):
if self.__sidelobethreshold_dflt( self.__globals_( ) ) is not None:
description = 'sidelobethreshold * the max sidelobe level * peak residual'
value = self.__sidelobethreshold( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'sidelobethreshold': value},{'sidelobethreshold': self.__schema['sidelobethreshold']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('sidelobethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __noisethreshold_inp(self):
if self.__noisethreshold_dflt( self.__globals_( ) ) is not None:
description = 'noisethreshold * rms in residual image + location(median)'
value = self.__noisethreshold( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'noisethreshold': value},{'noisethreshold': self.__schema['noisethreshold']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('noisethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __lownoisethreshold_inp(self):
if self.__lownoisethreshold_dflt( self.__globals_( ) ) is not None:
description = 'lownoisethreshold * rms in residual image + location(median)'
value = self.__lownoisethreshold( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'lownoisethreshold': value},{'lownoisethreshold': self.__schema['lownoisethreshold']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('lownoisethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __negativethreshold_inp(self):
if self.__negativethreshold_dflt( self.__globals_( ) ) is not None:
description = 'negativethreshold * rms in residual image + location(median)'
value = self.__negativethreshold( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'negativethreshold': value},{'negativethreshold': self.__schema['negativethreshold']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('negativethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __smoothfactor_inp(self):
if self.__smoothfactor_dflt( self.__globals_( ) ) is not None:
description = 'smoothing factor in a unit of the beam'
value = self.__smoothfactor( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'smoothfactor': value},{'smoothfactor': self.__schema['smoothfactor']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('smoothfactor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __minbeamfrac_inp(self):
if self.__minbeamfrac_dflt( self.__globals_( ) ) is not None:
description = 'minimum beam fraction for pruning'
value = self.__minbeamfrac( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'minbeamfrac': value},{'minbeamfrac': self.__schema['minbeamfrac']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('minbeamfrac',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __cutthreshold_inp(self):
if self.__cutthreshold_dflt( self.__globals_( ) ) is not None:
description = 'threshold to cut the smoothed mask to create a final mask'
value = self.__cutthreshold( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'cutthreshold': value},{'cutthreshold': self.__schema['cutthreshold']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cutthreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __growiterations_inp(self):
if self.__growiterations_dflt( self.__globals_( ) ) is not None:
description = 'number of binary dilation iterations for growing the mask'
value = self.__growiterations( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'growiterations': value},{'growiterations': self.__schema['growiterations']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('growiterations',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __dogrowprune_inp(self):
if self.__dogrowprune_dflt( self.__globals_( ) ) is not None:
description = 'Do pruning on the grow mask'
value = self.__dogrowprune( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'dogrowprune': value},{'dogrowprune': self.__schema['dogrowprune']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('dogrowprune',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __minpercentchange_inp(self):
if self.__minpercentchange_dflt( self.__globals_( ) ) is not None:
description = 'minimum percentage change in mask size (per channel plane) to trigger updating of mask by automask'
value = self.__minpercentchange( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'minpercentchange': value},{'minpercentchange': self.__schema['minpercentchange']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('minpercentchange',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __verbose_inp(self):
if self.__verbose_dflt( self.__globals_( ) ) is not None:
description = 'True: print more automasking information in the logger'
value = self.__verbose( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'verbose': value},{'verbose': self.__schema['verbose']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('verbose',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __fastnoise_inp(self):
description = 'True: use the faster (old) noise calculation. False: use the new improved noise calculations'
value = self.__fastnoise( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'fastnoise': value},{'fastnoise': self.__schema['fastnoise']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('fastnoise',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __restart_inp(self):
description = 'True : Re-use existing images. False : Increment imagename'
value = self.__restart( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'restart': value},{'restart': self.__schema['restart']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('restart',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __savemodel_inp(self):
description = 'Options to save model visibilities (none, virtual, modelcolumn)'
value = self.__savemodel( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'savemodel': value},{'savemodel': self.__schema['savemodel']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('savemodel',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __calcres_inp(self):
description = 'Calculate initial residual image'
value = self.__calcres( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'calcres': value},{'calcres': self.__schema['calcres']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('calcres',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
def __calcpsf_inp(self):
description = 'Calculate PSF'
value = self.__calcpsf( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'calcpsf': value},{'calcpsf': self.__schema['calcpsf']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('calcpsf',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
def __psfcutoff_inp(self):
if self.__psfcutoff_dflt( self.__globals_( ) ) is not None:
description = 'All pixels in the main lobe of the PSF above psfcutoff are used to fit a Gaussian beam (the Clean beam).'
value = self.__psfcutoff( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'psfcutoff': value},{'psfcutoff': self.__schema['psfcutoff']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('psfcutoff',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
def __parallel_inp(self):
description = 'Run major cycles in parallel'
value = self.__parallel( self.__globals_( ) )
(pre,post) = ('','') if self.__validate_({'parallel': value},{'parallel': self.__schema['parallel']}) else ('\x1B[91m','\x1B[0m')
self.__do_inp_output('%-23.23s = %s%-23s%s' % ('parallel',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
#--------- global default implementation-------------------------------------------
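    # set_global_defaults() below removes every ptclean6 parameter from the shared
    # casashell globals dict, so that the value getters subsequently fall back to
    # their contextual or schema defaults instead of stale user-set values.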
@static_var('state', __sf__('casa_inp_go_state'))
def set_global_defaults(self):
self.set_global_defaults.state['last'] = self
glb = self.__globals_( )
if 'antenna' in glb: del glb['antenna']
if 'smoothfactor' in glb: del glb['smoothfactor']
if 'stokes' in glb: del glb['stokes']
if 'negativethreshold' in glb: del glb['negativethreshold']
if 'deconvolver' in glb: del glb['deconvolver']
if 'minbeamfrac' in glb: del glb['minbeamfrac']
if 'doreg' in glb: del glb['doreg']
if 'savemodel' in glb: del glb['savemodel']
if 'psfphasecenter' in glb: del glb['psfphasecenter']
if 'mask' in glb: del glb['mask']
if 'sclfactor' in glb: del glb['sclfactor']
if 'field' in glb: del glb['field']
if 'cutthreshold' in glb: del glb['cutthreshold']
if 'projection' in glb: del glb['projection']
if 'pblimit' in glb: del glb['pblimit']
if 'smallscalebias' in glb: del glb['smallscalebias']
if 'maxpsffraction' in glb: del glb['maxpsffraction']
if 'datacolumn' in glb: del glb['datacolumn']
if 'verbose' in glb: del glb['verbose']
if 'weighting' in glb: del glb['weighting']
if 'intent' in glb: del glb['intent']
if 'noise' in glb: del glb['noise']
if 'interpolation' in glb: del glb['interpolation']
if 'subregion' in glb: del glb['subregion']
if 'nterms' in glb: del glb['nterms']
if 'pointingoffsetsigdev' in glb: del glb['pointingoffsetsigdev']
if 'nchan' in glb: del glb['nchan']
if 'reffreq' in glb: del glb['reffreq']
if 'conjbeams' in glb: del glb['conjbeams']
if 'restoringbeam' in glb: del glb['restoringbeam']
if 'sidelobethreshold' in glb: del glb['sidelobethreshold']
if 'reftime' in glb: del glb['reftime']
if 'gridder' in glb: del glb['gridder']
if 'cycleniter' in glb: del glb['cycleniter']
if 'imagename' in glb: del glb['imagename']
if 'minpsffraction' in glb: del glb['minpsffraction']
if 'imsize' in glb: del glb['imsize']
if 'scan' in glb: del glb['scan']
if 'vis' in glb: del glb['vis']
if 'outlierfile' in glb: del glb['outlierfile']
if 'computepastep' in glb: del glb['computepastep']
if 'minpercentchange' in glb: del glb['minpercentchange']
if 'fastnoise' in glb: del glb['fastnoise']
if 'wbawp' in glb: del glb['wbawp']
if 'docompress' in glb: del glb['docompress']
if 'interactive' in glb: del glb['interactive']
if 'specmode' in glb: del glb['specmode']
if 'npixels' in glb: del glb['npixels']
if 'mosweight' in glb: del glb['mosweight']
if 'pbcor' in glb: del glb['pbcor']
if 'calcres' in glb: del glb['calcres']
if 'normtype' in glb: del glb['normtype']
if 'uvtaper' in glb: del glb['uvtaper']
if 'cyclefactor' in glb: del glb['cyclefactor']
if 'toTb' in glb: del glb['toTb']
if 'restfreq' in glb: del glb['restfreq']
if 'imageprefix' in glb: del glb['imageprefix']
if 'pbmask' in glb: del glb['pbmask']
if 'growiterations' in glb: del glb['growiterations']
if 'gain' in glb: del glb['gain']
if 'scales' in glb: del glb['scales']
if 'twidth' in glb: del glb['twidth']
if 'psfcutoff' in glb: del glb['psfcutoff']
if 'robust' in glb: del glb['robust']
if 'vptable' in glb: del glb['vptable']
if 'perchanweightdensity' in glb: del glb['perchanweightdensity']
if 'aterm' in glb: del glb['aterm']
if 'imagesuffix' in glb: del glb['imagesuffix']
if 'usephacenter' in glb: del glb['usephacenter']
if 'usepointing' in glb: del glb['usepointing']
if 'rotatepastep' in glb: del glb['rotatepastep']
if 'threshold' in glb: del glb['threshold']
if 'ncpu' in glb: del glb['ncpu']
if 'veltype' in glb: del glb['veltype']
if 'calcpsf' in glb: del glb['calcpsf']
if 'usemask' in glb: del glb['usemask']
if 'restoration' in glb: del glb['restoration']
if 'niter' in glb: del glb['niter']
if 'outframe' in glb: del glb['outframe']
if 'dogrowprune' in glb: del glb['dogrowprune']
if 'cell' in glb: del glb['cell']
if 'uvrange' in glb: del glb['uvrange']
if 'psterm' in glb: del glb['psterm']
if 'phasecenter' in glb: del glb['phasecenter']
if 'overwrite' in glb: del glb['overwrite']
if 'restart' in glb: del glb['restart']
if 'start' in glb: del glb['start']
if 'observation' in glb: del glb['observation']
if 'lownoisethreshold' in glb: del glb['lownoisethreshold']
if 'facets' in glb: del glb['facets']
if 'noisethreshold' in glb: del glb['noisethreshold']
if 'width' in glb: del glb['width']
if 'spw' in glb: del glb['spw']
if 'selectdata' in glb: del glb['selectdata']
if 'timerange' in glb: del glb['timerange']
if 'parallel' in glb: del glb['parallel']
if 'nsigma' in glb: del glb['nsigma']
if 'cfcache' in glb: del glb['cfcache']
if 'wprojplanes' in glb: del glb['wprojplanes']
if 'startmodel' in glb: del glb['startmodel']
#--------- inp function -----------------------------------------------------------
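    # inp() prints a "# ptclean6 -- <description>" header and then one formatted
    # line per parameter in the order below; subparameter lines appear only while
    # their parent parameter enables them. A hypothetical interactive session,
    # assuming the task instance is bound to the name ptclean6 as the casashell
    # wrappers normally arrange:
    #
    #     ptclean6.tget()   # restore the values from a previous run, if any
    #     ptclean6.inp()    # review the current parameter values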
def inp(self):
print("# ptclean6 -- %s" % self._info_desc_)
self.term_width, self.term_height = shutil.get_terminal_size(fallback=(80, 24))
self.__vis_inp( )
self.__imageprefix_inp( )
self.__imagesuffix_inp( )
self.__ncpu_inp( )
self.__twidth_inp( )
self.__doreg_inp( )
self.__usephacenter_inp( )
self.__reftime_inp( )
self.__toTb_inp( )
self.__sclfactor_inp( )
self.__subregion_inp( )
self.__docompress_inp( )
self.__overwrite_inp( )
self.__selectdata_inp( )
self.__field_inp( )
self.__spw_inp( )
self.__timerange_inp( )
self.__uvrange_inp( )
self.__antenna_inp( )
self.__scan_inp( )
self.__observation_inp( )
self.__intent_inp( )
self.__datacolumn_inp( )
self.__imagename_inp( )
self.__imsize_inp( )
self.__cell_inp( )
self.__phasecenter_inp( )
self.__stokes_inp( )
self.__projection_inp( )
self.__startmodel_inp( )
self.__specmode_inp( )
self.__reffreq_inp( )
self.__nchan_inp( )
self.__start_inp( )
self.__width_inp( )
self.__outframe_inp( )
self.__veltype_inp( )
self.__restfreq_inp( )
self.__interpolation_inp( )
self.__perchanweightdensity_inp( )
self.__gridder_inp( )
self.__facets_inp( )
self.__psfphasecenter_inp( )
self.__wprojplanes_inp( )
self.__vptable_inp( )
self.__mosweight_inp( )
self.__aterm_inp( )
self.__psterm_inp( )
self.__wbawp_inp( )
self.__conjbeams_inp( )
self.__cfcache_inp( )
self.__usepointing_inp( )
self.__computepastep_inp( )
self.__rotatepastep_inp( )
self.__pointingoffsetsigdev_inp( )
self.__pblimit_inp( )
self.__normtype_inp( )
self.__deconvolver_inp( )
self.__scales_inp( )
self.__nterms_inp( )
self.__smallscalebias_inp( )
self.__restoration_inp( )
self.__restoringbeam_inp( )
self.__pbcor_inp( )
self.__outlierfile_inp( )
self.__weighting_inp( )
self.__robust_inp( )
self.__noise_inp( )
self.__npixels_inp( )
self.__uvtaper_inp( )
self.__niter_inp( )
self.__gain_inp( )
self.__threshold_inp( )
self.__nsigma_inp( )
self.__cycleniter_inp( )
self.__cyclefactor_inp( )
self.__minpsffraction_inp( )
self.__maxpsffraction_inp( )
self.__interactive_inp( )
self.__usemask_inp( )
self.__mask_inp( )
self.__pbmask_inp( )
self.__sidelobethreshold_inp( )
self.__noisethreshold_inp( )
self.__lownoisethreshold_inp( )
self.__negativethreshold_inp( )
self.__smoothfactor_inp( )
self.__minbeamfrac_inp( )
self.__cutthreshold_inp( )
self.__growiterations_inp( )
self.__dogrowprune_inp( )
self.__minpercentchange_inp( )
self.__verbose_inp( )
self.__fastnoise_inp( )
self.__restart_inp( )
self.__savemodel_inp( )
self.__calcres_inp( )
self.__calcpsf_inp( )
self.__psfcutoff_inp( )
self.__parallel_inp( )
#--------- tget function ----------------------------------------------------------
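    # tget() restores a previous invocation: with no argument it looks for
    # 'ptclean6.last' in the current directory, and with a filename argument it
    # uses that file instead. The file is executed with runpy.run_path and the
    # resulting bindings are copied into the caller's frame (located via
    # find_frame), so the restored values become ordinary CASA globals; if no file
    # is found, set_global_defaults() is applied instead.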
@static_var('state', __sf__('casa_inp_go_state'))
def tget(self,file=None):
from casashell.private.stack_manip import find_frame
from runpy import run_path
filename = None
if file is None:
if os.path.isfile("ptclean6.last"):
filename = "ptclean6.last"
elif isinstance(file, str):
if os.path.isfile(file):
filename = file
if filename is not None:
glob = find_frame( )
newglob = run_path( filename, init_globals={ } )
for i in newglob:
glob[i] = newglob[i]
self.tget.state['last'] = self
else:
print("could not find last file, setting defaults instead...")
self.set_global_defaults( )
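    # __call__ is the task entry point. When the caller passes any keyword
    # explicitly ("python style" invocation), the supplied top-level parameters are
    # collected into local_global and every parameter is then resolved through the
    # private getters, so unspecified arguments pick up their contextual or schema
    # defaults before the task body runs. A minimal hypothetical invocation (the
    # file and image names are placeholders, not values from this module):
    #
    #     ptclean6(vis='mydata.ms', imagename='sun_image', ncpu=4, niter=100)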
def __call__( self, vis=None, imageprefix=None, imagesuffix=None, ncpu=None, twidth=None, doreg=None, usephacenter=None, reftime=None, toTb=None, sclfactor=None, subregion=None, docompress=None, overwrite=None, selectdata=None, field=None, spw=None, timerange=None, uvrange=None, antenna=None, scan=None, observation=None, intent=None, datacolumn=None, imagename=None, imsize=None, cell=None, phasecenter=None, stokes=None, projection=None, startmodel=None, specmode=None, reffreq=None, nchan=None, start=None, width=None, outframe=None, veltype=None, restfreq=None, interpolation=None, perchanweightdensity=None, gridder=None, facets=None, psfphasecenter=None, wprojplanes=None, vptable=None, mosweight=None, aterm=None, psterm=None, wbawp=None, conjbeams=None, cfcache=None, usepointing=None, computepastep=None, rotatepastep=None, pointingoffsetsigdev=None, pblimit=None, normtype=None, deconvolver=None, scales=None, nterms=None, smallscalebias=None, restoration=None, restoringbeam=None, pbcor=None, outlierfile=None, weighting=None, robust=None, noise=None, npixels=None, uvtaper=None, niter=None, gain=None, threshold=None, nsigma=None, cycleniter=None, cyclefactor=None, minpsffraction=None, maxpsffraction=None, interactive=None, usemask=None, mask=None, pbmask=None, sidelobethreshold=None, noisethreshold=None, lownoisethreshold=None, negativethreshold=None, smoothfactor=None, minbeamfrac=None, cutthreshold=None, growiterations=None, dogrowprune=None, minpercentchange=None, verbose=None, fastnoise=None, restart=None, savemodel=None, calcres=None, calcpsf=None, psfcutoff=None, parallel=None ):
def noobj(s):
if s.startswith('<') and s.endswith('>'):
return "None"
else:
return s
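        # noobj() guards string serialisation: a value whose repr looks like an
        # object reference ('<...>') cannot be round-tripped through a text file,
        # so it is replaced by the string "None" -- presumably when the current
        # parameters are written to the ptclean6.pre / ptclean6.last files whose
        # paths are resolved just below.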
_prefile = os.path.realpath('ptclean6.pre')
_postfile = os.path.realpath('ptclean6.last')
_return_result_ = None
_arguments = [vis,imageprefix,imagesuffix,ncpu,twidth,doreg,usephacenter,reftime,toTb,sclfactor,subregion,docompress,overwrite,selectdata,field,spw,timerange,uvrange,antenna,scan,observation,intent,datacolumn,imagename,imsize,cell,phasecenter,stokes,projection,startmodel,specmode,reffreq,nchan,start,width,outframe,veltype,restfreq,interpolation,perchanweightdensity,gridder,facets,psfphasecenter,wprojplanes,vptable,mosweight,aterm,psterm,wbawp,conjbeams,cfcache,usepointing,computepastep,rotatepastep,pointingoffsetsigdev,pblimit,normtype,deconvolver,scales,nterms,smallscalebias,restoration,restoringbeam,pbcor,outlierfile,weighting,robust,noise,npixels,uvtaper,niter,gain,threshold,nsigma,cycleniter,cyclefactor,minpsffraction,maxpsffraction,interactive,usemask,mask,pbmask,sidelobethreshold,noisethreshold,lownoisethreshold,negativethreshold,smoothfactor,minbeamfrac,cutthreshold,growiterations,dogrowprune,minpercentchange,verbose,fastnoise,restart,savemodel,calcres,calcpsf,psfcutoff,parallel]
_invocation_parameters = OrderedDict( )
if any(map(lambda x: x is not None,_arguments)):
# invoke python style
# set the non sub-parameters that are not None
local_global = { }
if vis is not None: local_global['vis'] = vis
if imageprefix is not None: local_global['imageprefix'] = imageprefix
if imagesuffix is not None: local_global['imagesuffix'] = imagesuffix
if ncpu is not None: local_global['ncpu'] = ncpu
if twidth is not None: local_global['twidth'] = twidth
if doreg is not None: local_global['doreg'] = doreg
if overwrite is not None: local_global['overwrite'] = overwrite
if selectdata is not None: local_global['selectdata'] = selectdata
if datacolumn is not None: local_global['datacolumn'] = datacolumn
if imagename is not None: local_global['imagename'] = imagename
if imsize is not None: local_global['imsize'] = imsize
if cell is not None: local_global['cell'] = cell
if phasecenter is not None: local_global['phasecenter'] = phasecenter
if stokes is not None: local_global['stokes'] = stokes
if projection is not None: local_global['projection'] = projection
if startmodel is not None: local_global['startmodel'] = startmodel
if specmode is not None: local_global['specmode'] = specmode
if gridder is not None: local_global['gridder'] = gridder
if deconvolver is not None: local_global['deconvolver'] = deconvolver
if restoration is not None: local_global['restoration'] = restoration
if outlierfile is not None: local_global['outlierfile'] = outlierfile
if weighting is not None: local_global['weighting'] = weighting
if niter is not None: local_global['niter'] = niter
if usemask is not None: local_global['usemask'] = usemask
if fastnoise is not None: local_global['fastnoise'] = fastnoise
if restart is not None: local_global['restart'] = restart
if savemodel is not None: local_global['savemodel'] = savemodel
if calcres is not None: local_global['calcres'] = calcres
if calcpsf is not None: local_global['calcpsf'] = calcpsf
if parallel is not None: local_global['parallel'] = parallel
# the invocation parameters for the non-subparameters can now be set - this picks up those defaults
_invocation_parameters['vis'] = self.__vis( local_global )
_invocation_parameters['imageprefix'] = self.__imageprefix( local_global )
_invocation_parameters['imagesuffix'] = self.__imagesuffix( local_global )
_invocation_parameters['ncpu'] = self.__ncpu( local_global )
_invocation_parameters['twidth'] = self.__twidth( local_global )
_invocation_parameters['doreg'] = self.__doreg( local_global )
_invocation_parameters['overwrite'] = self.__overwrite( local_global )
_invocation_parameters['selectdata'] = self.__selectdata( local_global )
_invocation_parameters['datacolumn'] = self.__datacolumn( local_global )
_invocation_parameters['imagename'] = self.__imagename( local_global )
_invocation_parameters['imsize'] = self.__imsize( local_global )
_invocation_parameters['cell'] = self.__cell( local_global )
_invocation_parameters['phasecenter'] = self.__phasecenter( local_global )
_invocation_parameters['stokes'] = self.__stokes( local_global )
_invocation_parameters['projection'] = self.__projection( local_global )
_invocation_parameters['startmodel'] = self.__startmodel( local_global )
_invocation_parameters['specmode'] = self.__specmode( local_global )
_invocation_parameters['gridder'] = self.__gridder( local_global )
_invocation_parameters['deconvolver'] = self.__deconvolver( local_global )
_invocation_parameters['restoration'] = self.__restoration( local_global )
_invocation_parameters['outlierfile'] = self.__outlierfile( local_global )
_invocation_parameters['weighting'] = self.__weighting( local_global )
_invocation_parameters['niter'] = self.__niter( local_global )
_invocation_parameters['usemask'] = self.__usemask( local_global )
_invocation_parameters['fastnoise'] = self.__fastnoise( local_global )
_invocation_parameters['restart'] = self.__restart( local_global )
_invocation_parameters['savemodel'] = self.__savemodel( local_global )
_invocation_parameters['calcres'] = self.__calcres( local_global )
_invocation_parameters['calcpsf'] = self.__calcpsf( local_global )
_invocation_parameters['parallel'] = self.__parallel( local_global )
# the sub-parameters can then be set. Use the supplied value if not None, else the function, which gets the appropriate default
_invocation_parameters['usephacenter'] = self.__usephacenter( _invocation_parameters ) if usephacenter is None else usephacenter
_invocation_parameters['reftime'] = self.__reftime( _invocation_parameters ) if reftime is None else reftime
_invocation_parameters['toTb'] = self.__toTb( _invocation_parameters ) if toTb is None else toTb
_invocation_parameters['sclfactor'] = self.__sclfactor( _invocation_parameters ) if sclfactor is None else sclfactor
_invocation_parameters['subregion'] = self.__subregion( _invocation_parameters ) if subregion is None else subregion
_invocation_parameters['docompress'] = self.__docompress( _invocation_parameters ) if docompress is None else docompress
_invocation_parameters['field'] = self.__field( _invocation_parameters ) if field is None else field
_invocation_parameters['spw'] = self.__spw( _invocation_parameters ) if spw is None else spw
_invocation_parameters['timerange'] = self.__timerange( _invocation_parameters ) if timerange is None else timerange
_invocation_parameters['uvrange'] = self.__uvrange( _invocation_parameters ) if uvrange is None else uvrange
_invocation_parameters['antenna'] = self.__antenna( _invocation_parameters ) if antenna is None else antenna
_invocation_parameters['scan'] = self.__scan( _invocation_parameters ) if scan is None else scan
_invocation_parameters['observation'] = self.__observation( _invocation_parameters ) if observation is None else observation
_invocation_parameters['intent'] = self.__intent( _invocation_parameters ) if intent is None else intent
_invocation_parameters['reffreq'] = self.__reffreq( _invocation_parameters ) if reffreq is None else reffreq
_invocation_parameters['nchan'] = self.__nchan( _invocation_parameters ) if nchan is None else nchan
_invocation_parameters['start'] = self.__start( _invocation_parameters ) if start is None else start
_invocation_parameters['width'] = self.__width( _invocation_parameters ) if width is None else width
_invocation_parameters['outframe'] = self.__outframe( _invocation_parameters ) if outframe is None else outframe
_invocation_parameters['veltype'] = self.__veltype( _invocation_parameters ) if veltype is None else veltype
_invocation_parameters['restfreq'] = self.__restfreq( _invocation_parameters ) if restfreq is None else restfreq
_invocation_parameters['interpolation'] = self.__interpolation( _invocation_parameters ) if interpolation is None else interpolation
_invocation_parameters['perchanweightdensity'] = self.__perchanweightdensity( _invocation_parameters ) if perchanweightdensity is None else perchanweightdensity
_invocation_parameters['facets'] = self.__facets( _invocation_parameters ) if facets is None else facets
_invocation_parameters['psfphasecenter'] = self.__psfphasecenter( _invocation_parameters ) if psfphasecenter is None else psfphasecenter
_invocation_parameters['wprojplanes'] = self.__wprojplanes( _invocation_parameters ) if wprojplanes is None else wprojplanes
_invocation_parameters['vptable'] = self.__vptable( _invocation_parameters ) if vptable is None else vptable
_invocation_parameters['mosweight'] = self.__mosweight( _invocation_parameters ) if mosweight is None else mosweight
_invocation_parameters['aterm'] = self.__aterm( _invocation_parameters ) if aterm is None else aterm
_invocation_parameters['psterm'] = self.__psterm( _invocation_parameters ) if psterm is None else psterm
_invocation_parameters['wbawp'] = self.__wbawp( _invocation_parameters ) if wbawp is None else wbawp
_invocation_parameters['conjbeams'] = self.__conjbeams( _invocation_parameters ) if conjbeams is None else conjbeams
_invocation_parameters['cfcache'] = self.__cfcache( _invocation_parameters ) if cfcache is None else cfcache
_invocation_parameters['usepointing'] = self.__usepointing( _invocation_parameters ) if usepointing is None else usepointing
_invocation_parameters['computepastep'] = self.__computepastep( _invocation_parameters ) if computepastep is None else computepastep
_invocation_parameters['rotatepastep'] = self.__rotatepastep( _invocation_parameters ) if rotatepastep is None else rotatepastep
_invocation_parameters['pointingoffsetsigdev'] = self.__pointingoffsetsigdev( _invocation_parameters ) if pointingoffsetsigdev is None else pointingoffsetsigdev
_invocation_parameters['pblimit'] = self.__pblimit( _invocation_parameters ) if pblimit is None else pblimit
_invocation_parameters['normtype'] = self.__normtype( _invocation_parameters ) if normtype is None else normtype
_invocation_parameters['scales'] = self.__scales( _invocation_parameters ) if scales is None else scales
_invocation_parameters['nterms'] = self.__nterms( _invocation_parameters ) if nterms is None else nterms
_invocation_parameters['smallscalebias'] = self.__smallscalebias( _invocation_parameters ) if smallscalebias is None else smallscalebias
_invocation_parameters['restoringbeam'] = self.__restoringbeam( _invocation_parameters ) if restoringbeam is None else restoringbeam
_invocation_parameters['pbcor'] = self.__pbcor( _invocation_parameters ) if pbcor is None else pbcor
_invocation_parameters['robust'] = self.__robust( _invocation_parameters ) if robust is None else robust
_invocation_parameters['noise'] = self.__noise( _invocation_parameters ) if noise is None else noise
_invocation_parameters['npixels'] = self.__npixels( _invocation_parameters ) if npixels is None else npixels
_invocation_parameters['uvtaper'] = self.__uvtaper( _invocation_parameters ) if uvtaper is None else uvtaper
_invocation_parameters['gain'] = self.__gain( _invocation_parameters ) if gain is None else gain
_invocation_parameters['threshold'] = self.__threshold( _invocation_parameters ) if threshold is None else threshold
_invocation_parameters['nsigma'] = self.__nsigma( _invocation_parameters ) if nsigma is None else nsigma
_invocation_parameters['cycleniter'] = self.__cycleniter( _invocation_parameters ) if cycleniter is None else cycleniter
_invocation_parameters['cyclefactor'] = self.__cyclefactor( _invocation_parameters ) if cyclefactor is None else cyclefactor
_invocation_parameters['minpsffraction'] = self.__minpsffraction( _invocation_parameters ) if minpsffraction is None else minpsffraction
_invocation_parameters['maxpsffraction'] = self.__maxpsffraction( _invocation_parameters ) if maxpsffraction is None else maxpsffraction
_invocation_parameters['interactive'] = self.__interactive( _invocation_parameters ) if interactive is None else interactive
_invocation_parameters['mask'] = self.__mask( _invocation_parameters ) if mask is None else mask
_invocation_parameters['pbmask'] = self.__pbmask( _invocation_parameters ) if pbmask is None else pbmask
_invocation_parameters['sidelobethreshold'] = self.__sidelobethreshold( _invocation_parameters ) if sidelobethreshold is None else sidelobethreshold
_invocation_parameters['noisethreshold'] = self.__noisethreshold( _invocation_parameters ) if noisethreshold is None else noisethreshold
_invocation_parameters['lownoisethreshold'] = self.__lownoisethreshold( _invocation_parameters ) if lownoisethreshold is None else lownoisethreshold
_invocation_parameters['negativethreshold'] = self.__negativethreshold( _invocation_parameters ) if negativethreshold is None else negativethreshold
_invocation_parameters['smoothfactor'] = self.__smoothfactor( _invocation_parameters ) if smoothfactor is None else smoothfactor
_invocation_parameters['minbeamfrac'] = self.__minbeamfrac( _invocation_parameters ) if minbeamfrac is None else minbeamfrac
_invocation_parameters['cutthreshold'] = self.__cutthreshold( _invocation_parameters ) if cutthreshold is None else cutthreshold
_invocation_parameters['growiterations'] = self.__growiterations( _invocation_parameters ) if growiterations is None else growiterations
_invocation_parameters['dogrowprune'] = self.__dogrowprune( _invocation_parameters ) if dogrowprune is None else dogrowprune
_invocation_parameters['minpercentchange'] = self.__minpercentchange( _invocation_parameters ) if minpercentchange is None else minpercentchange
_invocation_parameters['verbose'] = self.__verbose( _invocation_parameters ) if verbose is None else verbose
_invocation_parameters['psfcutoff'] = self.__psfcutoff( _invocation_parameters ) if psfcutoff is None else psfcutoff
else:
# invoke with inp/go semantics
_invocation_parameters['vis'] = self.__vis( self.__globals_( ) )
_invocation_parameters['imageprefix'] = self.__imageprefix( self.__globals_( ) )
_invocation_parameters['imagesuffix'] = self.__imagesuffix( self.__globals_( ) )
_invocation_parameters['ncpu'] = self.__ncpu( self.__globals_( ) )
_invocation_parameters['twidth'] = self.__twidth( self.__globals_( ) )
_invocation_parameters['doreg'] = self.__doreg( self.__globals_( ) )
_invocation_parameters['usephacenter'] = self.__usephacenter( self.__globals_( ) )
_invocation_parameters['reftime'] = self.__reftime( self.__globals_( ) )
_invocation_parameters['toTb'] = self.__toTb( self.__globals_( ) )
_invocation_parameters['sclfactor'] = self.__sclfactor( self.__globals_( ) )
_invocation_parameters['subregion'] = self.__subregion( self.__globals_( ) )
_invocation_parameters['docompress'] = self.__docompress( self.__globals_( ) )
_invocation_parameters['overwrite'] = self.__overwrite( self.__globals_( ) )
_invocation_parameters['selectdata'] = self.__selectdata( self.__globals_( ) )
_invocation_parameters['field'] = self.__field( self.__globals_( ) )
_invocation_parameters['spw'] = self.__spw( self.__globals_( ) )
_invocation_parameters['timerange'] = self.__timerange( self.__globals_( ) )
_invocation_parameters['uvrange'] = self.__uvrange( self.__globals_( ) )
_invocation_parameters['antenna'] = self.__antenna( self.__globals_( ) )
_invocation_parameters['scan'] = self.__scan( self.__globals_( ) )
_invocation_parameters['observation'] = self.__observation( self.__globals_( ) )
_invocation_parameters['intent'] = self.__intent( self.__globals_( ) )
_invocation_parameters['datacolumn'] = self.__datacolumn( self.__globals_( ) )
_invocation_parameters['imagename'] = self.__imagename( self.__globals_( ) )
_invocation_parameters['imsize'] = self.__imsize( self.__globals_( ) )
_invocation_parameters['cell'] = self.__cell( self.__globals_( ) )
_invocation_parameters['phasecenter'] = self.__phasecenter( self.__globals_( ) )
_invocation_parameters['stokes'] = self.__stokes( self.__globals_( ) )
_invocation_parameters['projection'] = self.__projection( self.__globals_( ) )
_invocation_parameters['startmodel'] = self.__startmodel( self.__globals_( ) )
_invocation_parameters['specmode'] = self.__specmode( self.__globals_( ) )
_invocation_parameters['reffreq'] = self.__reffreq( self.__globals_( ) )
_invocation_parameters['nchan'] = self.__nchan( self.__globals_( ) )
_invocation_parameters['start'] = self.__start( self.__globals_( ) )
_invocation_parameters['width'] = self.__width( self.__globals_( ) )
_invocation_parameters['outframe'] = self.__outframe( self.__globals_( ) )
_invocation_parameters['veltype'] = self.__veltype( self.__globals_( ) )
_invocation_parameters['restfreq'] = self.__restfreq( self.__globals_( ) )
_invocation_parameters['interpolation'] = self.__interpolation( self.__globals_( ) )
_invocation_parameters['perchanweightdensity'] = self.__perchanweightdensity( self.__globals_( ) )
_invocation_parameters['gridder'] = self.__gridder( self.__globals_( ) )
_invocation_parameters['facets'] = self.__facets( self.__globals_( ) )
_invocation_parameters['psfphasecenter'] = self.__psfphasecenter( self.__globals_( ) )
_invocation_parameters['wprojplanes'] = self.__wprojplanes( self.__globals_( ) )
_invocation_parameters['vptable'] = self.__vptable( self.__globals_( ) )
_invocation_parameters['mosweight'] = self.__mosweight( self.__globals_( ) )
_invocation_parameters['aterm'] = self.__aterm( self.__globals_( ) )
_invocation_parameters['psterm'] = self.__psterm( self.__globals_( ) )
_invocation_parameters['wbawp'] = self.__wbawp( self.__globals_( ) )
_invocation_parameters['conjbeams'] = self.__conjbeams( self.__globals_( ) )
_invocation_parameters['cfcache'] = self.__cfcache( self.__globals_( ) )
_invocation_parameters['usepointing'] = self.__usepointing( self.__globals_( ) )
_invocation_parameters['computepastep'] = self.__computepastep( self.__globals_( ) )
_invocation_parameters['rotatepastep'] = self.__rotatepastep( self.__globals_( ) )
_invocation_parameters['pointingoffsetsigdev'] = self.__pointingoffsetsigdev( self.__globals_( ) )
_invocation_parameters['pblimit'] = self.__pblimit( self.__globals_( ) )
_invocation_parameters['normtype'] = self.__normtype( self.__globals_( ) )
_invocation_parameters['deconvolver'] = self.__deconvolver( self.__globals_( ) )
_invocation_parameters['scales'] = self.__scales( self.__globals_( ) )
_invocation_parameters['nterms'] = self.__nterms( self.__globals_( ) )
_invocation_parameters['smallscalebias'] = self.__smallscalebias( self.__globals_( ) )
_invocation_parameters['restoration'] = self.__restoration( self.__globals_( ) )
_invocation_parameters['restoringbeam'] = self.__restoringbeam( self.__globals_( ) )
_invocation_parameters['pbcor'] = self.__pbcor( self.__globals_( ) )
_invocation_parameters['outlierfile'] = self.__outlierfile( self.__globals_( ) )
_invocation_parameters['weighting'] = self.__weighting( self.__globals_( ) )
_invocation_parameters['robust'] = self.__robust( self.__globals_( ) )
_invocation_parameters['noise'] = self.__noise( self.__globals_( ) )
_invocation_parameters['npixels'] = self.__npixels( self.__globals_( ) )
_invocation_parameters['uvtaper'] = self.__uvtaper( self.__globals_( ) )
_invocation_parameters['niter'] = self.__niter( self.__globals_( ) )
_invocation_parameters['gain'] = self.__gain( self.__globals_( ) )
_invocation_parameters['threshold'] = self.__threshold( self.__globals_( ) )
_invocation_parameters['nsigma'] = self.__nsigma( self.__globals_( ) )
_invocation_parameters['cycleniter'] = self.__cycleniter( self.__globals_( ) )
_invocation_parameters['cyclefactor'] = self.__cyclefactor( self.__globals_( ) )
_invocation_parameters['minpsffraction'] = self.__minpsffraction( self.__globals_( ) )
_invocation_parameters['maxpsffraction'] = self.__maxpsffraction( self.__globals_( ) )
_invocation_parameters['interactive'] = self.__interactive( self.__globals_( ) )
_invocation_parameters['usemask'] = self.__usemask( self.__globals_( ) )
_invocation_parameters['mask'] = self.__mask( self.__globals_( ) )
_invocation_parameters['pbmask'] = self.__pbmask( self.__globals_( ) )
_invocation_parameters['sidelobethreshold'] = self.__sidelobethreshold( self.__globals_( ) )
_invocation_parameters['noisethreshold'] = self.__noisethreshold( self.__globals_( ) )
_invocation_parameters['lownoisethreshold'] = self.__lownoisethreshold( self.__globals_( ) )
_invocation_parameters['negativethreshold'] = self.__negativethreshold( self.__globals_( ) )
_invocation_parameters['smoothfactor'] = self.__smoothfactor( self.__globals_( ) )
_invocation_parameters['minbeamfrac'] = self.__minbeamfrac( self.__globals_( ) )
_invocation_parameters['cutthreshold'] = self.__cutthreshold( self.__globals_( ) )
_invocation_parameters['growiterations'] = self.__growiterations( self.__globals_( ) )
_invocation_parameters['dogrowprune'] = self.__dogrowprune( self.__globals_( ) )
_invocation_parameters['minpercentchange'] = self.__minpercentchange( self.__globals_( ) )
_invocation_parameters['verbose'] = self.__verbose( self.__globals_( ) )
_invocation_parameters['fastnoise'] = self.__fastnoise( self.__globals_( ) )
_invocation_parameters['restart'] = self.__restart( self.__globals_( ) )
_invocation_parameters['savemodel'] = self.__savemodel( self.__globals_( ) )
_invocation_parameters['calcres'] = self.__calcres( self.__globals_( ) )
_invocation_parameters['calcpsf'] = self.__calcpsf( self.__globals_( ) )
_invocation_parameters['psfcutoff'] = self.__psfcutoff( self.__globals_( ) )
_invocation_parameters['parallel'] = self.__parallel( self.__globals_( ) )
try:
with open(_prefile,'w') as _f:
for _i in _invocation_parameters:
_f.write("%-20s = %s\n" % (_i,noobj(repr(_invocation_parameters[_i]))))
_f.write("#ptclean6( ")
count = 0
for _i in _invocation_parameters:
_f.write("%s=%s" % (_i,noobj(repr(_invocation_parameters[_i]))))
count += 1
if count < len(_invocation_parameters): _f.write(",")
_f.write(" )\n")
except: pass
try:
              _return_result_ = _ptclean6_t( _invocation_parameters['vis'],_invocation_parameters['imageprefix'],_invocation_parameters['imagesuffix'],_invocation_parameters['ncpu'],_invocation_parameters['twidth'],_invocation_parameters['doreg'],_invocation_parameters['usephacenter'],_invocation_parameters['reftime'],_invocation_parameters['toTb'],_invocation_parameters['sclfactor'],_invocation_parameters['subregion'],_invocation_parameters['docompress'],_invocation_parameters['overwrite'],_invocation_parameters['selectdata'],_invocation_parameters['field'],_invocation_parameters['spw'],_invocation_parameters['timerange'],_invocation_parameters['uvrange'],_invocation_parameters['antenna'],_invocation_parameters['scan'],_invocation_parameters['observation'],_invocation_parameters['intent'],_invocation_parameters['datacolumn'],_invocation_parameters['imagename'],_invocation_parameters['imsize'],
                                             _invocation_parameters['cell'],_invocation_parameters['phasecenter'],_invocation_parameters['stokes'],_invocation_parameters['projection'],_invocation_parameters['startmodel'],_invocation_parameters['specmode'],_invocation_parameters['reffreq'],_invocation_parameters['nchan'],_invocation_parameters['start'],_invocation_parameters['width'],_invocation_parameters['outframe'],_invocation_parameters['veltype'],_invocation_parameters['restfreq'],_invocation_parameters['interpolation'],_invocation_parameters['perchanweightdensity'],_invocation_parameters['gridder'],_invocation_parameters['facets'],_invocation_parameters['psfphasecenter'],_invocation_parameters['wprojplanes'],_invocation_parameters['vptable'],_invocation_parameters['mosweight'],_invocation_parameters['aterm'],_invocation_parameters['psterm'],_invocation_parameters['wbawp'],_invocation_parameters['conjbeams'],
                                             _invocation_parameters['cfcache'],_invocation_parameters['usepointing'],_invocation_parameters['computepastep'],_invocation_parameters['rotatepastep'],_invocation_parameters['pointingoffsetsigdev'],_invocation_parameters['pblimit'],_invocation_parameters['normtype'],_invocation_parameters['deconvolver'],_invocation_parameters['scales'],_invocation_parameters['nterms'],_invocation_parameters['smallscalebias'],_invocation_parameters['restoration'],_invocation_parameters['restoringbeam'],_invocation_parameters['pbcor'],_invocation_parameters['outlierfile'],_invocation_parameters['weighting'],_invocation_parameters['robust'],_invocation_parameters['noise'],_invocation_parameters['npixels'],_invocation_parameters['uvtaper'],_invocation_parameters['niter'],_invocation_parameters['gain'],_invocation_parameters['threshold'],_invocation_parameters['nsigma'],_invocation_parameters['cycleniter'],
                                             _invocation_parameters['cyclefactor'],_invocation_parameters['minpsffraction'],_invocation_parameters['maxpsffraction'],_invocation_parameters['interactive'],_invocation_parameters['usemask'],_invocation_parameters['mask'],_invocation_parameters['pbmask'],_invocation_parameters['sidelobethreshold'],_invocation_parameters['noisethreshold'],_invocation_parameters['lownoisethreshold'],_invocation_parameters['negativethreshold'],_invocation_parameters['smoothfactor'],_invocation_parameters['minbeamfrac'],_invocation_parameters['cutthreshold'],_invocation_parameters['growiterations'],_invocation_parameters['dogrowprune'],_invocation_parameters['minpercentchange'],_invocation_parameters['verbose'],_invocation_parameters['fastnoise'],_invocation_parameters['restart'],_invocation_parameters['savemodel'],_invocation_parameters['calcres'],_invocation_parameters['calcpsf'],_invocation_parameters['psfcutoff'],_invocation_parameters['parallel'] )
except Exception as e:
from traceback import format_exc
from casatasks import casalog
casalog.origin('ptclean6')
casalog.post("Exception Reported: Error in ptclean6: %s" % str(e),'SEVERE')
casalog.post(format_exc( ))
_return_result_ = False
try:
os.rename(_prefile,_postfile)
except: pass
return _return_result_
ptclean6 = _ptclean6( )
|
[] |
[] |
[
"SAVE_ALL_AUTOMASKS"
] |
[]
|
["SAVE_ALL_AUTOMASKS"]
|
python
| 1 | 0 | |
.CondaPkg/env/lib/python3.10/site-packages/sympy/external/importtools.py
|
"""Tools to assist importing optional external modules."""
import sys
import re
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
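# A minimal sketch of flipping these flags at runtime (the module path follows the
# import_module docstring below):
#
#     import sympy.external.importtools as importtools
#     importtools.WARN_NOT_INSTALLED = True   # warn when an optional module is missing
#     importtools.WARN_OLD_VERSION = False    # stay silent about too-old versions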
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
_component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def version_tuple(vstring):
# Parse a version string to a tuple e.g. '1.2' -> (1, 2)
# Simplified from distutils.version.LooseVersion which was deprecated in
# Python 3.10.
components = []
for x in _component_re.split(vstring):
if x and x != '.':
try:
x = int(x)
except ValueError:
pass
components.append(x)
    return tuple(components)
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
import_kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the import_kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... import_kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... import_kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning, stacklevel=2)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **import_kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = import_kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning,
stacklevel=2)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)),
stacklevel=2)
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if version_tuple(modversion) < version_tuple(min_module_version):
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, str):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
                    # We don't know what this is, so just hope that
                    # it has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning, stacklevel=2)
return
return mod
|
[] |
[] |
[
"SYMPY_DEBUG"
] |
[]
|
["SYMPY_DEBUG"]
|
python
| 1 | 0 | |
docs/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
sys.path.insert(0, os.path.abspath("/app"))
os.environ.setdefault("DATABASE_URL", "")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# -- Project information -----------------------------------------------------
project = "Covid-19 PH"
copyright = """2020, Vicente G. Reyes"""
author = "Vicente G. Reyes"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
contrib/devtools/security-check.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The dogxcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
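# Example invocation (the binary paths are illustrative, not part of this script):
#   contrib/devtools/security-check.py path/to/daemon path/to/cli-tool
# Silence and a zero exit status mean every check passed; otherwise each failing
# executable is listed together with the checks it failed.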
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.splitlines():
line = line.split()
if len(line)>=2 and line[0] == 'Type:' and line[1] == 'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.splitlines():
if line.startswith('Program Headers:'):
in_headers = True
if line == '':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find('Type')
ofs_offset = line.find('Offset')
ofs_flags = line.find('Flg')
ofs_align = line.find('Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
                    raise ValueError('Cannot parse readelf -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == 'GNU_STACK':
have_gnu_stack = True
if 'W' in flags and 'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
        # Note: not checking flags == 'R' here, as linkers set the permission differently.
        # This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
        # However, the dynamic linker needs to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also https://marc.info/?l=binutils&m=1498883354122353
if typ == 'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.splitlines():
tokens = line.split()
if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2:]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.splitlines():
if '__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
    '''
    Get PE DllCharacteristics bits.
    Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
    and bits is the DllCharacteristics value.
    '''
    p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Error opening file')
    arch = ''
    bits = 0
    for line in stdout.splitlines():
        tokens = line.split()
        if len(tokens)>=2 and tokens[0] == 'architecture:':
            arch = tokens[1].rstrip(',')
        if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
            bits = int(tokens[1],16)
    return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
def check_PE_DYNAMIC_BASE(executable):
    '''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
    (arch,bits) = get_PE_dll_characteristics(executable)
    reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
    return (bits & reqbits) == reqbits
# On 64-bit systems, high-entropy 64-bit address space layout randomization must be supported in addition to DYNAMIC_BASE
# to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
    '''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
    (arch,bits) = get_PE_dll_characteristics(executable)
    if arch == 'i386:x86-64':
        reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
    else: # Unnecessary on 32-bit
        assert(arch == 'i386')
        reqbits = 0
    return (bits & reqbits) == reqbits
def check_PE_NX(executable):
    '''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
    (arch,bits) = get_PE_dll_characteristics(executable)
    return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
    with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
warning = []
for (name, func) in CHECKS[etype]:
if not func(filename):
if name in NONFATAL:
warning.append(name)
else:
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
if warning:
print('%s: warning %s' % (filename, ' '.join(warning)))
except IOError:
print('%s: cannot open' % filename)
retval = 1
sys.exit(retval)
|
[] |
[] |
[
"OBJDUMP",
"READELF"
] |
[]
|
["OBJDUMP", "READELF"]
|
python
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "getaride.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
qa/rpc-tests/maxuploadtarget.py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("HELIUMD", "heliumd"),
help="heliumd binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
            # Create a basic transaction that will send change back to ourself after accounting for a fee,
            # and then insert the 128 generated transaction outs in the middle. rawtx[92] is where the #
            # of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
        # test_nodes[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
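        # Put differently (a restatement of the numbers above): the byte budget for this old
        # block is the 200 MiB daily target minus 144 * MAX_BLOCK_SIZE reserved for new-block
        # relay, and success_count is that budget divided by the old block's size.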
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
|
[] |
[] |
[
"HELIUMD"
] |
[]
|
["HELIUMD"]
|
python
| 1 | 0 | |
my_site/my_library/my_library/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_library.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
moderate/comb_sort.go
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strings"
)
const shrink = 1.25
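// combSort parses the whitespace-separated integers in q, comb-sorts them (the gap
// shrinking by the factor above each pass), and returns the 1-based pass number on
// which the last swap occurred; a return of 0 means the input was already in order.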
func combSort(q string) uint {
var (
n, l uint
k, g int
c bool
)
u := strings.Fields(q)
g, c = len(u), false
v := make([]int, len(u))
for ix, i := range u {
fmt.Sscan(i, &k)
v[ix] = k
}
for g > 1 || c {
n, c = n+1, false
if g > 1 {
g = int(float32(g) / shrink)
}
        for j := 0; j < len(v)-g; j++ {
            if v[j] > v[j+g] {
                v[j], v[j+g], c, l = v[j+g], v[j], true, n
}
}
}
return l
}
func main() {
data, err := os.Open(os.Args[1])
if err != nil {
log.Fatal(err)
}
defer data.Close()
scanner := bufio.NewScanner(data)
for scanner.Scan() {
fmt.Println(combSort(scanner.Text()))
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
pkg/kubelet/cm/devicemanager/manager_test.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devicemanager
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
watcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
const (
testResourceName = "fake-domain/resource"
)
func tmpSocketDir() (socketDir, socketName, pluginSocketName string, err error) {
socketDir, err = ioutil.TempDir("", "device_plugin")
if err != nil {
return
}
socketName = socketDir + "/server.sock"
pluginSocketName = socketDir + "/device-plugin.sock"
os.MkdirAll(socketDir, 0755)
return
}
func TestNewManagerImpl(t *testing.T) {
socketDir, socketName, _, err := tmpSocketDir()
topologyStore := topologymanager.NewFakeManager()
require.NoError(t, err)
defer os.RemoveAll(socketDir)
_, err = newManagerImpl(socketName, nil, topologyStore)
require.NoError(t, err)
os.RemoveAll(socketDir)
}
func TestNewManagerImplStart(t *testing.T) {
socketDir, socketName, pluginSocketName, err := tmpSocketDir()
require.NoError(t, err)
defer os.RemoveAll(socketDir)
m, _, p := setup(t, []*pluginapi.Device{}, func(n string, d []pluginapi.Device) {}, socketName, pluginSocketName)
cleanup(t, m, p)
// Stop should tolerate being called more than once.
cleanup(t, m, p)
}
func TestNewManagerImplStartProbeMode(t *testing.T) {
socketDir, socketName, pluginSocketName, err := tmpSocketDir()
require.NoError(t, err)
defer os.RemoveAll(socketDir)
m, _, p, _ := setupInProbeMode(t, []*pluginapi.Device{}, func(n string, d []pluginapi.Device) {}, socketName, pluginSocketName)
cleanup(t, m, p)
}
// Tests that the device plugin manager correctly handles registration and re-registration by
// making sure that after registration, devices are correctly updated and if a re-registration
// happens, we will NOT delete devices, and no orphaned devices are left behind.
func TestDevicePluginReRegistration(t *testing.T) {
socketDir, socketName, pluginSocketName, err := tmpSocketDir()
require.NoError(t, err)
defer os.RemoveAll(socketDir)
devs := []*pluginapi.Device{
{ID: "Dev1", Health: pluginapi.Healthy},
{ID: "Dev2", Health: pluginapi.Healthy},
}
devsForRegistration := []*pluginapi.Device{
{ID: "Dev3", Health: pluginapi.Healthy},
}
for _, preStartContainerFlag := range []bool{false, true} {
for _, getPreferredAllocationFlag := range []bool{false, true} {
m, ch, p1 := setup(t, devs, nil, socketName, pluginSocketName)
p1.Register(socketName, testResourceName, "")
select {
case <-ch:
case <-time.After(5 * time.Second):
t.Fatalf("timeout while waiting for manager update")
}
capacity, allocatable, _ := m.GetCapacity()
resourceCapacity := capacity[v1.ResourceName(testResourceName)]
resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
p2 := NewDevicePluginStub(devs, pluginSocketName+".new", testResourceName, preStartContainerFlag, getPreferredAllocationFlag)
err = p2.Start()
require.NoError(t, err)
p2.Register(socketName, testResourceName, "")
select {
case <-ch:
case <-time.After(5 * time.Second):
t.Fatalf("timeout while waiting for manager update")
}
capacity, allocatable, _ = m.GetCapacity()
resourceCapacity = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices shouldn't change.")
// Test the scenario that a plugin re-registers with different devices.
p3 := NewDevicePluginStub(devsForRegistration, pluginSocketName+".third", testResourceName, preStartContainerFlag, getPreferredAllocationFlag)
err = p3.Start()
require.NoError(t, err)
p3.Register(socketName, testResourceName, "")
select {
case <-ch:
case <-time.After(5 * time.Second):
t.Fatalf("timeout while waiting for manager update")
}
capacity, allocatable, _ = m.GetCapacity()
resourceCapacity = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should be equal to allocatable")
require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of the previously registered plugin should be removed.")
p2.Stop()
p3.Stop()
cleanup(t, m, p1)
}
}
}
// Tests that the device plugin manager correctly handles registration and re-registration:
// after a plugin registers, its devices are reported, and when a plugin re-registers,
// existing devices are NOT deleted and no orphaned devices are left behind.
// In this variant, plugin discovery and registration are exercised through the kubelet's
// probe-based (PluginManager) mechanism.
func TestDevicePluginReRegistrationProbeMode(t *testing.T) {
socketDir, socketName, pluginSocketName, err := tmpSocketDir()
require.NoError(t, err)
defer os.RemoveAll(socketDir)
devs := []*pluginapi.Device{
{ID: "Dev1", Health: pluginapi.Healthy},
{ID: "Dev2", Health: pluginapi.Healthy},
}
devsForRegistration := []*pluginapi.Device{
{ID: "Dev3", Health: pluginapi.Healthy},
}
m, ch, p1, _ := setupInProbeMode(t, devs, nil, socketName, pluginSocketName)
// Wait for the first callback to be issued.
select {
case <-ch:
case <-time.After(5 * time.Second):
t.FailNow()
}
capacity, allocatable, _ := m.GetCapacity()
resourceCapacity := capacity[v1.ResourceName(testResourceName)]
resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should be equal to allocatable")
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
p2 := NewDevicePluginStub(devs, pluginSocketName+".new", testResourceName, false, false)
err = p2.Start()
require.NoError(t, err)
// Wait for the second callback to be issued.
select {
case <-ch:
case <-time.After(5 * time.Second):
t.FailNow()
}
capacity, allocatable, _ = m.GetCapacity()
resourceCapacity = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should be equal to allocatable")
require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
// Test the scenario that a plugin re-registers with different devices.
p3 := NewDevicePluginStub(devsForRegistration, pluginSocketName+".third", testResourceName, false, false)
err = p3.Start()
require.NoError(t, err)
// Wait for the third callback to be issued.
select {
case <-ch:
case <-time.After(5 * time.Second):
t.FailNow()
}
capacity, allocatable, _ = m.GetCapacity()
resourceCapacity = capacity[v1.ResourceName(testResourceName)]
resourceAllocatable = allocatable[v1.ResourceName(testResourceName)]
require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should be equal to allocatable")
require.Equal(t, int64(1), resourceAllocatable.Value(), "Devices of the previously registered plugin should be removed")
p2.Stop()
p3.Stop()
cleanup(t, m, p1)
}
func setupDeviceManager(t *testing.T, devs []*pluginapi.Device, callback monitorCallback, socketName string) (Manager, <-chan interface{}) {
topologyStore := topologymanager.NewFakeManager()
m, err := newManagerImpl(socketName, nil, topologyStore)
require.NoError(t, err)
updateChan := make(chan interface{})
if callback != nil {
m.callback = callback
}
originalCallback := m.callback
m.callback = func(resourceName string, devices []pluginapi.Device) {
originalCallback(resourceName, devices)
updateChan <- new(interface{})
}
activePods := func() []*v1.Pod {
return []*v1.Pod{}
}
err = m.Start(activePods, &sourcesReadyStub{})
require.NoError(t, err)
return m, updateChan
}
func setupDevicePlugin(t *testing.T, devs []*pluginapi.Device, pluginSocketName string) *Stub {
p := NewDevicePluginStub(devs, pluginSocketName, testResourceName, false, false)
err := p.Start()
require.NoError(t, err)
return p
}
func setupPluginManager(t *testing.T, pluginSocketName string, m Manager) pluginmanager.PluginManager {
pluginManager := pluginmanager.NewPluginManager(
filepath.Dir(pluginSocketName), /* sockDir */
&record.FakeRecorder{},
)
runPluginManager(pluginManager)
pluginManager.AddHandler(watcherapi.DevicePlugin, m.GetWatcherHandler())
return pluginManager
}
func runPluginManager(pluginManager pluginmanager.PluginManager) {
sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true })
go pluginManager.Run(sourcesReady, wait.NeverStop)
}
func setup(t *testing.T, devs []*pluginapi.Device, callback monitorCallback, socketName string, pluginSocketName string) (Manager, <-chan interface{}, *Stub) {
m, updateChan := setupDeviceManager(t, devs, callback, socketName)
p := setupDevicePlugin(t, devs, pluginSocketName)
return m, updateChan, p
}
func setupInProbeMode(t *testing.T, devs []*pluginapi.Device, callback monitorCallback, socketName string, pluginSocketName string) (Manager, <-chan interface{}, *Stub, pluginmanager.PluginManager) {
m, updateChan := setupDeviceManager(t, devs, callback, socketName)
pm := setupPluginManager(t, pluginSocketName, m)
p := setupDevicePlugin(t, devs, pluginSocketName)
return m, updateChan, p, pm
}
func cleanup(t *testing.T, m Manager, p *Stub) {
p.Stop()
m.Stop()
}
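// Typical wiring used by the tests in this file (a sketch that mirrors the helpers
// above, not additional test code): create a manager plus plugin stub, register the
// stub, wait for the manager callback, inspect capacity, then clean up.
//
//	m, ch, p := setup(t, devs, nil, socketName, pluginSocketName)
//	p.Register(socketName, testResourceName, "")
//	<-ch                                  // manager has observed the plugin's devices
//	capacity, allocatable, _ := m.GetCapacity()
//	cleanup(t, m, p)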
func TestUpdateCapacityAllocatable(t *testing.T) {
socketDir, socketName, _, err := tmpSocketDir()
topologyStore := topologymanager.NewFakeManager()
require.NoError(t, err)
defer os.RemoveAll(socketDir)
testManager, err := newManagerImpl(socketName, nil, topologyStore)
as := assert.New(t)
as.NotNil(testManager)
as.Nil(err)
devs := []pluginapi.Device{
{ID: "Device1", Health: pluginapi.Healthy},
{ID: "Device2", Health: pluginapi.Healthy},
{ID: "Device3", Health: pluginapi.Unhealthy},
}
callback := testManager.genericDeviceUpdateCallback
// Adds three devices for resource1, two healthy and one unhealthy.
// Expects capacity for resource1 to be 3 and allocatable to be 2.
resourceName1 := "domain1.com/resource1"
e1 := &endpointImpl{}
testManager.endpoints[resourceName1] = endpointInfo{e: e1, opts: nil}
callback(resourceName1, devs)
capacity, allocatable, removedResources := testManager.GetCapacity()
resource1Capacity, ok := capacity[v1.ResourceName(resourceName1)]
as.True(ok)
resource1Allocatable, ok := allocatable[v1.ResourceName(resourceName1)]
as.True(ok)
as.Equal(int64(3), resource1Capacity.Value())
as.Equal(int64(2), resource1Allocatable.Value())
as.Equal(0, len(removedResources))
// Deleting an unhealthy device should NOT change allocatable but should reduce capacity by 1.
devs1 := devs[:len(devs)-1]
callback(resourceName1, devs1)
capacity, allocatable, removedResources = testManager.GetCapacity()
resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)]
as.True(ok)
resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)]
as.True(ok)
as.Equal(int64(2), resource1Capacity.Value())
as.Equal(int64(2), resource1Allocatable.Value())
as.Equal(0, len(removedResources))
// Updating a healthy device to unhealthy should reduce allocatable by 1.
devs[1].Health = pluginapi.Unhealthy
callback(resourceName1, devs)
capacity, allocatable, removedResources = testManager.GetCapacity()
resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)]
as.True(ok)
resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)]
as.True(ok)
as.Equal(int64(3), resource1Capacity.Value())
as.Equal(int64(1), resource1Allocatable.Value())
as.Equal(0, len(removedResources))
// Deleting a healthy device should reduce capacity and allocatable by 1.
devs2 := devs[1:]
callback(resourceName1, devs2)
capacity, allocatable, removedResources = testManager.GetCapacity()
resource1Capacity, ok = capacity[v1.ResourceName(resourceName1)]
as.True(ok)
resource1Allocatable, ok = allocatable[v1.ResourceName(resourceName1)]
as.True(ok)
as.Equal(int64(0), resource1Allocatable.Value())
as.Equal(int64(2), resource1Capacity.Value())
as.Equal(0, len(removedResources))
// Tests adding another resource.
resourceName2 := "resource2"
e2 := &endpointImpl{}
testManager.endpoints[resourceName2] = endpointInfo{e: e2, opts: nil}
callback(resourceName2, devs)
capacity, allocatable, removedResources = testManager.GetCapacity()
as.Equal(2, len(capacity))
resource2Capacity, ok := capacity[v1.ResourceName(resourceName2)]
as.True(ok)
resource2Allocatable, ok := allocatable[v1.ResourceName(resourceName2)]
as.True(ok)
as.Equal(int64(3), resource2Capacity.Value())
as.Equal(int64(1), resource2Allocatable.Value())
as.Equal(0, len(removedResources))
// Expires resourceName1 endpoint. Verifies testManager.GetCapacity() reports that resourceName1
// is removed from capacity and it no longer exists in healthyDevices after the call.
e1.setStopTime(time.Now().Add(-1*endpointStopGracePeriod - time.Duration(10)*time.Second))
capacity, allocatable, removed := testManager.GetCapacity()
as.Equal([]string{resourceName1}, removed)
as.NotContains(capacity, v1.ResourceName(resourceName1))
as.NotContains(allocatable, v1.ResourceName(resourceName1))
val, ok := capacity[v1.ResourceName(resourceName2)]
as.True(ok)
as.Equal(int64(3), val.Value())
as.NotContains(testManager.healthyDevices, resourceName1)
as.NotContains(testManager.unhealthyDevices, resourceName1)
as.NotContains(testManager.endpoints, resourceName1)
as.Equal(1, len(testManager.endpoints))
// Stops resourceName2 endpoint. Verifies its stopTime is set, allocate and
// preStartContainer calls return errors.
e2.stop()
as.False(e2.stopTime.IsZero())
_, err = e2.allocate([]string{"Device1"})
as.True(reflect.DeepEqual(err, fmt.Errorf(errEndpointStopped, e2)))
_, err = e2.preStartContainer([]string{"Device1"})
as.True(reflect.DeepEqual(err, fmt.Errorf(errEndpointStopped, e2)))
// Marks resourceName2 unhealthy and verifies its capacity/allocatable are
// correctly updated.
testManager.markResourceUnhealthy(resourceName2)
capacity, allocatable, removed = testManager.GetCapacity()
val, ok = capacity[v1.ResourceName(resourceName2)]
as.True(ok)
as.Equal(int64(3), val.Value())
val, ok = allocatable[v1.ResourceName(resourceName2)]
as.True(ok)
as.Equal(int64(0), val.Value())
as.Empty(removed)
// Writes and re-reads checkpoints. Verifies we create a stopped endpoint
// for resourceName2, its capacity is set to zero, and we still consider
// it a DevicePlugin resource. This ensures that any pod scheduled while the
// capacity change was still propagating to the scheduler is properly
// rejected instead of being incorrectly started.
err = testManager.writeCheckpoint()
as.Nil(err)
testManager.healthyDevices = make(map[string]sets.String)
testManager.unhealthyDevices = make(map[string]sets.String)
err = testManager.readCheckpoint()
as.Nil(err)
as.Equal(1, len(testManager.endpoints))
as.Contains(testManager.endpoints, resourceName2)
capacity, allocatable, removed = testManager.GetCapacity()
val, ok = capacity[v1.ResourceName(resourceName2)]
as.True(ok)
as.Equal(int64(0), val.Value())
val, ok = allocatable[v1.ResourceName(resourceName2)]
as.True(ok)
as.Equal(int64(0), val.Value())
as.Empty(removed)
as.True(testManager.isDevicePluginResource(resourceName2))
}
func constructDevices(devices []string) sets.String {
ret := sets.NewString()
for _, dev := range devices {
ret.Insert(dev)
}
return ret
}
func constructAllocResp(devices, mounts, envs map[string]string) *pluginapi.ContainerAllocateResponse {
resp := &pluginapi.ContainerAllocateResponse{}
for k, v := range devices {
resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{
HostPath: k,
ContainerPath: v,
Permissions: "mrw",
})
}
for k, v := range mounts {
resp.Mounts = append(resp.Mounts, &pluginapi.Mount{
ContainerPath: k,
HostPath: v,
ReadOnly: true,
})
}
resp.Envs = make(map[string]string)
for k, v := range envs {
resp.Envs[k] = v
}
return resp
}
func TestCheckpoint(t *testing.T) {
resourceName1 := "domain1.com/resource1"
resourceName2 := "domain2.com/resource2"
as := assert.New(t)
tmpDir, err := ioutil.TempDir("", "checkpoint")
as.Nil(err)
ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
as.Nil(err)
testManager := &ManagerImpl{
endpoints: make(map[string]endpointInfo),
healthyDevices: make(map[string]sets.String),
unhealthyDevices: make(map[string]sets.String),
allocatedDevices: make(map[string]sets.String),
podDevices: make(podDevices),
checkpointManager: ckm,
}
testManager.podDevices.insert("pod1", "con1", resourceName1,
constructDevices([]string{"dev1", "dev2"}),
constructAllocResp(map[string]string{"/dev/r1dev1": "/dev/r1dev1", "/dev/r1dev2": "/dev/r1dev2"},
map[string]string{"/home/r1lib1": "/usr/r1lib1"}, map[string]string{}))
testManager.podDevices.insert("pod1", "con1", resourceName2,
constructDevices([]string{"dev1", "dev2"}),
constructAllocResp(map[string]string{"/dev/r2dev1": "/dev/r2dev1", "/dev/r2dev2": "/dev/r2dev2"},
map[string]string{"/home/r2lib1": "/usr/r2lib1"},
map[string]string{"r2devices": "dev1 dev2"}))
testManager.podDevices.insert("pod1", "con2", resourceName1,
constructDevices([]string{"dev3"}),
constructAllocResp(map[string]string{"/dev/r1dev3": "/dev/r1dev3"},
map[string]string{"/home/r1lib1": "/usr/r1lib1"}, map[string]string{}))
testManager.podDevices.insert("pod2", "con1", resourceName1,
constructDevices([]string{"dev4"}),
constructAllocResp(map[string]string{"/dev/r1dev4": "/dev/r1dev4"},
map[string]string{"/home/r1lib1": "/usr/r1lib1"}, map[string]string{}))
testManager.healthyDevices[resourceName1] = sets.NewString()
testManager.healthyDevices[resourceName1].Insert("dev1")
testManager.healthyDevices[resourceName1].Insert("dev2")
testManager.healthyDevices[resourceName1].Insert("dev3")
testManager.healthyDevices[resourceName1].Insert("dev4")
testManager.healthyDevices[resourceName1].Insert("dev5")
testManager.healthyDevices[resourceName2] = sets.NewString()
testManager.healthyDevices[resourceName2].Insert("dev1")
testManager.healthyDevices[resourceName2].Insert("dev2")
expectedPodDevices := testManager.podDevices
expectedAllocatedDevices := testManager.podDevices.devices()
expectedAllDevices := testManager.healthyDevices
err = testManager.writeCheckpoint()
as.Nil(err)
testManager.podDevices = make(podDevices)
err = testManager.readCheckpoint()
as.Nil(err)
as.Equal(len(expectedPodDevices), len(testManager.podDevices))
for podUID, containerDevices := range expectedPodDevices {
for conName, resources := range containerDevices {
for resource := range resources {
expDevices := expectedPodDevices.containerDevices(podUID, conName, resource)
testDevices := testManager.podDevices.containerDevices(podUID, conName, resource)
as.True(reflect.DeepEqual(expDevices, testDevices))
opts1 := expectedPodDevices.deviceRunContainerOptions(podUID, conName)
opts2 := testManager.podDevices.deviceRunContainerOptions(podUID, conName)
as.Equal(len(opts1.Envs), len(opts2.Envs))
as.Equal(len(opts1.Mounts), len(opts2.Mounts))
as.Equal(len(opts1.Devices), len(opts2.Devices))
}
}
}
as.True(reflect.DeepEqual(expectedAllocatedDevices, testManager.allocatedDevices))
as.True(reflect.DeepEqual(expectedAllDevices, testManager.healthyDevices))
}
type activePodsStub struct {
activePods []*v1.Pod
}
func (a *activePodsStub) getActivePods() []*v1.Pod {
return a.activePods
}
func (a *activePodsStub) updateActivePods(newPods []*v1.Pod) {
a.activePods = newPods
}
type MockEndpoint struct {
getPreferredAllocationFunc func(available, mustInclude []string, size int) (*pluginapi.PreferredAllocationResponse, error)
allocateFunc func(devs []string) (*pluginapi.AllocateResponse, error)
initChan chan []string
}
func (m *MockEndpoint) stop() {}
func (m *MockEndpoint) run() {}
func (m *MockEndpoint) callback(resourceName string, devices []pluginapi.Device) {}
func (m *MockEndpoint) preStartContainer(devs []string) (*pluginapi.PreStartContainerResponse, error) {
m.initChan <- devs
return &pluginapi.PreStartContainerResponse{}, nil
}
func (m *MockEndpoint) getPreferredAllocation(available, mustInclude []string, size int) (*pluginapi.PreferredAllocationResponse, error) {
if m.getPreferredAllocationFunc != nil {
return m.getPreferredAllocationFunc(available, mustInclude, size)
}
return nil, nil
}
func (m *MockEndpoint) allocate(devs []string) (*pluginapi.AllocateResponse, error) {
if m.allocateFunc != nil {
return m.allocateFunc(devs)
}
return nil, nil
}
func (m *MockEndpoint) isStopped() bool { return false }
func (m *MockEndpoint) stopGracePeriodExpired() bool { return false }
func makePod(limits v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: limits,
},
},
},
},
}
}
func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestResource) (*ManagerImpl, error) {
monitorCallback := func(resourceName string, devices []pluginapi.Device) {}
ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
if err != nil {
return nil, err
}
testManager := &ManagerImpl{
socketdir: tmpDir,
callback: monitorCallback,
healthyDevices: make(map[string]sets.String),
unhealthyDevices: make(map[string]sets.String),
allocatedDevices: make(map[string]sets.String),
endpoints: make(map[string]endpointInfo),
podDevices: make(podDevices),
devicesToReuse: make(PodReusableDevices),
topologyAffinityStore: topologymanager.NewFakeManager(),
activePods: activePods,
sourcesReady: &sourcesReadyStub{},
checkpointManager: ckm,
}
for _, res := range testRes {
testManager.healthyDevices[res.resourceName] = sets.NewString()
for _, dev := range res.devs {
testManager.healthyDevices[res.resourceName].Insert(dev)
}
if res.resourceName == "domain1.com/resource1" {
testManager.endpoints[res.resourceName] = endpointInfo{
e: &MockEndpoint{allocateFunc: allocateStubFunc()},
opts: nil,
}
}
if res.resourceName == "domain2.com/resource2" {
testManager.endpoints[res.resourceName] = endpointInfo{
e: &MockEndpoint{
allocateFunc: func(devs []string) (*pluginapi.AllocateResponse, error) {
resp := new(pluginapi.ContainerAllocateResponse)
resp.Envs = make(map[string]string)
for _, dev := range devs {
switch dev {
case "dev3":
resp.Envs["key2"] = "val2"
case "dev4":
resp.Envs["key2"] = "val3"
}
}
resps := new(pluginapi.AllocateResponse)
resps.ContainerResponses = append(resps.ContainerResponses, resp)
return resps, nil
},
},
opts: nil,
}
}
}
return testManager, nil
}
type TestResource struct {
resourceName string
resourceQuantity resource.Quantity
devs []string
}
func TestPodContainerDeviceAllocation(t *testing.T) {
res1 := TestResource{
resourceName: "domain1.com/resource1",
resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
devs: []string{"dev1", "dev2"},
}
res2 := TestResource{
resourceName: "domain2.com/resource2",
resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI),
devs: []string{"dev3", "dev4"},
}
testResources := make([]TestResource, 0, 2)
testResources = append(testResources, res1)
testResources = append(testResources, res2)
as := require.New(t)
podsStub := activePodsStub{
activePods: []*v1.Pod{},
}
tmpDir, err := ioutil.TempDir("", "checkpoint")
as.Nil(err)
defer os.RemoveAll(tmpDir)
testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
as.Nil(err)
testPods := []*v1.Pod{
makePod(v1.ResourceList{
v1.ResourceName(res1.resourceName): res1.resourceQuantity,
v1.ResourceName("cpu"): res1.resourceQuantity,
v1.ResourceName(res2.resourceName): res2.resourceQuantity}),
makePod(v1.ResourceList{
v1.ResourceName(res1.resourceName): res2.resourceQuantity}),
makePod(v1.ResourceList{
v1.ResourceName(res2.resourceName): res2.resourceQuantity}),
}
testCases := []struct {
description string
testPod *v1.Pod
expectedContainerOptsLen []int
expectedAllocatedResName1 int
expectedAllocatedResName2 int
expErr error
}{
{
description: "Successful allocation of two Res1 resources and one Res2 resource",
testPod: testPods[0],
expectedContainerOptsLen: []int{3, 2, 2},
expectedAllocatedResName1: 2,
expectedAllocatedResName2: 1,
expErr: nil,
},
{
description: "Requesting to create a pod without enough resources should fail",
testPod: testPods[1],
expectedContainerOptsLen: nil,
expectedAllocatedResName1: 2,
expectedAllocatedResName2: 1,
expErr: fmt.Errorf("requested number of devices unavailable for domain1.com/resource1. Requested: 1, Available: 0"),
},
{
description: "Successful allocation of all available Res1 resources and Res2 resources",
testPod: testPods[2],
expectedContainerOptsLen: []int{0, 0, 1},
expectedAllocatedResName1: 2,
expectedAllocatedResName2: 2,
expErr: nil,
},
}
activePods := []*v1.Pod{}
for _, testCase := range testCases {
pod := testCase.testPod
activePods = append(activePods, pod)
podsStub.updateActivePods(activePods)
err := testManager.Allocate(pod, &pod.Spec.Containers[0])
if !reflect.DeepEqual(err, testCase.expErr) {
t.Errorf("DevicePluginManager error (%v). expected error: %v but got: %v",
testCase.description, testCase.expErr, err)
}
runContainerOpts, err := testManager.GetDeviceRunContainerOptions(pod, &pod.Spec.Containers[0])
if testCase.expErr == nil {
as.Nil(err)
}
if testCase.expectedContainerOptsLen == nil {
as.Nil(runContainerOpts)
} else {
as.Equal(len(runContainerOpts.Devices), testCase.expectedContainerOptsLen[0])
as.Equal(len(runContainerOpts.Mounts), testCase.expectedContainerOptsLen[1])
as.Equal(len(runContainerOpts.Envs), testCase.expectedContainerOptsLen[2])
}
as.Equal(testCase.expectedAllocatedResName1, testManager.allocatedDevices[res1.resourceName].Len())
as.Equal(testCase.expectedAllocatedResName2, testManager.allocatedDevices[res2.resourceName].Len())
}
}
func TestInitContainerDeviceAllocation(t *testing.T) {
// Requesting to create a pod that requests resourceName1 in init containers and normal containers
// should succeed with devices allocated to init containers reallocated to normal containers.
res1 := TestResource{
resourceName: "domain1.com/resource1",
resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
devs: []string{"dev1", "dev2"},
}
res2 := TestResource{
resourceName: "domain2.com/resource2",
resourceQuantity: *resource.NewQuantity(int64(1), resource.DecimalSI),
devs: []string{"dev3", "dev4"},
}
testResources := make([]TestResource, 0, 2)
testResources = append(testResources, res1)
testResources = append(testResources, res2)
as := require.New(t)
podsStub := activePodsStub{
activePods: []*v1.Pod{},
}
tmpDir, err := ioutil.TempDir("", "checkpoint")
as.Nil(err)
defer os.RemoveAll(tmpDir)
testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
as.Nil(err)
podWithPluginResourcesInInitContainers := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: string(uuid.NewUUID()),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceName(res1.resourceName): res2.resourceQuantity,
},
},
},
{
Name: string(uuid.NewUUID()),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceName(res1.resourceName): res1.resourceQuantity,
},
},
},
},
Containers: []v1.Container{
{
Name: string(uuid.NewUUID()),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceName(res1.resourceName): res2.resourceQuantity,
v1.ResourceName(res2.resourceName): res2.resourceQuantity,
},
},
},
{
Name: string(uuid.NewUUID()),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceName(res1.resourceName): res2.resourceQuantity,
v1.ResourceName(res2.resourceName): res2.resourceQuantity,
},
},
},
},
},
}
podsStub.updateActivePods([]*v1.Pod{podWithPluginResourcesInInitContainers})
for _, container := range podWithPluginResourcesInInitContainers.Spec.InitContainers {
err = testManager.Allocate(podWithPluginResourcesInInitContainers, &container)
}
for _, container := range podWithPluginResourcesInInitContainers.Spec.Containers {
err = testManager.Allocate(podWithPluginResourcesInInitContainers, &container)
}
as.Nil(err)
podUID := string(podWithPluginResourcesInInitContainers.UID)
initCont1 := podWithPluginResourcesInInitContainers.Spec.InitContainers[0].Name
initCont2 := podWithPluginResourcesInInitContainers.Spec.InitContainers[1].Name
normalCont1 := podWithPluginResourcesInInitContainers.Spec.Containers[0].Name
normalCont2 := podWithPluginResourcesInInitContainers.Spec.Containers[1].Name
initCont1Devices := testManager.podDevices.containerDevices(podUID, initCont1, res1.resourceName)
initCont2Devices := testManager.podDevices.containerDevices(podUID, initCont2, res1.resourceName)
normalCont1Devices := testManager.podDevices.containerDevices(podUID, normalCont1, res1.resourceName)
normalCont2Devices := testManager.podDevices.containerDevices(podUID, normalCont2, res1.resourceName)
as.Equal(1, initCont1Devices.Len())
as.Equal(2, initCont2Devices.Len())
as.Equal(1, normalCont1Devices.Len())
as.Equal(1, normalCont2Devices.Len())
as.True(initCont2Devices.IsSuperset(initCont1Devices))
as.True(initCont2Devices.IsSuperset(normalCont1Devices))
as.True(initCont2Devices.IsSuperset(normalCont2Devices))
as.Equal(0, normalCont1Devices.Intersection(normalCont2Devices).Len())
}
func TestUpdatePluginResources(t *testing.T) {
pod := &v1.Pod{}
pod.UID = types.UID("testPod")
resourceName1 := "domain1.com/resource1"
devID1 := "dev1"
resourceName2 := "domain2.com/resource2"
devID2 := "dev2"
as := assert.New(t)
monitorCallback := func(resourceName string, devices []pluginapi.Device) {}
tmpDir, err := ioutil.TempDir("", "checkpoint")
as.Nil(err)
ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
as.Nil(err)
testManager := &ManagerImpl{
callback: monitorCallback,
allocatedDevices: make(map[string]sets.String),
healthyDevices: make(map[string]sets.String),
podDevices: make(podDevices),
checkpointManager: ckm,
}
testManager.podDevices[string(pod.UID)] = make(containerDevices)
// require one of resource1 and one of resource2
testManager.allocatedDevices[resourceName1] = sets.NewString()
testManager.allocatedDevices[resourceName1].Insert(devID1)
testManager.allocatedDevices[resourceName2] = sets.NewString()
testManager.allocatedDevices[resourceName2].Insert(devID2)
cachedNode := &v1.Node{
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
// has no resource1 and two of resource2
v1.ResourceName(resourceName2): *resource.NewQuantity(int64(2), resource.DecimalSI),
},
},
}
nodeInfo := &schedulerframework.NodeInfo{}
nodeInfo.SetNode(cachedNode)
testManager.UpdatePluginResources(nodeInfo, &lifecycle.PodAdmitAttributes{Pod: pod})
allocatableScalarResources := nodeInfo.Allocatable.ScalarResources
// allocatable in nodeInfo is less than needed, should update
as.Equal(1, int(allocatableScalarResources[v1.ResourceName(resourceName1)]))
// allocatable in nodeInfo is more than needed, should skip updating
as.Equal(2, int(allocatableScalarResources[v1.ResourceName(resourceName2)]))
}
func TestDevicePreStartContainer(t *testing.T) {
// Ensures that when a device plugin indicates that the `PreStartContainer` RPC must be
// invoked, the device manager calls PreStartContainer on the endpoint interface.
// Also verifies that the final allocation of mounts, envs, etc. matches what is expected.
res1 := TestResource{
resourceName: "domain1.com/resource1",
resourceQuantity: *resource.NewQuantity(int64(2), resource.DecimalSI),
devs: []string{"dev1", "dev2"},
}
as := require.New(t)
podsStub := activePodsStub{
activePods: []*v1.Pod{},
}
tmpDir, err := ioutil.TempDir("", "checkpoint")
as.Nil(err)
defer os.RemoveAll(tmpDir)
testManager, err := getTestManager(tmpDir, podsStub.getActivePods, []TestResource{res1})
as.Nil(err)
ch := make(chan []string, 1)
testManager.endpoints[res1.resourceName] = endpointInfo{
e: &MockEndpoint{
initChan: ch,
allocateFunc: allocateStubFunc(),
},
opts: &pluginapi.DevicePluginOptions{PreStartRequired: true},
}
pod := makePod(v1.ResourceList{
v1.ResourceName(res1.resourceName): res1.resourceQuantity})
activePods := []*v1.Pod{}
activePods = append(activePods, pod)
podsStub.updateActivePods(activePods)
err = testManager.Allocate(pod, &pod.Spec.Containers[0])
as.Nil(err)
runContainerOpts, err := testManager.GetDeviceRunContainerOptions(pod, &pod.Spec.Containers[0])
as.Nil(err)
var initializedDevs []string
select {
case <-time.After(time.Second):
t.Fatalf("Timed out while waiting on channel for response from PreStartContainer RPC stub")
case initializedDevs = <-ch:
break
}
as.Contains(initializedDevs, "dev1")
as.Contains(initializedDevs, "dev2")
as.Equal(len(initializedDevs), len(res1.devs))
expectedResps, err := allocateStubFunc()([]string{"dev1", "dev2"})
as.Nil(err)
as.Equal(1, len(expectedResps.ContainerResponses))
expectedResp := expectedResps.ContainerResponses[0]
as.Equal(len(runContainerOpts.Devices), len(expectedResp.Devices))
as.Equal(len(runContainerOpts.Mounts), len(expectedResp.Mounts))
as.Equal(len(runContainerOpts.Envs), len(expectedResp.Envs))
}
func TestResetExtendedResource(t *testing.T) {
as := assert.New(t)
tmpDir, err := ioutil.TempDir("", "checkpoint")
as.Nil(err)
ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
as.Nil(err)
testManager := &ManagerImpl{
endpoints: make(map[string]endpointInfo),
healthyDevices: make(map[string]sets.String),
unhealthyDevices: make(map[string]sets.String),
allocatedDevices: make(map[string]sets.String),
podDevices: make(podDevices),
checkpointManager: ckm,
}
extendedResourceName := "domain.com/resource"
testManager.podDevices.insert("pod", "con", extendedResourceName,
constructDevices([]string{"dev1"}),
constructAllocResp(map[string]string{"/dev/dev1": "/dev/dev1"},
map[string]string{"/home/lib1": "/usr/lib1"}, map[string]string{}))
testManager.healthyDevices[extendedResourceName] = sets.NewString()
testManager.healthyDevices[extendedResourceName].Insert("dev1")
// checkpoint is present, indicating node hasn't been recreated
err = testManager.writeCheckpoint()
as.Nil(err)
as.False(testManager.ShouldResetExtendedResourceCapacity())
// checkpoint is absent, representing node recreation
ckpts, err := ckm.ListCheckpoints()
as.Nil(err)
for _, ckpt := range ckpts {
err = ckm.RemoveCheckpoint(ckpt)
as.Nil(err)
}
as.True(testManager.ShouldResetExtendedResourceCapacity())
}
func allocateStubFunc() func(devs []string) (*pluginapi.AllocateResponse, error) {
return func(devs []string) (*pluginapi.AllocateResponse, error) {
resp := new(pluginapi.ContainerAllocateResponse)
resp.Envs = make(map[string]string)
for _, dev := range devs {
switch dev {
case "dev1":
resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{
ContainerPath: "/dev/aaa",
HostPath: "/dev/aaa",
Permissions: "mrw",
})
resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{
ContainerPath: "/dev/bbb",
HostPath: "/dev/bbb",
Permissions: "mrw",
})
resp.Mounts = append(resp.Mounts, &pluginapi.Mount{
ContainerPath: "/container_dir1/file1",
HostPath: "host_dir1/file1",
ReadOnly: true,
})
case "dev2":
resp.Devices = append(resp.Devices, &pluginapi.DeviceSpec{
ContainerPath: "/dev/ccc",
HostPath: "/dev/ccc",
Permissions: "mrw",
})
resp.Mounts = append(resp.Mounts, &pluginapi.Mount{
ContainerPath: "/container_dir1/file2",
HostPath: "host_dir1/file2",
ReadOnly: true,
})
resp.Envs["key1"] = "val1"
}
}
resps := new(pluginapi.AllocateResponse)
resps.ContainerResponses = append(resps.ContainerResponses, resp)
return resps, nil
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
integration/common/openlineage/common/provider/great_expectations/action.py
|
# SPDX-License-Identifier: Apache-2.0
import copy
import logging
import os
from collections import defaultdict
from datetime import datetime
from typing import Optional, List
from urllib.parse import urlparse
from uuid import uuid4
from great_expectations.checkpoint import ValidationAction
from great_expectations.core import ExpectationSuiteValidationResult
from great_expectations.data_context.types.resource_identifiers import \
ValidationResultIdentifier
from great_expectations.dataset import SqlAlchemyDataset, PandasDataset, \
Dataset as GEDataset
from great_expectations.execution_engine import (
SqlAlchemyExecutionEngine, PandasExecutionEngine,
)
from great_expectations.execution_engine.sqlalchemy_batch_data import \
SqlAlchemyBatchData
from great_expectations.validator.validator import Validator
from openlineage.client import OpenLineageClient, OpenLineageClientOptions
from openlineage.client.facet import ParentRunFacet, DocumentationJobFacet, \
SourceCodeLocationJobFacet, DataQualityMetricsInputDatasetFacet, \
ColumnMetric
from openlineage.client.run import RunEvent, RunState, Run, Job
from openlineage.client.serde import Serde
from openlineage.common.dataset import Dataset, Source, Field
from openlineage.common.dataset import Dataset as OLDataset
from openlineage.common.provider.great_expectations.facets import \
GreatExpectationsAssertionsDatasetFacet, \
GreatExpectationsRunFacet
from openlineage.common.provider.great_expectations.results import \
EXPECTATIONS_PARSERS, \
COLUMN_EXPECTATIONS_PARSER, \
GreatExpectationsAssertion
from openlineage.common.sql import parse
from sqlalchemy import MetaData, Table
from sqlalchemy.engine import Connection
class OpenLineageValidationAction(ValidationAction):
"""
ValidationAction implementation which posts RunEvents for a GreatExpectations validation job.
OpenLineage host parameters can be passed in as constructor arguments; otherwise the
corresponding environment variables are searched. Job information can optionally be passed
in as constructor arguments; otherwise the Great Expectations suite name and batch
identifier are used as the job name
(the namespace should be passed in as either a constructor arg or as an environment variable).
The data_asset is inspected to determine the dataset source: SqlAlchemy datasets and
Pandas datasets are supported. SqlAlchemy datasets are treated like other SQL data
sources in OpenLineage: the database host and database name form the data "source"
and the schema + table form the table name. Columns are fetched when possible and the
schema is posted as a facet. Some special handling for BigQuery is included, as "bigquery"
is always the data source, while the table name consists of "project.dataset.table".
Both the GreatExpectationsAssertionsDatasetFacet and DataQualityDatasetFacet are attached to
*each* dataset found in the data_asset (this includes tables that are joined in a `custom_sql`
argument). The DataQualityDatasetFacet is also posted as the more standard OpenLineage
DataQualityMetricsInputDatasetFacet.
The resulting RunEvent is returned from the _run method, so it can be seen in the
actions_results field of the validation results.
"""
def __init__(self, data_context,
openlineage_host=None,
openlineage_namespace=None,
openlineage_apiKey=None,
openlineage_parent_run_id=None,
openlineage_parent_job_namespace=None,
openlineage_parent_job_name=None,
job_name=None,
job_description=None,
code_location=None,
openlineage_run_id=None,
do_publish=True):
super().__init__(data_context)
if openlineage_host is not None:
self.openlineage_client = OpenLineageClient(openlineage_host,
OpenLineageClientOptions(
api_key=openlineage_apiKey))
else:
self.openlineage_client = OpenLineageClient.from_environment()
if openlineage_namespace is not None:
self.namespace = openlineage_namespace
else:
self.namespace = os.getenv('OPENLINEAGE_NAMESPACE', 'default')
if openlineage_run_id is not None:
self.run_id = openlineage_run_id
else:
self.run_id = uuid4()
self.parent_run_id = openlineage_parent_run_id
self.parent_job_namespace = openlineage_parent_job_namespace
self.parent_job_name = openlineage_parent_job_name
self.job_name = job_name
self.job_description = job_description
self.code_location = code_location
self.do_publish = do_publish
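# --- Hypothetical usage sketch (not part of the original module) ---
# One way to wire this action into a Great Expectations checkpoint is through an
# action_list entry; the host, namespace, and job name below are illustrative and
# may instead be supplied via environment variables as described in the class docstring:
#
#   {
#       "name": "openlineage",
#       "action": {
#           "module_name": "openlineage.common.provider.great_expectations.action",
#           "class_name": "OpenLineageValidationAction",
#           "openlineage_host": "http://localhost:5000",
#           "openlineage_namespace": "my_namespace",
#           "job_name": "validate_my_dataset",
#       },
#   }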
def _run(self,
validation_result_suite: ExpectationSuiteValidationResult,
validation_result_suite_identifier: ValidationResultIdentifier,
data_asset: [GEDataset, Validator],
expectation_suite_identifier=None,
checkpoint_identifier=None,
payload=None):
# Initialize logger here so that the action is serializable until it actually runs
self.log = logging.getLogger(self.__class__.__module__ + '.' + self.__class__.__name__)
datasets = []
if isinstance(data_asset, SqlAlchemyDataset):
datasets = self._fetch_datasets_from_sql_source(data_asset, validation_result_suite)
elif isinstance(data_asset, PandasDataset):
datasets = self._fetch_datasets_from_pandas_source(data_asset, validation_result_suite)
elif isinstance(data_asset.execution_engine, SqlAlchemyExecutionEngine):
datasets = self._fetch_datasets_from_sql_source(data_asset, validation_result_suite)
elif isinstance(data_asset.execution_engine, PandasExecutionEngine):
datasets = self._fetch_datasets_from_pandas_source(data_asset, validation_result_suite)
run_facets = {}
if self.parent_run_id is not None:
run_facets.update({"parentRun": ParentRunFacet.create(
self.parent_run_id,
self.parent_job_namespace,
self.parent_job_name
)})
# workaround for GE v2 and v3 API difference
suite_meta = copy.deepcopy(validation_result_suite.meta)
if 'expectation_suite_meta' not in suite_meta:
suite_meta['expectation_suite_meta'] = validation_result_suite.meta
run_facets.update(
{"great_expectations_meta": GreatExpectationsRunFacet(
**suite_meta,
)})
job_facets = {}
if self.job_description:
job_facets.update({
"documentation": DocumentationJobFacet(self.job_description)
})
if self.code_location:
job_facets.update({
"sourceCodeLocation": SourceCodeLocationJobFacet("", self.code_location)
})
job_name = self.job_name
if self.job_name is None:
job_name = validation_result_suite.meta["expectation_suite_name"] + '.' \
+ validation_result_suite_identifier.batch_identifier
run_event = RunEvent(
eventType=RunState.COMPLETE,
eventTime=datetime.now().isoformat(),
run=Run(runId=str(self.run_id), facets=run_facets),
job=Job(self.namespace, job_name, facets=job_facets),
inputs=datasets,
outputs=[],
producer="https://github.com/OpenLineage/OpenLineage/tree/$VERSION/integration/common/openlineage/provider/great_expectations" # noqa
)
if self.do_publish:
self.openlineage_client.emit(run_event)
# Great Expectations tries to append to this return value, so we need to return a plain dict
return Serde.to_dict(run_event)
def _fetch_datasets_from_pandas_source(self, data_asset: PandasDataset,
validation_result_suite: ExpectationSuiteValidationResult) -> List[OLDataset]: # noqa
"""
Generate a list of OpenLineage Datasets from a PandasDataset
:param data_asset:
:param validation_result_suite:
:return:
"""
if "path" in data_asset.batch_kwargs:
path = data_asset.batch_kwargs.get("path")
if path.startswith("/"):
path = "file://{}".format(path)
parsed_url = urlparse(path)
columns = [Field(
name=col,
type=str(data_asset[col].dtype) if data_asset[col].dtype is not None else 'UNKNOWN'
) for col in data_asset.columns]
return [
Dataset(
source=self._source(parsed_url._replace(path='')),
name=parsed_url.path,
fields=columns,
input_facets=self.results_facet(validation_result_suite)
).to_openlineage_dataset()
]
def _fetch_datasets_from_sql_source(self, data_asset: [SqlAlchemyDataset, Validator],
validation_result_suite: ExpectationSuiteValidationResult) -> List[OLDataset]: # noqa
"""
Generate a list of OpenLineage Datasets from a SqlAlchemyDataset.
:param data_asset:
:param validation_result_suite:
:return:
"""
metadata = MetaData()
if isinstance(data_asset, SqlAlchemyDataset):
if data_asset.generated_table_name is not None:
custom_sql = data_asset.batch_kwargs.get('query')
parsed_sql = parse(custom_sql)
return [
self._get_sql_table(data_asset, metadata, t.schema, t.name,
validation_result_suite) for t in
parsed_sql.in_tables
]
return [self._get_sql_table(data_asset, metadata, data_asset._table.schema,
data_asset._table.name,
validation_result_suite)]
else:
batch = data_asset.active_batch
batch_data = batch["data"]
table_name = batch["batch_spec"]["table_name"]
try:
schema_name = batch["batch_spec"]["schema_name"]
except KeyError:
schema_name = None
return [
self._get_sql_table(
batch_data,
metadata,
schema_name,
table_name,
validation_result_suite
)
]
def _get_sql_table(
self,
data_asset: [SqlAlchemyDataset, SqlAlchemyBatchData],
meta: MetaData,
schema: Optional[str],
table_name: str,
validation_result_suite: ExpectationSuiteValidationResult
) -> Optional[OLDataset]:
"""
Construct a Dataset from the connection url and the columns returned from the
SqlAlchemyDataset
:param data_asset:
:return:
"""
engine = data_asset.engine if isinstance(data_asset, SqlAlchemyDataset)\
else data_asset._engine
if isinstance(engine, Connection):
engine = engine.engine
datasource_url = engine.url
if engine.dialect.name.lower() == "bigquery":
schema = '{}.{}'.format(datasource_url.host, datasource_url.database)
table = Table(table_name, meta, autoload_with=engine)
fields = [Field(
name=key,
type=str(col.type) if col.type is not None else 'UNKNOWN',
description=col.doc
) for key, col in table.columns.items()]
name = table_name \
if schema is None \
else "{}.{}".format(schema, table_name)
results_facet = self.results_facet(validation_result_suite)
return Dataset(
source=self._source(urlparse(str(datasource_url))),
fields=fields,
name=name,
input_facets=results_facet
).to_openlineage_dataset()
def _source(self, url) -> Source:
"""
Construct a Source from the connection url. Special handling for BigQuery is included.
We attempt to strip credentials from the connection url, if present.
:param url: a parsed url, as returned from urlparse()
:return:
"""
if url.scheme == "bigquery":
return Source(
scheme='bigquery',
connection_url='bigquery'
)
return Source(
scheme=url.scheme,
authority=url.hostname,
# Remove credentials from the URL if present
connection_url=url._replace(netloc=url.hostname, query=None, fragment=None).geturl()
)
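# Illustrative (not part of the original module): for a SQLAlchemy URL such as
# "postgresql://user:secret@dbhost:5432/mydb", _source() yields a Source with
# scheme "postgresql", authority "dbhost", and a connection_url with the
# credentials (and port) stripped, e.g. "postgresql://dbhost/mydb".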
def results_facet(self, validation_result: ExpectationSuiteValidationResult):
"""
Parse the validation result and extract input facets based on the results. We return a
DataQualityDatasetFacet, a GreatExpectationsAssertionsDatasetFacet, and an
(OpenLineage-standard) DataQualityMetricsInputDatasetFacet
:param validation_result:
:return:
"""
try:
data_quality_facet = self.parse_data_quality_facet(validation_result)
if not data_quality_facet:
return None
assertions_facet = self.parse_assertions(validation_result)
if not assertions_facet:
return None
return {
'dataQuality': data_quality_facet,
'greatExpectations_assertions': assertions_facet,
'dataQualityMetrics': data_quality_facet
}
except ValueError:
self.log.exception("Exception while retrieving great expectations dataset")
return None
def parse_data_quality_facet(self, validation_result: ExpectationSuiteValidationResult) \
-> Optional[DataQualityMetricsInputDatasetFacet]:
"""
Parse the validation result and extract a DataQualityDatasetFacet
:param validation_result:
:return:
"""
facet_data = {
"columnMetrics": defaultdict(dict)
}
# try to get to actual expectations results
try:
expectations_results = validation_result['results']
for expectation in expectations_results:
for parser in EXPECTATIONS_PARSERS:
# accept possible duplication, should have no difference in results
if parser.can_accept(expectation):
result = parser.parse_expectation_result(expectation)
facet_data[result.facet_key] = result.value
for parser in COLUMN_EXPECTATIONS_PARSER:
if parser.can_accept(expectation):
result = parser.parse_expectation_result(expectation)
facet_data['columnMetrics'][result.column_id][result.facet_key] \
= result.value
for key in facet_data['columnMetrics'].keys():
facet_data['columnMetrics'][key] = ColumnMetric(**facet_data['columnMetrics'][key])
return DataQualityMetricsInputDatasetFacet(**facet_data)
except ValueError:
self.log.exception(
"Great Expectations's CheckpointResult object does not have expected key"
)
return None
def parse_assertions(self, validation_result: ExpectationSuiteValidationResult) -> \
Optional[GreatExpectationsAssertionsDatasetFacet]:
assertions = []
try:
for expectation in validation_result.results:
assertions.append(GreatExpectationsAssertion(
expectationType=expectation['expectation_config']['expectation_type'],
success=expectation['success'],
column=expectation['expectation_config']['kwargs'].get('column', None)
))
return GreatExpectationsAssertionsDatasetFacet(assertions)
except ValueError:
self.log.exception(
"Great Expectations's CheckpointResult object does not have expected key"
)
return None
|
[] |
[] |
[
"OPENLINEAGE_NAMESPACE"
] |
[]
|
["OPENLINEAGE_NAMESPACE"]
|
python
| 1 | 0 | |
internal/contour/listener_adobe.go
|
package contour
import (
"encoding/json"
"os"
udpa_type_v1 "github.com/cncf/udpa/go/udpa/type/v1"
envoy_api_v2_auth "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth"
envoy_api_v2_listener "github.com/envoyproxy/go-control-plane/envoy/api/v2/listener"
"github.com/envoyproxy/go-control-plane/pkg/wellknown"
_struct "github.com/golang/protobuf/ptypes/struct"
"github.com/projectcontour/contour/internal/protobuf"
)
type (
Cidr struct {
AddressPrefix string `json:"address_prefix"`
PrefixLen float64 `json:"prefix_len"`
}
IpAllowDenyConfig struct {
AllowCidrs *[]Cidr `json:"allow_cidrs"`
DenyCidrs *[]Cidr `json:"deny_cidrs"`
}
)
var ipAllowDenyListenerFilter *envoy_api_v2_listener.ListenerFilter
func init() {
path := os.Getenv("CIDR_LIST_PATH")
if path == "" {
return
}
f, err := os.Open(path)
if err != nil {
panic("CIDR_LIST_PATH was provided but os.Open failed " + err.Error())
}
defer f.Close()
config := IpAllowDenyConfig{}
err = json.NewDecoder(f).Decode(&config)
if err != nil {
panic("could not deserialize cidrs in CIDR_LIST_PATH " + path)
}
structFields := make(map[string]*_struct.Value)
if config.AllowCidrs != nil {
cidrToProto(*config.AllowCidrs, "allow_cidrs", structFields)
}
if config.DenyCidrs != nil {
cidrToProto(*config.DenyCidrs, "deny_cidrs", structFields)
}
if len(structFields) > 0 {
ipAllowDenyListenerFilter = new(envoy_api_v2_listener.ListenerFilter)
ipAllowDenyListenerFilter.Name = "envoy.listener.ip_allow_deny"
ipAllowDenyListenerFilter.ConfigType = &envoy_api_v2_listener.ListenerFilter_TypedConfig{
TypedConfig: protobuf.MustMarshalAny(&udpa_type_v1.TypedStruct{
TypeUrl: "envoy.config.filter.network.ip_allow_deny.v2.IpAllowDeny",
Value: &_struct.Struct{
Fields: structFields,
},
}),
}
}
}
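// exampleIpAllowDenyConfig is a hypothetical sketch (not part of the original file):
// it shows the JSON document init() expects to find at CIDR_LIST_PATH, matching the
// struct tags on IpAllowDenyConfig above:
//
//	{
//	  "allow_cidrs": [{"address_prefix": "10.0.0.0", "prefix_len": 8}],
//	  "deny_cidrs":  [{"address_prefix": "192.0.2.0", "prefix_len": 24}]
//	}
//
// Decoding that document produces a value equivalent to the one returned here.
func exampleIpAllowDenyConfig() IpAllowDenyConfig {
	allow := []Cidr{{AddressPrefix: "10.0.0.0", PrefixLen: 8}}
	deny := []Cidr{{AddressPrefix: "192.0.2.0", PrefixLen: 24}}
	return IpAllowDenyConfig{AllowCidrs: &allow, DenyCidrs: &deny}
}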
func cidrToProto(cidrs []Cidr, key string, structFields map[string]*_struct.Value) {
cidrList := &_struct.ListValue{
Values: make([]*_struct.Value, 0),
}
structFields[key] = &_struct.Value{
Kind: &_struct.Value_ListValue{
ListValue: cidrList,
},
}
for _, cidr := range cidrs {
cidrStruct := &_struct.Struct{
Fields: make(map[string]*_struct.Value),
}
cidrStruct.Fields["address_prefix"] = &_struct.Value{
Kind: &_struct.Value_StringValue{
StringValue: cidr.AddressPrefix,
},
}
cidrStruct.Fields["prefix_len"] = &_struct.Value{
Kind: &_struct.Value_NumberValue{
NumberValue: cidr.PrefixLen,
},
}
cidrList.Values = append(cidrList.Values, &_struct.Value{
Kind: &_struct.Value_StructValue{
StructValue: cidrStruct,
},
})
}
}
func CustomListenerFilters() []*envoy_api_v2_listener.ListenerFilter {
if ipAllowDenyListenerFilter == nil {
return []*envoy_api_v2_listener.ListenerFilter{}
}
return []*envoy_api_v2_listener.ListenerFilter{ipAllowDenyListenerFilter}
}
// maxProtoVersion returns the max supported version if the given version is TLS_AUTO
func maxProtoVersion(version envoy_api_v2_auth.TlsParameters_TlsProtocol) envoy_api_v2_auth.TlsParameters_TlsProtocol {
if version == envoy_api_v2_auth.TlsParameters_TLS_AUTO {
return envoy_api_v2_auth.TlsParameters_TLSv1_3
}
return version
}
// isTCPProxyFilter returns true if the given list contains a tcp_proxy filter
func isTCPProxyFilter(filters []*envoy_api_v2_listener.Filter) bool {
for _, f := range filters {
if f.Name == wellknown.TCPProxy {
return true
}
}
return false
}
|
[
"\"CIDR_LIST_PATH\""
] |
[] |
[
"CIDR_LIST_PATH"
] |
[]
|
["CIDR_LIST_PATH"]
|
go
| 1 | 0 | |
editor/editor.go
|
// Package editor is a collection of utilities to find and spawn a sensible editor
package editor
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
)
const bugMessage = "This is a bug in sensible; please file at https://github.com/ernestrc/sensible/issues"
// inspired by i3-sensible-editor
// The order has been altered to make the world a better place
var editors = []string{"$EDITOR", "$VISUAL", "vim", "nvim", "vi", "emacs", "nano", "pico", "qe", "mg", "jed", "gedit", "mc-edit"}
var basePath = []string{"/usr/local/bin", "/usr/bin", "/usr/sbin", "/bin"}
var userPath []string
var selectedExec string
var selectedArgs []string
var selectedEditor *Editor
func init() {
editors[0] = os.Getenv("EDITOR")
editors[1] = os.Getenv("VISUAL")
pathEnv := os.Getenv("PATH")
if pathEnv == "" {
userPath = basePath
} else {
userPath = strings.Split(pathEnv, ":")
}
}
func isExecutable(f os.FileInfo) bool {
// a file is considered executable if any of its execute bits are set
return f.Mode().Perm()&0111 != 0
}
func getFileName(f os.FileInfo) string {
_, fileName := filepath.Split(f.Name())
return fileName
}
func isRegularOrSymlink(finfo os.FileInfo) bool {
mode := finfo.Mode()
return mode.IsRegular() || mode&os.ModeSymlink != 0
}
func parseAlias(alias string) (name string, args []string) {
split := strings.Split(alias, " ")
if len(split) == 0 {
return "", nil
}
_, name = filepath.Split(split[0])
return name, split[1:]
}
func findExec(alias string) (execPath string, execArgs []string, err error) {
var files []os.FileInfo
name, args := parseAlias(alias)
for _, dir := range userPath {
if files, err = ioutil.ReadDir(dir); err != nil {
return
}
for _, finfo := range files {
if isRegularOrSymlink(finfo) &&
isExecutable(finfo) &&
getFileName(finfo) == name {
execPath = path.Join(dir, name)
execArgs = args
return
}
}
}
return "", nil, nil
}
func (e *Editor) clean() {
e.proc = nil
e.procState = nil
}
func findEditor(editors []string) (editor *Editor, err error) {
// cached
if selectedExec != "" {
if selectedArgs == nil {
panic(fmt.Sprintf("parsed args is empty but selected has been cached. %s", bugMessage))
}
return NewEditor(selectedExec, selectedArgs...), nil
}
for _, editor := range editors {
selectedExec, selectedArgs, err = findExec(editor)
if err != nil {
return nil, err
}
if selectedExec != "" {
return NewEditor(selectedExec, selectedArgs...), nil
}
}
return nil, fmt.Errorf("FindEditor: could not find an editor; please set $VISUAL or $EDITOR environment variables or install one of the following editors: %v", editors)
}
// NewEditor will create a new Editor struct with the given executable path
func NewEditor(abspath string, args ...string) *Editor {
return &Editor{path: abspath, Args: args}
}
// FindEditor will attempt to find the user's preferred editor
// by scanning the PATH in search of EDITOR and VISUAL env variables
// or will default to one of the commonly installed editors.
// Failure to find a suitable editor will result in an error
func FindEditor() (editor *Editor, err error) {
return findEditor(editors)
}
// Edit will attempt to edit the passed files with the user's preferred editor.
// Check the documentation of Editor.Edit and FindEditor for more information.
func Edit(files ...*os.File) error {
var err error
if selectedEditor == nil {
if selectedEditor, err = FindEditor(); err != nil {
return err
}
}
return selectedEditor.Edit(files...)
}
// EditTmp will place the contents of "in" in a temp file,
// start an editor process to edit the tmp file, and return
// the contents of the tmp file after the process exits, or an error
// if the editor exited with a non-zero status
func EditTmp(in string) (out string, err error) {
if selectedEditor == nil {
if selectedEditor, err = FindEditor(); err != nil {
return
}
}
return selectedEditor.EditTmp(in)
}
// Editor stores the information about an editor and its processes
type Editor struct {
path string
proc *os.Process
procState *os.ProcessState
// extra arguments to be passed to the editor process before filename(s)
Args []string
// extra process attributes to be used when spawning editor process
ProcAttrs *os.ProcAttr
}
// GetPath returns the editor's executable path
func (e *Editor) GetPath() string {
return e.path
}
// Edit will start a new process and wait for the process to exit.
// If the process exits with a non-zero status, this will be reported as an error
func (e *Editor) Edit(files ...*os.File) error {
var err error
if err = e.Start(files...); err != nil {
return err
}
if err = e.Wait(); err != nil {
return err
}
return nil
}
// Start will start a new process and pass the list of files as arguments
func (e *Editor) Start(f ...*os.File) error {
if e.proc != nil {
return fmt.Errorf("Editor.Start: there is already an ongoing session")
}
args := []string{""}
var fds = []*os.File{os.Stdin, os.Stdout, os.Stderr}
if e.Args != nil {
for _, arg := range e.Args {
args = append(args, arg)
}
}
for _, file := range f {
args = append(args, file.Name())
fds = append(fds, file)
}
var procAttrs *os.ProcAttr
if e.ProcAttrs == nil {
procAttrs = &os.ProcAttr{
Dir: "",
Env: nil,
Files: fds,
Sys: nil,
}
} else {
procAttrs = e.ProcAttrs
}
var err error
if e.proc, err = os.StartProcess(e.path, args, procAttrs); err != nil {
return err
}
return nil
}
// Wait waits for the current editor process to exit and returns
// an error if the editor exited with a non-zero status
func (e *Editor) Wait() error {
var err error
if e.proc == nil {
return fmt.Errorf("Editor.Wait: no process is currently running")
}
if e.procState, err = e.proc.Wait(); err != nil {
return err
}
if !e.procState.Success() {
return fmt.Errorf("Editor.Wait: editor process exited with non 0 status: %s", e.procState.String())
}
e.clean()
return nil
}
// EditTmp will place the contents of "in" in a temp file,
// start an editor process to edit the tmp file, and return
// the contents of the tmp file after the process exits, or an error
// if the editor exited with a non-zero status
func (e *Editor) EditTmp(in string) (out string, err error) {
var f *os.File
var outBytes []byte
if f, err = ioutil.TempFile("/tmp", "sedit_"); err != nil {
return
}
if err = ioutil.WriteFile(f.Name(), []byte(in), 0600); err != nil {
return
}
if err = e.Edit(f); err != nil {
return
}
if outBytes, err = ioutil.ReadFile(f.Name()); err != nil {
return
}
out = string(outBytes)
return
}
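// exampleEditTmp is a hypothetical usage sketch (not part of the original package):
// it round-trips a string through the user's preferred editor via the package-level
// EditTmp helper and prints the edited result.
func exampleEditTmp() {
	edited, err := EditTmp("# change me, then save and quit\n")
	if err != nil {
		fmt.Println("editing failed:", err)
		return
	}
	fmt.Println("edited contents:", edited)
}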
|
[
"\"EDITOR\"",
"\"VISUAL\"",
"\"PATH\""
] |
[] |
[
"VISUAL",
"EDITOR",
"PATH"
] |
[]
|
["VISUAL", "EDITOR", "PATH"]
|
go
| 3 | 0 | |
main.go
|
package main
/**
* This is the main file for the Task application
* License: MIT
**/
import (
"flag"
"fmt"
"log"
"net/http"
"os"
"strings"
"github.com/gorilla/csrf"
"github.com/gorilla/mux"
"github.com/gorilla/sessions"
"github.com/pbillerot/graduel/config"
"github.com/pbillerot/graduel/dico"
"github.com/pbillerot/graduel/views"
)
// Store is the cookie-based session store, keyed by the SESSION_KEY environment variable
var Store = sessions.NewCookieStore([]byte(os.Getenv("SESSION_KEY")))
func main() {
conf, err := config.ReadConfig()
var port *string
if err != nil {
port = flag.String("port", "", "IP address")
flag.Parse()
//The user is expected to pass ":8081"-style input; if they pass "8081"
//we'll prepend the required ':'
if !strings.HasPrefix(*port, ":") {
*port = ":" + *port
log.Println("port is " + *port)
}
conf.ServerPort = *port
}
views.PopulateTemplates(conf.Template)
r := mux.NewRouter()
r.HandleFunc("/about", views.RequiresLogin(views.AboutFunc))
r.HandleFunc("/login", views.LoginFunc)
r.HandleFunc("/logout", views.RequiresLogin(views.LogoutFunc))
r.HandleFunc("/signup", views.SignUpFunc)
r.HandleFunc("/", views.RequiresLogin(views.ShowPortailFunc))
r.PathPrefix("/").Handler(http.FileServer(http.Dir("public")))
r.HandleFunc("/favicon.ico", views.FaviconHandler)
// http.HandleFunc("/api/get-task/", views.GetTasksFuncAPI)
// http.HandleFunc("/api/get-deleted-task/", views.GetDeletedTaskFuncAPI)
// http.HandleFunc("/api/add-task/", views.AddTaskFuncAPI)
// http.HandleFunc("/api/update-task/", views.UpdateTaskFuncAPI)
// http.HandleFunc("/api/delete-task/", views.DeleteTaskFuncAPI)
// http.HandleFunc("/api/get-token/", views.GetTokenHandler)
// http.HandleFunc("/api/get-category/", views.GetCategoryFuncAPI)
// http.HandleFunc("/api/add-category/", views.AddCategoryFuncAPI)
// http.HandleFunc("/api/update-category/", views.UpdateCategoryFuncAPI)
// http.HandleFunc("/api/delete-category/", views.DeleteCategoryFuncAPI)
// Chargement du dictionnaire
// application, err := dico.LoadDico()
// log.Println(application)
dico.GetDico()
log.Println(fmt.Sprintf("running server on http://localhost%s/login", conf.ServerPort))
log.Fatal(http.ListenAndServe(conf.ServerPort,
wrapHandlerWithLogging(
csrf.Protect(
[]byte(os.Getenv("SECRET_KEY")),
csrf.Secure(conf.CsrfSecure),
)(r))))
}
func logRequest(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
log.Printf("%s %s %d %s\n", r.RemoteAddr, r.Method, http.StatusOK, r.URL)
handler.ServeHTTP(w, r)
})
}
// https://ndersson.me/post/capturing_status_code_in_net_http/
func wrapHandlerWithLogging(wrappedHandler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
lrw := newLoggingResponseWriter(w)
wrappedHandler.ServeHTTP(lrw, r)
statusCode := lrw.statusCode
log.Printf("%s %s %d %s", r.RemoteAddr, r.Method, statusCode, r.URL.Path)
// log.Printf("<-- %d %s", statusCode, http.StatusText(statusCode))
})
}
type loggingResponseWriter struct {
http.ResponseWriter
statusCode int
}
func newLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
return &loggingResponseWriter{w, http.StatusOK}
}
func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
}
| ["\"SESSION_KEY\"", "\"SECRET_KEY\""] | [] | ["SECRET_KEY", "SESSION_KEY"] | [] | ["SECRET_KEY", "SESSION_KEY"] | go | 2 | 0 | |
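wrapHandlerWithLogging above captures the response status by embedding http.ResponseWriter and overriding WriteHeader. The following standalone sketch exercises that same pattern with httptest so the captured code can be observed without starting a server; the type and variable names are illustrative, not graduel's.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// recorder mirrors loggingResponseWriter: it embeds http.ResponseWriter and
// keeps the last status code, defaulting to 200 OK.
type recorder struct {
	http.ResponseWriter
	status int
}

func (r *recorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "nope", http.StatusNotFound)
	})

	// Exercise the wrapper without a real server.
	rw := httptest.NewRecorder()
	lrw := &recorder{ResponseWriter: rw, status: http.StatusOK}
	h.ServeHTTP(lrw, httptest.NewRequest("GET", "/missing", nil))

	fmt.Println("captured status:", lrw.status) // prints 404
}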
rain_api_core/general_util.py
|
import logging
import os
import sys
import json
import time
import re
UNCENSORED_LOGGING = os.getenv("UNCENSORED_LOGGING")
LOG_CENSOR = [
{ "regex": r"(eyJ0e[A-Za-z0-9-_]{10})[A-Za-z0-9-_]*\.[A-Za-z0-9-_]*\.[A-Za-z0-9-_]*([A-Za-z0-9-_]{10})",
"replace": "\\g<1>XXX<JWTTOKEN>XXX\\g<2>",
"description": "X-out JWT Token payload"
},
{ "regex": r"(EDL-[A-Za-z0-9]+)[A-Za-z0-9]{40}([A-Za-z0-9]{10})",
"replace": "\\g<1>XXX<EDLTOKEN>XXX\\g<2>",
"description": "X-out non-JWT EDL token"
},
{ "regex": r"(Basic [A-Za-z0-9-_]{5})[A-Za-z0-9]*([A-Za-z0-9-_]{5})",
"replace": "\\g<1>XXX<BASICAUTH>XXX\\g<2>",
"description": "X-out Basic Auth Credentials"
},
{ "regex": r"([^A-Za-z0-9/+=][A-Za-z0-9/+=]{5})[A-Za-z0-9/+=]{30}([A-Za-z0-9/+=]{5}[^A-Za-z0-9/+=])",
"replace": "\\g<1>XXX<AWSSECRET>XXX\\g<2>",
"description": "X-out AWS Secret"
}
]
def return_timing_object(**timing):
timing_object = { "service": "Unknown", "endpoint": "Unknown", "method": "GET", "duration": 0, "unit": "milliseconds"}
timing_object.update({k.lower(): v for k,v in timing.items()})
return {"timing":timing_object }
def duration(time_in):
# Return the time duration in milliseconds
delta = time.time() - time_in
return(float("{:.2f}".format(delta*1000)))
def filter_log_credentials(msg):
if UNCENSORED_LOGGING:
return msg
for regex in LOG_CENSOR:
result = re.sub(regex["regex"], regex["replace"], msg, 0, re.MULTILINE)
if result:
msg = str(result)
return msg
def reformat_for_json(msg):
if type(msg) is dict:
return json.dumps(msg).replace("'", '"')
if '{' in msg:
try:
json_obj = json.loads(msg)
return json.dumps(json_obj).replace("'", '"')
except json.decoder.JSONDecodeError:
# Not JSON.
pass
return '"{0}"'.format(msg)
class CustomLogFilter(logging.Filter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.params = { 'build_vers': os.getenv("BUILD_VERSION", "NOBUILD"),
'maturity': os.getenv('MATURITY', 'DEV'),
'request_id': None,
'origin_request_id': None,
'user_id': None,
'route': None
}
def filter(self, record):
record.msg = filter_log_credentials(reformat_for_json(record.msg))
record.build_vers = self.params['build_vers']
record.maturity = self.params['maturity']
record.request_id = self.params['request_id']
record.origin_request_id = self.params['origin_request_id']
record.user_id = self.params['user_id']
record.route = self.params['route']
return True
def update(self, **context):
for key in context:
self.params.update({key: context[key]})
custom_log_filter = CustomLogFilter()
def log_context(**context):
custom_log_filter.update(**context)
def get_log():
loglevel = os.getenv('LOGLEVEL', 'INFO')
logtype = os.getenv('LOGTYPE', 'json')
if logtype == 'flat':
log_fmt_str = "%(levelname)s: %(message)s (%(filename)s line " + \
"%(lineno)d/%(build_vers)s/%(maturity)s) - " + \
"RequestId: %(request_id)s; OriginRequestId: %(origin_request_id)s; user_id: %(user_id)s; route: %(route)s"
else:
log_fmt_str = '{"level": "%(levelname)s", ' + \
'"RequestId": "%(request_id)s", ' + \
'"OriginRequestId": "%(origin_request_id)s", ' + \
'"message": %(message)s, ' + \
'"maturity": "%(maturity)s", ' + \
'"user_id": "%(user_id)s", ' + \
'"route": "%(route)s", ' + \
'"build": "%(build_vers)s", ' + \
'"filename": "%(filename)s", ' + \
'"lineno": %(lineno)d } '
logger = logging.getLogger()
for h in logger.handlers:
logger.removeHandler(h)
h = logging.StreamHandler(sys.stdout)
h.setFormatter(logging.Formatter(log_fmt_str))
h.addFilter(custom_log_filter)
logger.addHandler(h)
logger.setLevel(getattr(logging, loglevel))
if os.getenv("QUIETBOTO", 'TRUE').upper() == 'TRUE':
# BOTO, be quiet plz
logging.getLogger('boto3').setLevel(logging.ERROR)
logging.getLogger('botocore').setLevel(logging.ERROR)
logging.getLogger('nose').setLevel(logging.ERROR)
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
logging.getLogger('s3transfer').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('connectionpool').setLevel(logging.ERROR)
return logger
| [] | [] | ["MATURITY", "BUILD_VERSION", "UNCENSORED_LOGGING", "LOGTYPE", "QUIETBOTO", "LOGLEVEL"] | [] | ["MATURITY", "BUILD_VERSION", "UNCENSORED_LOGGING", "LOGTYPE", "QUIETBOTO", "LOGLEVEL"] | python | 6 | 0 | |
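filter_log_credentials above walks a table of regex rules and masks whatever matches, keeping a short prefix and suffix so the log line stays recognizable. As a language-neutral illustration, here is a minimal sketch of the same masking idea written in Go; the patterns and names below are illustrative assumptions, not the module's real LOG_CENSOR rules.

package main

import (
	"fmt"
	"regexp"
)

// rule pairs a pattern with its replacement, mirroring the idea of a censor table.
type rule struct {
	re      *regexp.Regexp
	replace string
}

// Illustrative rules only; they are not the module's real patterns.
var censorRules = []rule{
	// Keep a short prefix/suffix of a bearer token and X out the middle.
	{regexp.MustCompile(`(Bearer [A-Za-z0-9._-]{4})[A-Za-z0-9._-]+([A-Za-z0-9._-]{4})`), "${1}XXX<TOKEN>XXX${2}"},
	// Hide the password in credentials of the form user:password@host.
	{regexp.MustCompile(`(://[^:/@]+:)[^@]+(@)`), "${1}XXX${2}"},
}

// censor applies every rule in turn and returns the masked message.
func censor(msg string) string {
	for _, r := range censorRules {
		msg = r.re.ReplaceAllString(msg, r.replace)
	}
	return msg
}

func main() {
	fmt.Println(censor("Authorization: Bearer abcd1234efgh5678ijkl9012mnop"))
	fmt.Println(censor("https://alice:[email protected]/repo.git"))
}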
omas/omas_uda.py
|
'''save/load from UDA routines
-------
'''
from .omas_utils import *
from .omas_core import ODS
try:
    _pyuda_import_excp = None
import pyuda
except ImportError as _excp:
    _pyuda_import_excp = _excp
# replace pyuda class by a simple exception throwing class
class pyuda(object):
"""Import error UDA class"""
def __init__(self, *args, **kwargs):
raise _pyuda_import_excp
def load_omas_uda(
server=None,
port=None,
pulse=None,
run=0,
paths=None,
imas_version=os.environ.get('IMAS_VERSION', omas_rcparams['default_imas_version']),
skip_uncertainties=False,
verbose=True,
):
"""
Load UDA data to OMAS
:param server: UDA server
:param port: UDA port
:param pulse: UDA pulse
:param run: UDA run
:param paths: list of paths to load from IMAS
:param imas_version: IMAS version
:param skip_uncertainties: do not load uncertain data
:param verbose: print loading progress
:return: OMAS data set
"""
if pulse is None or run is None:
raise Exception('`pulse` and `run` must be specified')
if server is not None:
pyuda.Client.server = server
elif not os.environ['UDA_HOST']:
raise pyuda.UDAException('Must set UDA_HOST environmental variable')
if port is not None:
pyuda.Client.port = port
elif not os.environ['UDA_PORT']:
raise pyuda.UDAException('Must set UDA_PORT environmental variable')
# set this to get pyuda metadata (maybe of interest for future use):
# pyuda.Client.set_property(pyuda.Properties.PROP_META, True)
client = pyuda.Client()
# if paths is None then figure out what IDS are available and get ready to retrieve everything
if paths is None:
requested_paths = [[structure] for structure in list_structures(imas_version=imas_version)]
else:
        requested_paths = list(map(p2l, paths))
available_ds = []
for ds in numpy.unique([p[0] for p in requested_paths]):
if uda_get(client, [ds, 'ids_properties', 'homogeneous_time'], pulse, run) is None:
if verbose:
print('- ', ds)
continue
if verbose:
print('* ', ds)
available_ds.append(ds)
ods = ODS(consistency_check=False)
for k, ds in enumerate(available_ds):
filled_paths_in_uda(
ods,
client,
pulse,
run,
load_structure(ds, imas_version=imas_version)[1],
path=[],
paths=[],
requested_paths=requested_paths,
skip_uncertainties=skip_uncertainties,
perc=[float(k) / len(available_ds) * 100, float(k + 1) / len(available_ds) * 100, float(k) / len(available_ds) * 100],
)
ods.consistency_check = True
ods.prune()
if verbose:
print()
return ods
def filled_paths_in_uda(ods, client, pulse, run, ds, path, paths, requested_paths, skip_uncertainties, perc=[0.0, 100.0, 0.0]):
"""
Recursively traverse ODS and populate it with data from UDA
:param ods: ODS to be filled
:param client: UDA client
:param pulse: UDA pulse
:param run: UDA run
:param ds: hierarchical data schema as returned for example by load_structure('equilibrium')[1]
:param path: []
:param paths: []
:param requested_paths: list of paths that are requested
:param skip_uncertainties: do not load uncertain data
:return: filled ODS
"""
# leaf
if not len(ds):
return paths
# keys
keys = list(ds.keys())
if keys[0] == ':':
n = uda_get_shape(client, path, pulse, run)
if n is None:
return paths
keys = range(n)
# kid must be part of this list
if len(requested_paths):
request_check = [p[0] for p in requested_paths]
# traverse
n = float(len(keys))
for k, kid in enumerate(keys):
if isinstance(kid, str):
if skip_uncertainties and kid.endswith('_error_upper'):
continue
if kid.endswith('_error_lower') or kid.endswith('_error_index'):
continue
kkid = kid
else:
kkid = ':'
# leaf
if not len(ds[kkid]):
# append path if it has data
data = uda_get(client, path + [kid], pulse, run)
if data is not None:
# print(l2o(path))
ods[kid] = data
paths.append(path + [kid])
pp = perc[0] + (k + 1) / n * (perc[1] - perc[0])
if (pp - perc[2]) > 2:
perc[2] = pp
print('\rLoading: %3.1f%%' % pp, end='')
propagate_path = copy.copy(path)
propagate_path.append(kid)
# generate requested_paths one level deeper
propagate_requested_paths = requested_paths
if len(requested_paths):
if kid in request_check or (isinstance(kid, int) and ':' in request_check):
propagate_requested_paths = [p[1:] for p in requested_paths if len(p) > 1 and (kid == p[0] or p[0] == ':')]
else:
continue
# recursive call
pp0 = perc[0] + k / n * (perc[1] - perc[0])
pp1 = perc[0] + (k + 1) / n * (perc[1] - perc[0])
pp2 = perc[2]
paths = filled_paths_in_uda(
ods[kid], client, pulse, run, ds[kkid], propagate_path, [], propagate_requested_paths, skip_uncertainties, [pp0, pp1, pp2]
)
# generate uncertain data
if not skip_uncertainties and isinstance(ods.omas_data, dict):
for kid in list(ods.omas_data.keys()):
if kid.endswith('_error_upper') and kid[: -len('_error_upper')] in ods.omas_data:
try:
if isinstance(ods[kid], ODS):
pass
elif isinstance(ods[kid], float):
ods[kid[: -len('_error_upper')]] = ufloat(ods[kid[: -len('_error_upper')]], ods[kid])
else:
ods[kid[: -len('_error_upper')]] = uarray(ods[kid[: -len('_error_upper')]], ods[kid])
del ods[kid]
except Exception as _excp:
printe('Error loading uncertain data: %s' % kid)
return paths
def uda_get_shape(client, path, pulse, run):
"""
Get the number of elements in a structure of arrays
:param client: pyuda.Client object
:param path: ODS path expressed as list
:param pulse: UDA pulse
:param run: UDA run
:return: integer
"""
return uda_get(client, path + ['Shape_of'], pulse, run)
def offset(path, off):
"""
IMAS UDA indexing starts from one
:param path: ODS path expressed as list
:param off: offset to apply
:return: path with applied offset
"""
return [p if isinstance(p, str) else p + off for p in path]
def uda_get(client, path, pulse, run):
"""
Get the data from UDA
:param client: pyuda.Client object
:param path: ODS path expressed as list
:param pulse: UDA pulse
:param run: UDA run
:return: data
"""
try:
location = l2o(offset(path, +1)).replace('.', '/')
tmp = client.get(location, pulse)
if isinstance(tmp, pyuda._string.String):
return tmp.str
else:
return tmp.data
except pyuda.UDAException:
return None
| [] | [] | ["UDA_HOST", "UDA_PORT", "IMAS_VERSION"] | [] | ["UDA_HOST", "UDA_PORT", "IMAS_VERSION"] | python | 3 | 0 | |
setup.py
|
from numpy.distutils.core import setup, Extension
import os, sys, cdat_info
try:
sys.path.append(os.environ.get('BUILD_DIR',"build"))
externals = cdat_info.externals
except:
externals = cdat_info.__path__
externals = os.environ.get("EXTERNALS",externals)
target_prefix = sys.prefix
for i in range(len(sys.argv)):
a = sys.argv[i]
if a=='--prefix':
target_prefix=sys.argv[i+1]
sp = a.split("--prefix=")
if len(sp)==2:
target_prefix=sp[1]
setup (name = "udunits",
version='1.0',
author='[email protected]',
       description = "Python wrapping for UDUNITS package developed by UNIDATA",
url = "http://www-pcmdi.llnl.gov/software",
packages = ['unidata'],
package_dir = {'unidata': 'Lib'},
ext_modules = [
Extension('unidata.udunits_wrap',
['Src/udunits_wrap.c',
## 'Src/utparse.c',
## 'Src/utlib.c',
## 'Src/utscan.c',
],
include_dirs = [os.path.join(externals,'include')],
library_dirs = [os.path.join(externals,'lib')],
libraries=['udunits2','expat']
)
]
)
f=open('Src/udunits.dat')
version=sys.version.split()[0].split('.')
version='.'.join(version[:2])
try:
f2=open(target_prefix+'/lib/python'+version+'/site-packages/unidata/udunits.dat','w')
except:
f2=open(target_prefix+'/lib64/python'+version+'/site-packages/unidata/udunits.dat','w')
for l in f:
f2.write(l)
f2.close()
| [] | [] | ["BUILD_DIR", "EXTERNALS"] | [] | ["BUILD_DIR", "EXTERNALS"] | python | 2 | 0 | |
vendor/src/golang.org/x/tools/dashboard/builder/main.go
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"golang.org/x/tools/go/vcs"
)
const (
codeProject = "go"
codePyScript = "misc/dashboard/googlecode_upload.py"
gofrontendImportPath = "code.google.com/p/gofrontend"
mkdirPerm = 0750
waitInterval = 30 * time.Second // time to wait before checking for new revs
pkgBuildInterval = 24 * time.Hour // rebuild packages every 24 hours
)
type Builder struct {
goroot *Repo
name string
goos, goarch string
key string
env builderEnv
	// Last benchmarking workpath. We reuse it if we do successive benchmarks on the same commit.
lastWorkpath string
}
var (
doBuild = flag.Bool("build", true, "Build and test packages")
doBench = flag.Bool("bench", false, "Run benchmarks")
buildroot = flag.String("buildroot", defaultBuildRoot(), "Directory under which to build")
dashboard = flag.String("dashboard", "https://build.golang.org", "Dashboard app base path")
buildRelease = flag.Bool("release", false, "Build and upload binary release archives")
buildRevision = flag.String("rev", "", "Build specified revision and exit")
buildCmd = flag.String("cmd", filepath.Join(".", allCmd), "Build command (specify relative to go/src/)")
buildTool = flag.String("tool", "go", "Tool to build.")
gcPath = flag.String("gcpath", "go.googlesource.com/go", "Path to download gc from")
gccPath = flag.String("gccpath", "https://github.com/mirrors/gcc.git", "Path to download gcc from")
gccOpts = flag.String("gccopts", "", "Command-line options to pass to `make` when building gccgo")
benchPath = flag.String("benchpath", "golang.org/x/benchmarks/bench", "Path to download benchmarks from")
failAll = flag.Bool("fail", false, "fail all builds")
parallel = flag.Bool("parallel", false, "Build multiple targets in parallel")
buildTimeout = flag.Duration("buildTimeout", 60*time.Minute, "Maximum time to wait for builds and tests")
cmdTimeout = flag.Duration("cmdTimeout", 10*time.Minute, "Maximum time to wait for an external command")
benchNum = flag.Int("benchnum", 5, "Run each benchmark that many times")
benchTime = flag.Duration("benchtime", 5*time.Second, "Benchmarking time for a single benchmark run")
benchMem = flag.Int("benchmem", 64, "Approx RSS value to aim at in benchmarks, in MB")
	fileLock = flag.String("filelock", "", "File to lock around benchmarking (synchronizes several builders)")
verbose = flag.Bool("v", false, "verbose")
report = flag.Bool("report", true, "whether to report results to the dashboard")
)
var (
binaryTagRe = regexp.MustCompile(`^(release\.r|weekly\.)[0-9\-.]+`)
releaseRe = regexp.MustCompile(`^release\.r[0-9\-.]+`)
allCmd = "all" + suffix
makeCmd = "make" + suffix
raceCmd = "race" + suffix
cleanCmd = "clean" + suffix
suffix = defaultSuffix()
exeExt = defaultExeExt()
benchCPU = CpuList([]int{1})
benchAffinity = CpuList([]int{})
benchMutex *FileMutex // Isolates benchmarks from other activities
)
// CpuList is used as flag.Value for -benchcpu flag.
type CpuList []int
func (cl *CpuList) String() string {
str := ""
for _, cpu := range *cl {
if str == "" {
str = strconv.Itoa(cpu)
} else {
str += fmt.Sprintf(",%v", cpu)
}
}
return str
}
func (cl *CpuList) Set(str string) error {
*cl = []int{}
for _, val := range strings.Split(str, ",") {
val = strings.TrimSpace(val)
if val == "" {
continue
}
cpu, err := strconv.Atoi(val)
if err != nil || cpu <= 0 {
return fmt.Errorf("%v is a bad value for GOMAXPROCS", val)
}
*cl = append(*cl, cpu)
}
if len(*cl) == 0 {
*cl = append(*cl, 1)
}
return nil
}
func main() {
flag.Var(&benchCPU, "benchcpu", "Comma-delimited list of GOMAXPROCS values for benchmarking")
flag.Var(&benchAffinity, "benchaffinity", "Comma-delimited list of affinity values for benchmarking")
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "usage: %s goos-goarch...\n", os.Args[0])
flag.PrintDefaults()
os.Exit(2)
}
flag.Parse()
if len(flag.Args()) == 0 {
flag.Usage()
}
vcs.ShowCmd = *verbose
vcs.Verbose = *verbose
benchMutex = MakeFileMutex(*fileLock)
rr, err := repoForTool()
if err != nil {
log.Fatal("Error finding repository:", err)
}
rootPath := filepath.Join(*buildroot, "goroot")
goroot := &Repo{
Path: rootPath,
Master: rr,
}
// set up work environment, use existing environment if possible
if goroot.Exists() || *failAll {
log.Print("Found old workspace, will use it")
} else {
if err := os.RemoveAll(*buildroot); err != nil {
log.Fatalf("Error removing build root (%s): %s", *buildroot, err)
}
if err := os.Mkdir(*buildroot, mkdirPerm); err != nil {
log.Fatalf("Error making build root (%s): %s", *buildroot, err)
}
var err error
goroot, err = RemoteRepo(goroot.Master.Root, rootPath)
if err != nil {
log.Fatalf("Error creating repository with url (%s): %s", goroot.Master.Root, err)
}
goroot, err = goroot.Clone(goroot.Path, "")
if err != nil {
log.Fatal("Error cloning repository:", err)
}
}
// set up builders
builders := make([]*Builder, len(flag.Args()))
for i, name := range flag.Args() {
b, err := NewBuilder(goroot, name)
if err != nil {
log.Fatal(err)
}
builders[i] = b
}
if *failAll {
failMode(builders)
return
}
// if specified, build revision and return
if *buildRevision != "" {
hash, err := goroot.FullHash(*buildRevision)
if err != nil {
log.Fatal("Error finding revision: ", err)
}
var exitErr error
for _, b := range builders {
if err := b.buildHash(hash); err != nil {
log.Println(err)
exitErr = err
}
}
if exitErr != nil && !*report {
// This mode (-report=false) is used for
// testing Docker images, making sure the
// environment is correctly configured. For
// testing, we want a non-zero exit status, as
// returned by log.Fatal:
log.Fatal("Build error.")
}
return
}
if !*doBuild && !*doBench {
fmt.Fprintf(os.Stderr, "Nothing to do, exiting (specify either -build or -bench or both)\n")
os.Exit(2)
}
// go continuous build mode
// check for new commits and build them
benchMutex.RLock()
for {
built := false
t := time.Now()
if *parallel {
done := make(chan bool)
for _, b := range builders {
go func(b *Builder) {
done <- b.buildOrBench()
}(b)
}
for _ = range builders {
built = <-done || built
}
} else {
for _, b := range builders {
built = b.buildOrBench() || built
}
}
// sleep if there was nothing to build
benchMutex.RUnlock()
if !built {
time.Sleep(waitInterval)
}
benchMutex.RLock()
// sleep if we're looping too fast.
dt := time.Now().Sub(t)
if dt < waitInterval {
time.Sleep(waitInterval - dt)
}
}
}
// go continuous fail mode
// check for new commits and FAIL them
func failMode(builders []*Builder) {
for {
built := false
for _, b := range builders {
built = b.failBuild() || built
}
// stop if there was nothing to fail
if !built {
break
}
}
}
func NewBuilder(goroot *Repo, name string) (*Builder, error) {
b := &Builder{
goroot: goroot,
name: name,
}
// get builderEnv for this tool
var err error
if b.env, err = b.builderEnv(name); err != nil {
return nil, err
}
if *report {
err = b.setKey()
}
return b, err
}
func (b *Builder) setKey() error {
// read keys from keyfile
fn := ""
switch runtime.GOOS {
case "plan9":
fn = os.Getenv("home")
case "windows":
fn = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
default:
fn = os.Getenv("HOME")
}
fn = filepath.Join(fn, ".gobuildkey")
if s := fn + "-" + b.name; isFile(s) { // builder-specific file
fn = s
}
c, err := ioutil.ReadFile(fn)
if err != nil {
// If the on-disk file doesn't exist, also try the
// Google Compute Engine metadata.
if v := gceProjectMetadata("buildkey-" + b.name); v != "" {
b.key = v
return nil
}
return fmt.Errorf("readKeys %s (%s): %s", b.name, fn, err)
}
b.key = string(bytes.TrimSpace(bytes.SplitN(c, []byte("\n"), 2)[0]))
return nil
}
func gceProjectMetadata(attr string) string {
client := &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 750 * time.Millisecond,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 750 * time.Millisecond,
},
}
req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/"+attr, nil)
req.Header.Set("Metadata-Flavor", "Google")
res, err := client.Do(req)
if err != nil {
return ""
}
defer res.Body.Close()
if res.StatusCode != 200 {
return ""
}
slurp, err := ioutil.ReadAll(res.Body)
if err != nil {
return ""
}
return string(bytes.TrimSpace(slurp))
}
// builderEnv returns the builderEnv for this buildTool.
func (b *Builder) builderEnv(name string) (builderEnv, error) {
// get goos/goarch from builder string
s := strings.SplitN(b.name, "-", 3)
if len(s) < 2 {
return nil, fmt.Errorf("unsupported builder form: %s", name)
}
b.goos = s[0]
b.goarch = s[1]
switch *buildTool {
case "go":
return &goEnv{
goos: s[0],
goarch: s[1],
}, nil
case "gccgo":
return &gccgoEnv{}, nil
default:
return nil, fmt.Errorf("unsupported build tool: %s", *buildTool)
}
}
// buildCmd returns the build command to invoke.
// Builders which contain the string '-race' in their
// name will override *buildCmd and return raceCmd.
func (b *Builder) buildCmd() string {
if strings.Contains(b.name, "-race") {
return raceCmd
}
return *buildCmd
}
// buildOrBench checks for a new commit for this builder
// and builds or benchmarks it if one is found.
// It returns true if a build/benchmark was attempted.
func (b *Builder) buildOrBench() bool {
var kinds []string
if *doBuild {
kinds = append(kinds, "build-go-commit")
}
if *doBench {
kinds = append(kinds, "benchmark-go-commit")
}
kind, hash, benchs, err := b.todo(kinds, "", "")
if err != nil {
log.Println(err)
return false
}
if hash == "" {
return false
}
switch kind {
case "build-go-commit":
if err := b.buildHash(hash); err != nil {
log.Println(err)
}
return true
case "benchmark-go-commit":
if err := b.benchHash(hash, benchs); err != nil {
log.Println(err)
}
return true
default:
log.Printf("Unknown todo kind %v", kind)
return false
}
}
func (b *Builder) buildHash(hash string) error {
log.Println(b.name, "building", hash)
// create place in which to do work
workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12])
if err := os.Mkdir(workpath, mkdirPerm); err != nil {
if err2 := removePath(workpath); err2 != nil {
return err
}
if err := os.Mkdir(workpath, mkdirPerm); err != nil {
return err
}
}
defer removePath(workpath)
buildLog, runTime, err := b.buildRepoOnHash(workpath, hash, b.buildCmd())
if err != nil {
// record failure
return b.recordResult(false, "", hash, "", buildLog, runTime)
}
// record success
if err = b.recordResult(true, "", hash, "", "", runTime); err != nil {
return fmt.Errorf("recordResult: %s", err)
}
if *buildTool == "go" {
// build sub-repositories
goRoot := filepath.Join(workpath, *buildTool)
goPath := workpath
b.buildSubrepos(goRoot, goPath, hash)
}
return nil
}
// buildRepoOnHash clones repo into workpath and builds it.
func (b *Builder) buildRepoOnHash(workpath, hash, cmd string) (buildLog string, runTime time.Duration, err error) {
// Delete the previous workdir, if necessary
// (benchmarking code can execute several benchmarks in the same workpath).
if b.lastWorkpath != "" {
if b.lastWorkpath == workpath {
panic("workpath already exists: " + workpath)
}
removePath(b.lastWorkpath)
b.lastWorkpath = ""
}
// pull before cloning to ensure we have the revision
if err = b.goroot.Pull(); err != nil {
buildLog = err.Error()
return
}
// set up builder's environment.
srcDir, err := b.env.setup(b.goroot, workpath, hash, b.envv())
if err != nil {
buildLog = err.Error()
return
}
// build
var buildbuf bytes.Buffer
logfile := filepath.Join(workpath, "build.log")
f, err := os.Create(logfile)
if err != nil {
return err.Error(), 0, err
}
defer f.Close()
w := io.MultiWriter(f, &buildbuf)
// go's build command is a script relative to the srcDir, whereas
// gccgo's build command is usually "make check-go" in the srcDir.
if *buildTool == "go" {
if !filepath.IsAbs(cmd) {
cmd = filepath.Join(srcDir, cmd)
}
}
// naive splitting of command from its arguments:
args := strings.Split(cmd, " ")
c := exec.Command(args[0], args[1:]...)
c.Dir = srcDir
c.Env = b.envv()
if *verbose {
c.Stdout = io.MultiWriter(os.Stdout, w)
c.Stderr = io.MultiWriter(os.Stderr, w)
} else {
c.Stdout = w
c.Stderr = w
}
startTime := time.Now()
err = run(c, runTimeout(*buildTimeout))
runTime = time.Since(startTime)
if err != nil {
fmt.Fprintf(w, "Build complete, duration %v. Result: error: %v\n", runTime, err)
} else {
fmt.Fprintf(w, "Build complete, duration %v. Result: success\n", runTime)
}
return buildbuf.String(), runTime, err
}
// failBuild checks for a new commit for this builder
// and fails it if one is found.
// It returns true if a build was "attempted".
func (b *Builder) failBuild() bool {
_, hash, _, err := b.todo([]string{"build-go-commit"}, "", "")
if err != nil {
log.Println(err)
return false
}
if hash == "" {
return false
}
log.Printf("fail %s %s\n", b.name, hash)
if err := b.recordResult(false, "", hash, "", "auto-fail mode run by "+os.Getenv("USER"), 0); err != nil {
log.Print(err)
}
return true
}
func (b *Builder) buildSubrepos(goRoot, goPath, goHash string) {
for _, pkg := range dashboardPackages("subrepo") {
// get the latest todo for this package
_, hash, _, err := b.todo([]string{"build-package"}, pkg, goHash)
if err != nil {
log.Printf("buildSubrepos %s: %v", pkg, err)
continue
}
if hash == "" {
continue
}
// build the package
if *verbose {
log.Printf("buildSubrepos %s: building %q", pkg, hash)
}
buildLog, err := b.buildSubrepo(goRoot, goPath, pkg, hash)
if err != nil {
if buildLog == "" {
buildLog = err.Error()
}
log.Printf("buildSubrepos %s: %v", pkg, err)
}
// record the result
err = b.recordResult(err == nil, pkg, hash, goHash, buildLog, 0)
if err != nil {
log.Printf("buildSubrepos %s: %v", pkg, err)
}
}
}
// buildSubrepo fetches the given package, updates it to the specified hash,
// and runs 'go test -short pkg/...'. It returns the build log and any error.
func (b *Builder) buildSubrepo(goRoot, goPath, pkg, hash string) (string, error) {
goTool := filepath.Join(goRoot, "bin", "go") + exeExt
env := append(b.envv(), "GOROOT="+goRoot, "GOPATH="+goPath)
// add $GOROOT/bin and $GOPATH/bin to PATH
for i, e := range env {
const p = "PATH="
if !strings.HasPrefix(e, p) {
continue
}
sep := string(os.PathListSeparator)
env[i] = p + filepath.Join(goRoot, "bin") + sep + filepath.Join(goPath, "bin") + sep + e[len(p):]
}
// HACK: check out to new sub-repo location instead of old location.
pkg = strings.Replace(pkg, "code.google.com/p/go.", "golang.org/x/", 1)
// fetch package and dependencies
var outbuf bytes.Buffer
err := run(exec.Command(goTool, "get", "-d", pkg+"/..."), runEnv(env), allOutput(&outbuf), runDir(goPath))
if err != nil {
return outbuf.String(), err
}
outbuf.Reset()
// hg update to the specified hash
pkgmaster, err := vcs.RepoRootForImportPath(pkg, *verbose)
if err != nil {
return "", fmt.Errorf("Error finding subrepo (%s): %s", pkg, err)
}
repo := &Repo{
Path: filepath.Join(goPath, "src", pkg),
Master: pkgmaster,
}
if err := repo.UpdateTo(hash); err != nil {
return "", err
}
// test the package
err = run(exec.Command(goTool, "test", "-short", pkg+"/..."),
runTimeout(*buildTimeout), runEnv(env), allOutput(&outbuf), runDir(goPath))
return outbuf.String(), err
}
// repoForTool returns the correct RepoRoot for the buildTool, or an error if
// the tool is unknown.
func repoForTool() (*vcs.RepoRoot, error) {
switch *buildTool {
case "go":
return vcs.RepoRootForImportPath(*gcPath, *verbose)
case "gccgo":
return vcs.RepoRootForImportPath(gofrontendImportPath, *verbose)
default:
return nil, fmt.Errorf("unknown build tool: %s", *buildTool)
}
}
func isDirectory(name string) bool {
s, err := os.Stat(name)
return err == nil && s.IsDir()
}
func isFile(name string) bool {
s, err := os.Stat(name)
return err == nil && !s.IsDir()
}
// defaultSuffix returns file extension used for command files in
// current os environment.
func defaultSuffix() string {
switch runtime.GOOS {
case "windows":
return ".bat"
case "plan9":
return ".rc"
default:
return ".bash"
}
}
func defaultExeExt() string {
switch runtime.GOOS {
case "windows":
return ".exe"
default:
return ""
}
}
// defaultBuildRoot returns default buildroot directory.
func defaultBuildRoot() string {
var d string
if runtime.GOOS == "windows" {
// will use c:\, otherwise absolute paths become too long
// during builder run, see http://golang.org/issue/3358.
d = `c:\`
} else {
d = os.TempDir()
}
return filepath.Join(d, "gobuilder")
}
// removePath is a more robust version of os.RemoveAll.
// On Windows, if the remove fails (which can happen if a test/benchmark times out
// and keeps some files open), it tries to rename the dir.
func removePath(path string) error {
if err := os.RemoveAll(path); err != nil {
if runtime.GOOS == "windows" {
err = os.Rename(path, filepath.Clean(path)+"_remove_me")
}
return err
}
return nil
}
| ["\"home\"", "\"HOMEDRIVE\"", "\"HOMEPATH\"", "\"HOME\"", "\"USER\""] | [] | ["home", "HOMEPATH", "HOMEDRIVE", "USER", "HOME"] | [] | ["home", "HOMEPATH", "HOMEDRIVE", "USER", "HOME"] | go | 5 | 0 | |
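CpuList above works because it satisfies flag.Value, so flag.Var can parse -benchcpu=1,2,4 into a slice. A stripped-down sketch of that mechanism follows; intList is an illustrative stand-in, not the builder's type.

package main

import (
	"flag"
	"fmt"
	"strconv"
	"strings"
)

// intList is a minimal flag.Value that parses "1,2,4" into []int.
type intList []int

func (l *intList) String() string { return fmt.Sprint([]int(*l)) }

func (l *intList) Set(s string) error {
	*l = nil
	for _, field := range strings.Split(s, ",") {
		field = strings.TrimSpace(field)
		if field == "" {
			continue
		}
		n, err := strconv.Atoi(field)
		if err != nil || n <= 0 {
			return fmt.Errorf("%q is not a positive integer", field)
		}
		*l = append(*l, n)
	}
	return nil
}

func main() {
	var cpus intList = []int{1} // default, analogous to benchCPU
	flag.Var(&cpus, "benchcpu", "comma-delimited list of GOMAXPROCS values")
	flag.Parse()
	fmt.Println("benchcpu:", cpus)
}

Invoking the sketch with -benchcpu=1,2,4 would print benchcpu: [1 2 4].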
cmd/zoekt-webserver/main.go
|
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Command zoekt-webserver responds to search queries, using an index generated
// by another program such as zoekt-indexserver.
package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"syscall"
"time"
"cloud.google.com/go/profiler"
"github.com/google/zoekt"
"github.com/google/zoekt/build"
"github.com/google/zoekt/debugserver"
"github.com/google/zoekt/query"
"github.com/google/zoekt/shards"
"github.com/google/zoekt/stream"
"github.com/google/zoekt/web"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.uber.org/automaxprocs/maxprocs"
"github.com/uber/jaeger-client-go"
jaegercfg "github.com/uber/jaeger-client-go/config"
jaegerlog "github.com/uber/jaeger-client-go/log"
jaegermetrics "github.com/uber/jaeger-lib/metrics"
)
const logFormat = "2006-01-02T15-04-05.999999999Z07"
func divertLogs(dir string, interval time.Duration) {
t := time.NewTicker(interval)
var last *os.File
for {
nm := filepath.Join(dir, fmt.Sprintf("zoekt-webserver.%s.%d.log", time.Now().Format(logFormat), os.Getpid()))
fmt.Fprintf(os.Stderr, "writing logs to %s\n", nm)
f, err := os.Create(nm)
if err != nil {
// There is not much we can do now.
fmt.Fprintf(os.Stderr, "can't create output file %s: %v\n", nm, err)
os.Exit(2)
}
log.SetOutput(f)
last.Close()
last = f
<-t.C
}
}
const templateExtension = ".html.tpl"
func loadTemplates(tpl *template.Template, dir string) error {
fs, err := filepath.Glob(dir + "/*" + templateExtension)
if err != nil {
log.Fatalf("Glob: %v", err)
}
log.Printf("loading templates: %v", fs)
for _, fn := range fs {
content, err := ioutil.ReadFile(fn)
if err != nil {
return err
}
base := filepath.Base(fn)
base = strings.TrimSuffix(base, templateExtension)
if _, err := tpl.New(base).Parse(string(content)); err != nil {
return fmt.Errorf("template.Parse(%s): %v", fn, err)
}
}
return nil
}
func writeTemplates(dir string) error {
if dir == "" {
return fmt.Errorf("must set --template_dir")
}
for k, v := range web.TemplateText {
nm := filepath.Join(dir, k+templateExtension)
if err := ioutil.WriteFile(nm, []byte(v), 0o644); err != nil {
return err
}
}
return nil
}
func main() {
logDir := flag.String("log_dir", "", "log to this directory rather than stderr.")
logRefresh := flag.Duration("log_refresh", 24*time.Hour, "if using --log_dir, start writing a new file this often.")
listen := flag.String("listen", ":6070", "listen on this address.")
index := flag.String("index", build.DefaultDir, "set index directory to use")
html := flag.Bool("html", true, "enable HTML interface")
enableRPC := flag.Bool("rpc", false, "enable go/net RPC")
print := flag.Bool("print", false, "enable local result URLs")
enablePprof := flag.Bool("pprof", false, "set to enable remote profiling.")
sslCert := flag.String("ssl_cert", "", "set path to SSL .pem holding certificate.")
sslKey := flag.String("ssl_key", "", "set path to SSL .pem holding key.")
hostCustomization := flag.String(
"host_customization", "",
"specify host customization, as HOST1=QUERY,HOST2=QUERY")
templateDir := flag.String("template_dir", "", "set directory from which to load custom .html.tpl template files")
dumpTemplates := flag.Bool("dump_templates", false, "dump templates into --template_dir and exit.")
version := flag.Bool("version", false, "Print version number")
flag.Parse()
if *version {
fmt.Printf("zoekt-webserver version %q\n", zoekt.Version)
os.Exit(0)
}
if *dumpTemplates {
if err := writeTemplates(*templateDir); err != nil {
log.Fatal(err)
}
os.Exit(0)
}
initializeJaeger()
initializeGoogleCloudProfiler()
if *logDir != "" {
if fi, err := os.Lstat(*logDir); err != nil || !fi.IsDir() {
log.Fatalf("%s is not a directory", *logDir)
}
// We could do fdup acrobatics to also redirect
// stderr, but it is simpler and more portable for the
// caller to divert stderr output if necessary.
go divertLogs(*logDir, *logRefresh)
}
// Tune GOMAXPROCS to match Linux container CPU quota.
_, _ = maxprocs.Set()
if err := os.MkdirAll(*index, 0o755); err != nil {
log.Fatal(err)
}
mustRegisterDiskMonitor(*index)
searcher, err := shards.NewDirectorySearcher(*index)
if err != nil {
log.Fatal(err)
}
// Sourcegraph: Add logging if debug logging enabled
logLvl := os.Getenv("SRC_LOG_LEVEL")
debug := logLvl == "" || strings.EqualFold(logLvl, "dbug")
if debug {
searcher = &loggedSearcher{Streamer: searcher}
}
s := &web.Server{
Searcher: searcher,
Top: web.Top,
Version: zoekt.Version,
}
if *templateDir != "" {
if err := loadTemplates(s.Top, *templateDir); err != nil {
log.Fatalf("loadTemplates: %v", err)
}
}
s.Print = *print
s.HTML = *html
s.RPC = *enableRPC
if *hostCustomization != "" {
s.HostCustomQueries = map[string]string{}
for _, h := range strings.SplitN(*hostCustomization, ",", -1) {
if len(h) == 0 {
continue
}
fields := strings.SplitN(h, "=", 2)
if len(fields) < 2 {
log.Fatalf("invalid host_customization %q", h)
}
s.HostCustomQueries[fields[0]] = fields[1]
}
}
handler, err := web.NewMux(s)
if err != nil {
log.Fatal(err)
}
debugserver.AddHandlers(handler, *enablePprof)
handler.HandleFunc("/healthz", healthz)
// Sourcegraph: We use environment variables to configure watchdog since
// they are more convenient than flags in containerized environments.
watchdogTick := 30 * time.Second
if v := os.Getenv("ZOEKT_WATCHDOG_TICK"); v != "" {
watchdogTick, _ = time.ParseDuration(v)
log.Printf("custom ZOEKT_WATCHDOG_TICK=%v", watchdogTick)
}
watchdogErrCount := 3
if v := os.Getenv("ZOEKT_WATCHDOG_ERRORS"); v != "" {
watchdogErrCount, _ = strconv.Atoi(v)
log.Printf("custom ZOEKT_WATCHDOG_ERRORS=%d", watchdogErrCount)
}
watchdogAddr := "http://" + *listen
if *sslCert != "" || *sslKey != "" {
watchdogAddr = "https://" + *listen
}
if watchdogErrCount > 0 && watchdogTick > 0 {
go watchdog(watchdogTick, watchdogErrCount, watchdogAddr)
} else {
log.Println("watchdog disabled")
}
srv := &http.Server{Addr: *listen, Handler: handler}
go func() {
if debug {
log.Printf("listening on %v", *listen)
}
var err error
if *sslCert != "" || *sslKey != "" {
err = srv.ListenAndServeTLS(*sslCert, *sslKey)
} else {
err = srv.ListenAndServe()
}
if err != http.ErrServerClosed {
// Fatal otherwise shutdownOnSignal will block
log.Fatalf("ListenAndServe: %v", err)
}
}()
if err := shutdownOnSignal(srv); err != nil {
log.Fatalf("http.Server.Shutdown: %v", err)
}
}
// shutdownOnSignal will listen for SIGINT or SIGTERM and call
// srv.Shutdown. Note it doesn't call anything else for shutting down. Notably
// our RPC framework doesn't allow us to drain connections, so when
// Shutdown is called all in-flight RPC requests will be closed.
func shutdownOnSignal(srv *http.Server) error {
c := make(chan os.Signal, 3)
signal.Notify(c, os.Interrupt) // terminal C-c and goreman
signal.Notify(c, syscall.SIGTERM) // Kubernetes
sig := <-c
// If we receive another signal, immediate shutdown
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
select {
case <-ctx.Done():
case sig := <-c:
log.Printf("received another signal (%v), immediate shutdown", sig)
cancel()
}
}()
// SIGTERM is sent by kubernetes. We give 15s to allow our endpoint to be
// removed from service discovery before draining traffic.
if sig == syscall.SIGTERM {
wait := 15 * time.Second
log.Printf("received SIGTERM, waiting %v before shutting down", wait)
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(wait):
}
}
// Wait for 10s to drain ongoing requests. Kubernetes gives us 30s to
// shutdown, we have already used 15s waiting for our endpoint removal to
// propagate.
ctx, cancel2 := context.WithTimeout(ctx, 10*time.Second)
defer cancel2()
log.Printf("shutting down")
return srv.Shutdown(ctx)
}
// Always returns 200 OK.
// Used for kubernetes liveness and readiness checks.
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
func healthz(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.Write([]byte("OK"))
}
func watchdogOnce(ctx context.Context, client *http.Client, addr string) error {
defer metricWatchdogTotal.Inc()
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(30*time.Second))
defer cancel()
req, err := http.NewRequest("GET", addr, nil)
if err != nil {
return err
}
req = req.WithContext(ctx)
resp, err := client.Do(req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("watchdog: status %v", resp.StatusCode)
}
return nil
}
func watchdog(dt time.Duration, maxErrCount int, addr string) {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{
Transport: tr,
}
tick := time.NewTicker(dt)
errCount := 0
for range tick.C {
err := watchdogOnce(context.Background(), client, addr)
if err != nil {
errCount++
metricWatchdogErrors.Set(float64(errCount))
metricWatchdogErrorsTotal.Inc()
if errCount >= maxErrCount {
log.Panicf("watchdog: %v", err)
} else {
log.Printf("watchdog: failed, will try %d more times: %v", maxErrCount-errCount, err)
}
} else if errCount > 0 {
errCount = 0
metricWatchdogErrors.Set(float64(errCount))
log.Printf("watchdog: success, resetting error count")
}
}
}
func mustRegisterDiskMonitor(path string) {
prometheus.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "src_disk_space_available_bytes",
Help: "Amount of free space disk space.",
ConstLabels: prometheus.Labels{"path": path},
}, func() float64 {
var stat syscall.Statfs_t
_ = syscall.Statfs(path, &stat)
return float64(stat.Bavail * uint64(stat.Bsize))
}))
prometheus.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "src_disk_space_total_bytes",
Help: "Amount of total disk space.",
ConstLabels: prometheus.Labels{"path": path},
}, func() float64 {
var stat syscall.Statfs_t
_ = syscall.Statfs(path, &stat)
return float64(stat.Blocks * uint64(stat.Bsize))
}))
}
type loggedSearcher struct {
zoekt.Streamer
}
func (s *loggedSearcher) Search(ctx context.Context, q query.Q, opts *zoekt.SearchOptions) (*zoekt.SearchResult, error) {
sr, err := s.Streamer.Search(ctx, q, opts)
if err != nil {
log.Printf("EROR: search failed q=%s: %s", q.String(), err.Error())
}
if sr != nil {
log.Printf("DBUG: search q=%s Options{EstimateDocCount=%v Whole=%v ShardMaxMatchCount=%v TotalMaxMatchCount=%v ShardMaxImportantMatch=%v TotalMaxImportantMatch=%v MaxWallTime=%v MaxDocDisplayCount=%v} Stats{ContentBytesLoaded=%v IndexBytesLoaded=%v Crashes=%v Duration=%v FileCount=%v ShardFilesConsidered=%v FilesConsidered=%v FilesLoaded=%v FilesSkipped=%v ShardsSkipped=%v MatchCount=%v NgramMatches=%v Wait=%v}", q.String(), opts.EstimateDocCount, opts.Whole, opts.ShardMaxMatchCount, opts.TotalMaxMatchCount, opts.ShardMaxImportantMatch, opts.TotalMaxImportantMatch, opts.MaxWallTime, opts.MaxDocDisplayCount, sr.Stats.ContentBytesLoaded, sr.Stats.IndexBytesLoaded, sr.Stats.Crashes, sr.Stats.Duration, sr.Stats.FileCount, sr.Stats.ShardFilesConsidered, sr.Stats.FilesConsidered, sr.Stats.FilesLoaded, sr.Stats.FilesSkipped, sr.Stats.ShardsSkipped, sr.Stats.MatchCount, sr.Stats.NgramMatches, sr.Stats.Wait)
}
return sr, err
}
func (s *loggedSearcher) StreamSearch(ctx context.Context, q query.Q, opts *zoekt.SearchOptions, sender zoekt.Sender) error {
var (
mu sync.Mutex
stats zoekt.Stats
)
err := s.Streamer.StreamSearch(ctx, q, opts, stream.SenderFunc(func(event *zoekt.SearchResult) {
mu.Lock()
stats.Add(event.Stats)
mu.Unlock()
sender.Send(event)
}))
if err != nil {
log.Printf("EROR: search failed q=%s: %s", q.String(), err.Error())
}
log.Printf("DBUG: search q=%s Options{EstimateDocCount=%v Whole=%v ShardMaxMatchCount=%v TotalMaxMatchCount=%v ShardMaxImportantMatch=%v TotalMaxImportantMatch=%v MaxWallTime=%v MaxDocDisplayCount=%v} Stats{ContentBytesLoaded=%v IndexBytesLoaded=%v Crashes=%v Duration=%v FileCount=%v ShardFilesConsidered=%v FilesConsidered=%v FilesLoaded=%v FilesSkipped=%v ShardsSkipped=%v MatchCount=%v NgramMatches=%v Wait=%v}", q.String(), opts.EstimateDocCount, opts.Whole, opts.ShardMaxMatchCount, opts.TotalMaxMatchCount, opts.ShardMaxImportantMatch, opts.TotalMaxImportantMatch, opts.MaxWallTime, opts.MaxDocDisplayCount, stats.ContentBytesLoaded, stats.IndexBytesLoaded, stats.Crashes, stats.Duration, stats.FileCount, stats.ShardFilesConsidered, stats.FilesConsidered, stats.FilesLoaded, stats.FilesSkipped, stats.ShardsSkipped, stats.MatchCount, stats.NgramMatches, stats.Wait)
return err
}
func initializeJaeger() {
jaegerDisabled := os.Getenv("JAEGER_DISABLED")
if jaegerDisabled == "" {
return
}
isJaegerDisabled, err := strconv.ParseBool(jaegerDisabled)
if err != nil {
log.Printf("EROR: failed to parse JAEGER_DISABLED: %s", err)
return
}
if isJaegerDisabled {
return
}
cfg, err := jaegercfg.FromEnv()
cfg.ServiceName = "zoekt"
if err != nil {
log.Printf("EROR: could not initialize jaeger tracer from env, error: %v", err.Error())
return
}
if reflect.DeepEqual(cfg.Sampler, &jaegercfg.SamplerConfig{}) {
// Default sampler configuration for when it is not specified via
// JAEGER_SAMPLER_* env vars. In most cases, this is sufficient
// enough to connect to Jaeger without any env vars.
cfg.Sampler.Type = jaeger.SamplerTypeConst
cfg.Sampler.Param = 1
}
tracer, _, err := cfg.NewTracer(
jaegercfg.Logger(jaegerlog.StdLogger),
jaegercfg.Metrics(jaegermetrics.NullFactory),
)
if err != nil {
log.Printf("could not initialize jaeger tracer, error: %v", err.Error())
}
opentracing.SetGlobalTracer(tracer)
}
func initializeGoogleCloudProfiler() {
// Google cloud profiler is opt-in since we only want to run it on
// Sourcegraph.com.
if os.Getenv("GOOGLE_CLOUD_PROFILER_ENABLED") == "" {
return
}
err := profiler.Start(profiler.Config{
Service: "zoekt-webserver",
ServiceVersion: zoekt.Version,
MutexProfiling: true,
AllocForceGC: true,
})
if err != nil {
log.Printf("could not initialize google cloud profiler: %s", err.Error())
}
}
var (
metricWatchdogErrors = promauto.NewGauge(prometheus.GaugeOpts{
Name: "zoekt_webserver_watchdog_errors",
Help: "The current error count for zoekt watchdog.",
})
metricWatchdogTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "zoekt_webserver_watchdog_total",
Help: "The total number of requests done by zoekt watchdog.",
})
metricWatchdogErrorsTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "zoekt_webserver_watchdog_errors_total",
Help: "The total number of errors from zoekt watchdog.",
})
)
| ["\"SRC_LOG_LEVEL\"", "\"ZOEKT_WATCHDOG_TICK\"", "\"ZOEKT_WATCHDOG_ERRORS\"", "\"JAEGER_DISABLED\"", "\"GOOGLE_CLOUD_PROFILER_ENABLED\""] | [] | ["JAEGER_DISABLED", "ZOEKT_WATCHDOG_TICK", "ZOEKT_WATCHDOG_ERRORS", "SRC_LOG_LEVEL", "GOOGLE_CLOUD_PROFILER_ENABLED"] | [] | ["JAEGER_DISABLED", "ZOEKT_WATCHDOG_TICK", "ZOEKT_WATCHDOG_ERRORS", "SRC_LOG_LEVEL", "GOOGLE_CLOUD_PROFILER_ENABLED"] | go | 5 | 0 | |
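shutdownOnSignal above stages its shutdown: wait after SIGTERM so the endpoint can drop out of service discovery, then drain in-flight requests under a deadline. A compact sketch of that idea follows, with an illustrative delay and a trivial handler rather than zoekt's actual routes.

package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8080", Handler: http.HandlerFunc(
		func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte("OK")) })}

	go func() {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			log.Fatalf("ListenAndServe: %v", err)
		}
	}()

	// Block until SIGINT or SIGTERM arrives.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	sig := <-c

	// Illustrative delay: give load balancers a moment to stop sending traffic.
	if sig == syscall.SIGTERM {
		time.Sleep(5 * time.Second)
	}

	// Drain in-flight requests, but give up after 10 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("shutdown: %v", err)
	}
}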
hazelcast/src/main/java/com/hazelcast/internal/util/PhoneHome.java
|
/*
* Copyright (c) 2008-2019, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.internal.util;
import com.hazelcast.instance.BuildInfo;
import com.hazelcast.instance.BuildInfoProvider;
import com.hazelcast.instance.JetBuildInfo;
import com.hazelcast.instance.impl.Node;
import com.hazelcast.internal.cluster.impl.ClusterServiceImpl;
import com.hazelcast.internal.nio.ConnectionType;
import com.hazelcast.logging.ILogger;
import com.hazelcast.spi.properties.ClusterProperty;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.RuntimeMXBean;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import static com.hazelcast.internal.nio.IOUtil.closeResource;
import static com.hazelcast.internal.util.EmptyStatement.ignore;
import static com.hazelcast.internal.util.ExceptionUtil.rethrow;
import static java.lang.System.getenv;
/**
* Pings phone home server with cluster info daily.
*/
public class PhoneHome {
private static final int TIMEOUT = 1000;
private static final int A_INTERVAL = 5;
private static final int B_INTERVAL = 10;
private static final int C_INTERVAL = 20;
private static final int D_INTERVAL = 40;
private static final int E_INTERVAL = 60;
private static final int F_INTERVAL = 100;
private static final int G_INTERVAL = 150;
private static final int H_INTERVAL = 300;
private static final int J_INTERVAL = 600;
private static final String BASE_PHONE_HOME_URL = "http://phonehome.hazelcast.com/ping";
private static final int CONNECTION_TIMEOUT_MILLIS = 3000;
private static final String FALSE = "false";
volatile ScheduledFuture<?> phoneHomeFuture;
private final ILogger logger;
private final BuildInfo buildInfo = BuildInfoProvider.getBuildInfo();
public PhoneHome(Node hazelcastNode) {
logger = hazelcastNode.getLogger(PhoneHome.class);
}
public void check(final Node hazelcastNode) {
if (!hazelcastNode.getProperties().getBoolean(ClusterProperty.PHONE_HOME_ENABLED)) {
return;
}
if (FALSE.equals(getenv("HZ_PHONE_HOME_ENABLED"))) {
return;
}
try {
phoneHomeFuture = hazelcastNode.nodeEngine.getExecutionService()
.scheduleWithRepetition("PhoneHome",
() -> phoneHome(hazelcastNode, false), 0, 1, TimeUnit.DAYS);
} catch (RejectedExecutionException e) {
logger.warning("Could not schedule phone home task! Most probably Hazelcast failed to start.");
}
}
public void shutdown() {
if (phoneHomeFuture != null) {
phoneHomeFuture.cancel(true);
}
}
public String convertToLetter(int size) {
String letter;
if (size < A_INTERVAL) {
letter = "A";
} else if (size < B_INTERVAL) {
letter = "B";
} else if (size < C_INTERVAL) {
letter = "C";
} else if (size < D_INTERVAL) {
letter = "D";
} else if (size < E_INTERVAL) {
letter = "E";
} else if (size < F_INTERVAL) {
letter = "F";
} else if (size < G_INTERVAL) {
letter = "G";
} else if (size < H_INTERVAL) {
letter = "H";
} else if (size < J_INTERVAL) {
letter = "J";
} else {
letter = "I";
}
return letter;
}
/**
     * Performs a phone home request for {@code node} and returns the generated request
* parameters. If {@code pretend} is {@code true}, only returns the parameters
* without actually performing the request.
*
* @param node the node for which to make the phone home request
* @param pretend if {@code true}, do not perform the request
* @return the generated request parameters
*/
public Map<String, String> phoneHome(Node node, boolean pretend) {
PhoneHomeParameterCreator parameterCreator = createParameters(node);
if (!pretend) {
String urlStr = BASE_PHONE_HOME_URL + parameterCreator.build();
fetchWebService(urlStr);
}
return parameterCreator.getParameters();
}
public PhoneHomeParameterCreator createParameters(Node hazelcastNode) {
ClusterServiceImpl clusterService = hazelcastNode.getClusterService();
int clusterSize = clusterService.getMembers().size();
Long clusterUpTime = clusterService.getClusterClock().getClusterUpTime();
RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
JetBuildInfo jetBuildInfo = hazelcastNode.getBuildInfo().getJetBuildInfo();
PhoneHomeParameterCreator parameterCreator = new PhoneHomeParameterCreator()
.addParam("version", buildInfo.getVersion())
.addParam("m", hazelcastNode.getThisUuid().toString())
.addParam("p", getDownloadId())
.addParam("c", clusterService.getClusterId().toString())
.addParam("crsz", convertToLetter(clusterSize))
.addParam("cssz", convertToLetter(hazelcastNode.clientEngine.getClientEndpointCount()))
.addParam("cuptm", Long.toString(clusterUpTime))
.addParam("nuptm", Long.toString(runtimeMxBean.getUptime()))
.addParam("jvmn", runtimeMxBean.getVmName())
.addParam("jvmv", System.getProperty("java.version"))
.addParam("jetv", jetBuildInfo == null ? "" : jetBuildInfo.getVersion());
addClientInfo(hazelcastNode, parameterCreator);
addOSInfo(parameterCreator);
return parameterCreator;
}
private String getDownloadId() {
String downloadId = "source";
InputStream is = null;
try {
is = getClass().getClassLoader().getResourceAsStream("hazelcast-download.properties");
if (is != null) {
final Properties properties = new Properties();
properties.load(is);
downloadId = properties.getProperty("hazelcastDownloadId");
}
} catch (IOException ignored) {
ignore(ignored);
} finally {
closeResource(is);
}
return downloadId;
}
private void fetchWebService(String urlStr) {
InputStream in = null;
try {
URL url = new URL(urlStr);
URLConnection conn = url.openConnection();
conn.setRequestProperty("User-Agent", "Mozilla/5.0");
conn.setConnectTimeout(TIMEOUT * 2);
conn.setReadTimeout(TIMEOUT * 2);
in = new BufferedInputStream(conn.getInputStream());
} catch (Exception ignored) {
ignore(ignored);
} finally {
closeResource(in);
}
}
private void addOSInfo(PhoneHomeParameterCreator parameterCreator) {
OperatingSystemMXBean osMxBean = ManagementFactory.getOperatingSystemMXBean();
try {
parameterCreator
.addParam("osn", osMxBean.getName())
.addParam("osa", osMxBean.getArch())
.addParam("osv", osMxBean.getVersion());
} catch (SecurityException e) {
parameterCreator
.addParam("osn", "N/A")
.addParam("osa", "N/A")
.addParam("osv", "N/A");
}
}
private void addClientInfo(Node hazelcastNode, PhoneHomeParameterCreator parameterCreator) {
Map<String, Integer> clusterClientStats = hazelcastNode.clientEngine.getConnectedClientStats();
parameterCreator
.addParam("ccpp", Integer.toString(clusterClientStats.getOrDefault(ConnectionType.CPP_CLIENT, 0)))
.addParam("cdn", Integer.toString(clusterClientStats.getOrDefault(ConnectionType.CSHARP_CLIENT, 0)))
.addParam("cjv", Integer.toString(clusterClientStats.getOrDefault(ConnectionType.JAVA_CLIENT, 0)))
.addParam("cnjs", Integer.toString(clusterClientStats.getOrDefault(ConnectionType.NODEJS_CLIENT, 0)))
.addParam("cpy", Integer.toString(clusterClientStats.getOrDefault(ConnectionType.PYTHON_CLIENT, 0)))
.addParam("cgo", Integer.toString(clusterClientStats.getOrDefault(ConnectionType.GO_CLIENT, 0)));
}
private void checkClusterSizeAndSetLicense(int clusterSize, PhoneHomeParameterCreator parameterCreator) {
if (clusterSize <= 2) {
parameterCreator.addParam("mclicense", "MC_LICENSE_NOT_REQUIRED");
} else {
parameterCreator.addParam("mclicense", "MC_LICENSE_REQUIRED_BUT_NOT_SET");
}
}
/**
* Util class for parameters of OS and EE PhoneHome pings.
*/
public static class PhoneHomeParameterCreator {
private final StringBuilder builder;
private final Map<String, String> parameters = new HashMap<>();
private boolean hasParameterBefore;
public PhoneHomeParameterCreator() {
builder = new StringBuilder();
builder.append("?");
}
Map<String, String> getParameters() {
return parameters;
}
public PhoneHomeParameterCreator addParam(String key, String value) {
if (hasParameterBefore) {
builder.append("&");
} else {
hasParameterBefore = true;
}
try {
builder.append(key).append("=").append(URLEncoder.encode(value, "UTF-8"));
} catch (UnsupportedEncodingException e) {
throw rethrow(e);
}
parameters.put(key, value);
return this;
}
String build() {
return builder.toString();
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
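PhoneHomeParameterCreator assembles the ping URL by hand, URL-encoding each value and tracking whether an '&' separator is needed. For comparison, in Go — the language used for the other sketches in this document — the standard library's url.Values covers the same ground; the parameters and the base URL below are illustrative, not Hazelcast's.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("version", "4.0.1")
	params.Set("crsz", "B") // cluster-size bucket, as in convertToLetter
	params.Set("jvmn", "OpenJDK 64-Bit Server VM")

	// Encode escapes each value and joins the pairs with '&' in sorted key order.
	fmt.Println("http://example.invalid/ping?" + params.Encode())
}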
pkg/mqtrigger/messageQueue/kafka/kafka.go
|
/*
Copyright 2016 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kafka
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
"strconv"
"strings"
sarama "github.com/Shopify/sarama"
cluster "github.com/bsm/sarama-cluster"
"github.com/pkg/errors"
"go.uber.org/zap"
fv1 "github.com/fission/fission/pkg/apis/core/v1"
"github.com/fission/fission/pkg/mqtrigger/messageQueue"
"github.com/fission/fission/pkg/utils"
)
var (
// Need to use raw string to support escape sequence for - & . chars
validKafkaTopicName = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9\-\._]*[a-zA-Z0-9]$`)
)
type (
Kafka struct {
logger *zap.Logger
routerUrl string
brokers []string
version sarama.KafkaVersion
authKeys map[string][]byte
tls bool
}
)
func New(logger *zap.Logger, routerUrl string, mqCfg messageQueue.Config) (messageQueue.MessageQueue, error) {
if len(routerUrl) == 0 || len(mqCfg.Url) == 0 {
return nil, errors.New("the router URL or MQ URL is empty")
}
mqKafkaVersion := os.Getenv("MESSAGE_QUEUE_KAFKA_VERSION")
// Parse version string
kafkaVersion, err := sarama.ParseKafkaVersion(mqKafkaVersion)
if err != nil {
logger.Warn("error parsing kafka version string - falling back to default",
zap.Error(err),
zap.String("failed_version", mqKafkaVersion),
zap.Any("default_version", kafkaVersion))
}
kafka := Kafka{
logger: logger.Named("kafka"),
routerUrl: routerUrl,
brokers: strings.Split(mqCfg.Url, ","),
version: kafkaVersion,
}
if tls, _ := strconv.ParseBool(os.Getenv("TLS_ENABLED")); tls {
kafka.tls = true
authKeys := make(map[string][]byte)
if mqCfg.Secrets == nil {
return nil, errors.New("no secrets were loaded")
}
authKeys["caCert"] = mqCfg.Secrets["caCert"]
authKeys["userCert"] = mqCfg.Secrets["userCert"]
authKeys["userKey"] = mqCfg.Secrets["userKey"]
kafka.authKeys = authKeys
}
logger.Info("created kafka queue", zap.Any("kafka brokers", kafka.brokers),
zap.Any("kafka version", kafka.version))
return kafka, nil
}
func (kafka Kafka) Subscribe(trigger *fv1.MessageQueueTrigger) (messageQueue.Subscription, error) {
	kafka.logger.Info("inside kafka subscribe", zap.Any("trigger", trigger))
kafka.logger.Info("brokers set", zap.Strings("brokers", kafka.brokers))
// Create new consumer
consumerConfig := cluster.NewConfig()
consumerConfig.Consumer.Return.Errors = true
consumerConfig.Group.Return.Notifications = true
consumerConfig.Config.Version = kafka.version
// Create new producer
producerConfig := sarama.NewConfig()
producerConfig.Producer.RequiredAcks = sarama.WaitForAll
producerConfig.Producer.Retry.Max = 10
producerConfig.Producer.Return.Successes = true
producerConfig.Version = kafka.version
// Setup TLS for both producer and consumer
if kafka.tls {
consumerConfig.Net.TLS.Enable = true
producerConfig.Net.TLS.Enable = true
tlsConfig, err := kafka.getTLSConfig()
if err != nil {
return nil, err
}
producerConfig.Net.TLS.Config = tlsConfig
consumerConfig.Net.TLS.Config = tlsConfig
}
consumer, err := cluster.NewConsumer(kafka.brokers, string(trigger.ObjectMeta.UID), []string{trigger.Spec.Topic}, consumerConfig)
kafka.logger.Info("created a new consumer", zap.Strings("brokers", kafka.brokers),
zap.String("input topic", trigger.Spec.Topic),
zap.String("output topic", trigger.Spec.ResponseTopic),
zap.String("error topic", trigger.Spec.ErrorTopic),
zap.String("trigger name", trigger.ObjectMeta.Name),
zap.String("function namespace", trigger.ObjectMeta.Namespace),
zap.String("function name", trigger.Spec.FunctionReference.Name))
if err != nil {
return nil, err
}
producer, err := sarama.NewSyncProducer(kafka.brokers, producerConfig)
kafka.logger.Info("created a new producer", zap.Strings("brokers", kafka.brokers),
zap.String("input topic", trigger.Spec.Topic),
zap.String("output topic", trigger.Spec.ResponseTopic),
zap.String("error topic", trigger.Spec.ErrorTopic),
zap.String("trigger name", trigger.ObjectMeta.Name),
zap.String("function namespace", trigger.ObjectMeta.Namespace),
zap.String("function name", trigger.Spec.FunctionReference.Name))
if err != nil {
return nil, err
}
// consume errors
go func() {
for err := range consumer.Errors() {
kafka.logger.Error("consumer error", zap.Error(err))
}
}()
// consume notifications
go func() {
for ntf := range consumer.Notifications() {
kafka.logger.Info("consumer notification", zap.Any("notification", ntf))
}
}()
// consume messages
go func() {
for msg := range consumer.Messages() {
kafka.logger.Debug("calling message handler", zap.String("message", string(msg.Value[:])))
go kafkaMsgHandler(&kafka, producer, trigger, msg, consumer)
}
}()
return consumer, nil
}
func (kafka Kafka) getTLSConfig() (*tls.Config, error) {
tlsConfig := tls.Config{}
cert, err := tls.X509KeyPair(kafka.authKeys["userCert"], kafka.authKeys["userKey"])
if err != nil {
return nil, err
}
	tlsConfig.Certificates = []tls.Certificate{cert}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(kafka.authKeys["caCert"])
tlsConfig.RootCAs = caCertPool
tlsConfig.BuildNameToCertificate()
return &tlsConfig, nil
}
func (kafka Kafka) Unsubscribe(subscription messageQueue.Subscription) error {
return subscription.(*cluster.Consumer).Close()
}
func kafkaMsgHandler(kafka *Kafka, producer sarama.SyncProducer, trigger *fv1.MessageQueueTrigger, msg *sarama.ConsumerMessage, consumer *cluster.Consumer) {
var value string = string(msg.Value[:])
	// Only function name references are supported for now
if trigger.Spec.FunctionReference.Type != fv1.FunctionReferenceTypeFunctionName {
kafka.logger.Fatal("unsupported function reference type for trigger",
zap.Any("function_reference_type", trigger.Spec.FunctionReference.Type),
zap.String("trigger", trigger.ObjectMeta.Name))
}
url := kafka.routerUrl + "/" + strings.TrimPrefix(utils.UrlForFunction(trigger.Spec.FunctionReference.Name, trigger.ObjectMeta.Namespace), "/")
kafka.logger.Debug("making HTTP request", zap.String("url", url))
// Generate the Headers
fissionHeaders := map[string]string{
"X-Fission-MQTrigger-Topic": trigger.Spec.Topic,
"X-Fission-MQTrigger-RespTopic": trigger.Spec.ResponseTopic,
"X-Fission-MQTrigger-ErrorTopic": trigger.Spec.ErrorTopic,
"Content-Type": trigger.Spec.ContentType,
}
// Create request
req, err := http.NewRequest("POST", url, strings.NewReader(value))
if err != nil {
kafka.logger.Error("failed to create HTTP request to invoke function",
zap.Error(err),
zap.String("function_url", url))
return
}
	// Set the headers that came from the Kafka record.
	// Using Header.Add() as msg.Headers may have keys with more than one value.
if kafka.version.IsAtLeast(sarama.V0_11_0_0) {
for _, h := range msg.Headers {
req.Header.Add(string(h.Key), string(h.Value))
}
} else {
kafka.logger.Warn("headers are not supported by current Kafka version, needs v0.11+: no record headers to add in HTTP request",
zap.Any("current_version", kafka.version))
}
for k, v := range fissionHeaders {
req.Header.Set(k, v)
}
	// Make the request, retrying up to MaxRetries times
	var resp *http.Response
	for attempt := 0; attempt <= trigger.Spec.MaxRetries; attempt++ {
resp, err = http.DefaultClient.Do(req)
if err != nil {
kafka.logger.Error("sending function invocation request failed",
zap.Error(err),
zap.String("function_url", url),
zap.String("trigger", trigger.ObjectMeta.Name))
continue
}
if resp == nil {
continue
}
if err == nil && resp.StatusCode == http.StatusOK {
// Success, quit retrying
break
}
}
if resp == nil {
kafka.logger.Warn("every function invocation retry failed; final retry gave empty response",
zap.String("function_url", url),
zap.String("trigger", trigger.ObjectMeta.Name))
return
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
kafka.logger.Debug("got response from function invocation",
zap.String("function_url", url),
zap.String("trigger", trigger.ObjectMeta.Name),
zap.String("body", string(body)))
if err != nil {
errorHandler(kafka.logger, trigger, producer, url,
errors.Wrapf(err, "request body error: %v", string(body)))
return
}
if resp.StatusCode != 200 {
errorHandler(kafka.logger, trigger, producer, url,
fmt.Errorf("request returned failure: %v", resp.StatusCode))
return
}
if len(trigger.Spec.ResponseTopic) > 0 {
// Generate Kafka record headers
var kafkaRecordHeaders []sarama.RecordHeader
if kafka.version.IsAtLeast(sarama.V0_11_0_0) {
for k, v := range resp.Header {
// One key may have multiple values
for _, v := range v {
kafkaRecordHeaders = append(kafkaRecordHeaders, sarama.RecordHeader{Key: []byte(k), Value: []byte(v)})
}
}
} else {
kafka.logger.Warn("headers are not supported by current Kafka version, needs v0.11+: no record headers to add in HTTP request",
zap.Any("current_version", kafka.version))
}
_, _, err := producer.SendMessage(&sarama.ProducerMessage{
Topic: trigger.Spec.ResponseTopic,
Value: sarama.StringEncoder(body),
Headers: kafkaRecordHeaders,
})
if err != nil {
kafka.logger.Warn("failed to publish response body from function invocation to topic",
zap.Error(err),
zap.String("topic", trigger.Spec.Topic),
zap.String("function_url", url))
return
}
}
consumer.MarkOffset(msg, "") // mark message as processed
}
func errorHandler(logger *zap.Logger, trigger *fv1.MessageQueueTrigger, producer sarama.SyncProducer, funcUrl string, err error) {
if len(trigger.Spec.ErrorTopic) > 0 {
_, _, e := producer.SendMessage(&sarama.ProducerMessage{
Topic: trigger.Spec.ErrorTopic,
Value: sarama.StringEncoder(err.Error()),
})
if e != nil {
logger.Error("failed to publish message to error topic",
zap.Error(e),
zap.String("trigger", trigger.ObjectMeta.Name),
zap.String("message", err.Error()),
zap.String("topic", trigger.Spec.Topic))
}
} else {
logger.Error("message received to publish to error topic, but no error topic was set",
zap.String("message", err.Error()), zap.String("trigger", trigger.ObjectMeta.Name), zap.String("function_url", funcUrl))
}
}
// The validation is based on Kafka's internal implementation: https://github.com/apache/kafka/blob/cde6d18983b5d58199f8857d8d61d7efcbe6e54a/clients/src/main/java/org/apache/kafka/common/internals/Topic.java#L36-L47
func IsTopicValid(topic string) bool {
if len(topic) == 0 {
return false
}
if topic == "." || topic == ".." {
return false
}
if len(topic) > 249 {
return false
}
if !validKafkaTopicName.MatchString(topic) {
return false
}
return true
}
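// Illustrative results (assumed, not part of the original source):
//
//	IsTopicValid("orders.v1-events")       // true
//	IsTopicValid("..")                     // false: reserved name
//	IsTopicValid("bad topic!")             // false: invalid characters
//	IsTopicValid(strings.Repeat("a", 250)) // false: longer than 249 characters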
|
[
"\"MESSAGE_QUEUE_KAFKA_VERSION\"",
"\"TLS_ENABLED\""
] |
[] |
[
"TLS_ENABLED",
"MESSAGE_QUEUE_KAFKA_VERSION"
] |
[]
|
["TLS_ENABLED", "MESSAGE_QUEUE_KAFKA_VERSION"]
|
go
| 2 | 0 | |
py/issue_triage/triage.py
|
"""Identify issues that need triage."""
from code_intelligence import graphql
from code_intelligence import util
import datetime
from dateutil import parser as dateutil_parser
import fire
import json
import logging
import os
import numpy as np
import pprint
import retrying
PROJECT_CARD_ID = os.getenv('INPUT_NEEDS_TRIAGE_PROJECT_CARD_ID', "MDEzOlByb2plY3RDb2x1bW41OTM0MzEz")
# TODO(jlewi): If we make this an app maybe we should read this from a .github
# file
ALLOWED_PRIORITY = ["priority/p0", "priority/p1", "priority/p2",
"priority/p3"]
REQUIRES_PROJECT = ["priority/p0", "priority/p1"]
TRIAGE_PROJECT = "Needs Triage"
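# Illustrative invocation (assumed, not part of the original source): the target
# "Needs Triage" project column can be overridden through the environment, e.g.
#   INPUT_NEEDS_TRIAGE_PROJECT_CARD_ID=<column-node-id> python triage.py triage kubeflow/kubeflow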
class TriageInfo:
"""Class describing whether an issue needs triage"""
def __init__(self):
self.issue = None
self.triage_project_card = None
# The times of various events
self.kind_time = None
self.priority_time = None
self.project_time = None
self.area_time = None
self.closed_at = None
self.requires_project = False
@classmethod
def from_issue(cls, issue):
"""Construct TriageInfo from the supplied issue"""
info = TriageInfo()
info.issue = issue
labels = graphql.unpack_and_split_nodes(issue, ["labels", "edges"])
project_cards = graphql.unpack_and_split_nodes(issue,
["projectCards", "edges"])
events = graphql.unpack_and_split_nodes(issue,
["timelineItems", "edges"])
for l in labels:
name = l["name"]
if name in ALLOWED_PRIORITY:
info.requires_project = name in REQUIRES_PROJECT
for c in project_cards:
if c.get("project").get("name") == TRIAGE_PROJECT:
info.triage_project_card = c
break
# TODO(jlewi): Could we potentially miss some events since we aren't
# paginating through all events for an issue? This should no longer
# be an issue because _process_issue will call _get_issue and paginate
# through all results.
for e in events:
if not "createdAt" in e:
continue
t = dateutil_parser.parse(e.get("createdAt"))
if e.get("__typename") == "LabeledEvent":
name = e.get("label").get("name")
if name.startswith("kind"):
if info.kind_time:
continue
info.kind_time = t
if name.startswith("area") or name.startswith("platform"):
if info.area_time:
continue
info.area_time = t
if name in ALLOWED_PRIORITY:
if info.priority_time:
continue
info.priority_time = t
if e.get("__typename") == "AddedToProjectEvent":
if info.project_time:
continue
info.project_time = t
if issue.get("closedAt"):
info.closed_at = dateutil_parser.parse(issue.get("closedAt"))
return info
def __eq__(self, other):
for f in ["kind_time", "priority_time", "project_time", "area_time",
"closed_at", "in_triage_project", "requires_project"]:
if getattr(self, f) != getattr(other, f):
return False
if self.in_triage_project:
if self.triage_project_card["id"] != other.triage_project_card["id"]:
return False
return True
@property
def needs_triage(self):
"""Return true if the issue needs triage"""
# closed issues don't need triage
if self.issue["state"].lower() == "closed":
return False
# If any events are missing then we need triage
for f in ["kind_time", "priority_time", "area_time"]:
if not getattr(self, f):
return True
if self.requires_project and not self.project_time:
return True
return False
def __repr__(self):
pieces = ["needs_triage={0}".format(self.needs_triage)]
for f in ["kind_time", "priority_time", "project_time", "area_time",
"closed_at", "in_triage_project"]:
v = getattr(self, f)
if not v:
continue
if isinstance(v, datetime.datetime):
v = v.isoformat()
pieces.append("{0}={1}".format(f, v))
return ";".join(pieces)
def message(self):
"""Return a human readable message."""
if not self.needs_triage:
return "Issue doesn't need attention."
lines = []
if self.needs_triage:
lines.append("Issue needs triage:")
if not self.kind_time:
lines.append("\t Issue needs a kind label")
if not self.priority_time:
lines.append("\t Issue needs one of the priorities {0}".format(ALLOWED_PRIORITY))
if not self.area_time:
lines.append("\t Issue needs an area label")
if self.requires_project and not self.project_time:
lines.append("\t Issues with priority in {0} need to be assigned to a project".format(REQUIRES_PROJECT))
return "\n".join(lines)
@property
def triaged_at(self):
"""Returns a datetime representing the time it was triage or None."""
if self.needs_triage:
return None
# Determine whether issue was triaged by being closed or not
events = [self.kind_time,
self.priority_time,
self.area_time]
if self.requires_project:
events.append(self.project_time)
has_all_events = True
for e in events:
if not e:
has_all_events = False
if has_all_events:
events = sorted(events)
return events[-1]
else:
return self.closed_at
@property
def in_triage_project(self):
return self.triage_project_card is not None
class IssueTriage(object):
def __init__(self):
self._client = None
@property
def client(self):
if not self._client:
self._client = graphql.GraphQLClient()
return self._client
def _iter_issues(self, org, repo, issue_filter=None, output=None):
"""Iterate over issues in batches for a repository
Args:
org: The org that owns the repository
      repo: The name of the repository
output: The directory to write the results; if not specified results
are not downloaded
issue_filter: Used to filter issues to consider based on when they were
last updated
    Writes the issues along with the first comments to shard files in the
    output directory.
"""
client = graphql.GraphQLClient()
num_issues_per_page = 100
# Labels and projects are available via timeline events.
# However, in timeline events project info (e.g. actual project name)
# is only in developer preview.
    # The advantage of using labels and projectCards (as opposed to timeline
    # events) is that it's much easier to bound the number of items we need
    # to fetch in order to return all labels and projects;
    # for timeline items it's much more likely the labels and projects we care
    # about will require pagination.
#
# TODO(jlewi): We should add a method to fetch all issue timeline items
# via pagination in the case the number of items exceeds the page size.
#
# TODO(jlewi): We need to consider closed issues if we want to compute
# stats.
#
    # TODO(jlewi): We should support fetching only OPEN issues; if we are
    # deciding which issues need triage or have been triaged we really only
    # need to look at open issues. Closed issues will automatically move to
    # the appropriate card in the Kanban board.
query = """query getIssues($org: String!, $repo: String!, $pageSize: Int, $issueCursor: String, $filter: IssueFilters) {
repository(owner: $org, name: $repo) {
issues(first: $pageSize, filterBy: $filter, after: $issueCursor) {
totalCount
pageInfo {
endCursor
hasNextPage
}
edges {
node {
author {
__typename
... on User {
login
}
... on Bot {
login
}
}
id
title
body
url
state
createdAt
closedAt
labels(first: 30) {
totalCount
edges {
node {
name
}
}
}
projectCards(first: 30) {
totalCount
pageInfo {
endCursor
hasNextPage
}
edges {
node {
id
project {
name
number
}
}
}
}
timelineItems(first: 30) {
totalCount
pageInfo {
endCursor
hasNextPage
}
edges {
node {
__typename
... on AddedToProjectEvent {
createdAt
}
... on LabeledEvent {
createdAt
label {
name
}
}
... on ClosedEvent {
createdAt
}
}
}
}
}
}
}
}
}
"""
shard = 0
num_pages = None
if output and not os.path.exists(output):
os.makedirs(output)
total_issues = None
has_next_issues_page = True
# TODO(jlewi): We should persist the cursors to disk so we can resume
# after errors
issues_cursor = None
shard_writer = None
if not issue_filter:
start_time = datetime.datetime.now() - datetime.timedelta(weeks=24)
issue_filter = {
"since": start_time.isoformat(),
}
while has_next_issues_page:
variables = {
"org": org,
"repo": repo,
"pageSize": num_issues_per_page,
"issueCursor": issues_cursor,
"filter": issue_filter,
}
results = client.run_query(query, variables=variables)
if results.get("errors"):
message = json.dumps(results.get("errors"))
logging.error(f"There was a problem issuing the query; errors:\n{message}\n")
return
if not total_issues:
total_issues = results["data"]["repository"]["issues"]["totalCount"]
num_pages = int(np.ceil(total_issues/float(num_issues_per_page)))
logging.info("%s/%s has a total of %s issues", org, repo, total_issues)
if output and not shard_writer:
logging.info("initializing the shard writer")
shard_writer = graphql.ShardWriter(num_pages, output,
prefix="issues-{0}-{1}".format(org, repo))
issues = graphql.unpack_and_split_nodes(
results, ["data", "repository", "issues", "edges"])
yield issues
if shard_writer:
shard_writer.write_shard(issues)
page_info = results["data"]["repository"]["issues"]["pageInfo"]
issues_cursor = page_info["endCursor"]
has_next_issues_page = page_info["hasNextPage"]
def download_issues(self, repo, output, issue_filter=None):
"""Download the issues to the specified directory
Args:
repo: Repository in the form {org}/{repo}
"""
org, repo_name = repo.split("/")
for shard_index, shard in enumerate(self._iter_issues(org, repo_name,
output=output,
                                                          issue_filter=issue_filter)):
logging.info("Wrote shard %s", shard_index)
def _build_dataframes(self, issues_dir):
"""Build dataframes containing triage info.
Args:
issues_dir: The directory containing issues
Returns:
data:
"""
def update_kanban_board(self):
"""Checks if any issues in the needs triage board can be removed.
"""
query = """query getIssues($issueCursor: String) {
search(type: ISSUE, query: "is:open is:issue org:kubeflow project:kubeflow/26", first: 100, after: $issueCursor) {
issueCount
pageInfo {
endCursor
hasNextPage
}
edges {
node {
__typename
... on Issue {
author {
__typename
... on User {
login
}
... on Bot {
login
}
}
id
title
body
url
state
createdAt
closedAt
labels(first: 30) {
totalCount
edges {
node {
name
}
}
}
projectCards(first: 30) {
totalCount
pageInfo {
endCursor
hasNextPage
}
edges {
node {
id
project {
name
number
}
}
}
}
timelineItems(first: 30) {
totalCount
pageInfo {
endCursor
hasNextPage
}
edges {
node {
__typename
... on AddedToProjectEvent {
createdAt
}
... on LabeledEvent {
createdAt
label {
name
}
}
... on ClosedEvent {
createdAt
}
}
}
}
}
}
}
}
}
"""
issues_cursor = None
has_next_issues_page = True
while has_next_issues_page:
variables = {
"issueCursor": issues_cursor,
}
results = self.client.run_query(query, variables=variables)
if results.get("errors"):
message = json.dumps(results.get("errors"))
logging.error(f"There was a problem issuing the query; errors:\n{message}\n")
return
issues = graphql.unpack_and_split_nodes(
results, ["data", "search", "edges"])
for i in issues:
self._process_issue(i)
page_info = results["data"]["search"]["pageInfo"]
issues_cursor = page_info["endCursor"]
has_next_issues_page = page_info["hasNextPage"]
def triage(self, repo, output=None, **kwargs):
"""Triage issues in the specified repository.
Args:
repo: Repository in the form {org}/{repo}
output: (Optional) directory to write issues
"""
org, repo_name = repo.split("/")
for shard_index, shard in enumerate(self._iter_issues(org, repo_name,
output=output,
**kwargs)):
logging.info("Processing shard %s", shard_index)
for i in shard:
self._process_issue(i)
def _get_issue(self, url):
"""Gets the complete issue.
This function does pagination to fetch all timeline items.
"""
    # TODO(jlewi): We should implement pagination for labels as well
query = """query getIssue($url: URI!, $timelineCursor: String) {
resource(url: $url) {
__typename
... on Issue {
author {
__typename
... on User {
login
}
... on Bot {
login
}
}
id
title
body
url
state
labels(first: 30) {
totalCount
edges {
node {
name
}
}
}
projectCards(first:30, ){
totalCount
edges {
node {
id
project {
name
number
}
}
}
}
timelineItems(first: 30, after: $timelineCursor) {
totalCount
pageInfo {
endCursor
hasNextPage
}
edges {
node {
__typename
... on AddedToProjectEvent {
createdAt
}
... on LabeledEvent {
createdAt
label {
name
}
}
... on ClosedEvent {
createdAt
}
}
}
}
}
}
}"""
variables = {
"url": url,
"timelineCursor": None,
}
results = self.client.run_query(query, variables=variables)
if results.get("errors"):
message = json.dumps(results.get("errors"))
logging.error(f"There was a problem issuing the query; errors:\n{message}\n")
return
issue = results["data"]["resource"]
has_next_page = issue["timelineItems"]["pageInfo"]["hasNextPage"]
while has_next_page:
variables["timelineCursor"] = issue["timelineItems"]["pageInfo"]["endCursor"]
results = self.client.run_query(query, variables=variables)
edges = (issue["timelineItems"]["edges"] +
results["data"]["resource"]["timelineItems"]["edges"])
issue["timelineItems"]["edges"] = edges
issue["timelineItems"]["pageInfo"] = (
results["data"]["resource"]["timelineItems"]["pageInfo"])
has_next_page = (results["data"]["resource"]["timelineItems"]["pageInfo"]
["hasNextPage"])
return issue
def triage_issue(self, url, project=None, add_comment=False):
"""Triage a single issue.
Args:
url: The url of the issue e.g.
https://github.com/kubeflow/community/issues/280
project: (Optional) If supplied the URL of the project to add issues
needing triage to.
add_comment: Set to true to comment on the issue with why
the issue needs triage
"""
issue = self._get_issue(url)
    return self._process_issue(issue, add_comment=add_comment)
def _process_issue(self, issue, add_comment=False):
"""Process a single issue.
Args:
      issue: Issue to process.
      add_comment: If true, add a comment to the issue explaining why it
        needs triage.
    """
if issue["timelineItems"]["pageInfo"]["hasNextPage"]:
# Since not all timelineItems were fetched; we need to refetch
# the issue and this time paginate to get all items.
logging.info("Issue: %s; fetching all timeline items", issue["url"])
issue = self._get_issue(issue["url"])
info = TriageInfo.from_issue(issue)
url = info.issue["url"]
logging.info(f"Issue {url}:\nstate:{info.message()}\n")
if not info.needs_triage:
self._remove_triage_project(info)
return
# TODO(jlewi): We should check if there is already a triage message
if add_comment:
mutation = """
mutation AddIssueComment($input: AddCommentInput!){
addComment(input:$input){
subject {
id
}
}
}
"""
mutation_variables = {
"input": {
"subjectId": issue["id"],
"body": info.message(),
}
}
      results = self.client.run_query(mutation, variables=mutation_variables)
if results.get("errors"):
message = json.dumps(results.get("errors"))
logging.error(f"There was a problem commenting on the issue; errors:\n{message}\n")
return
# add project
self._add_triage_project(info)
return info
def _remove_triage_project(self, issue_info):
"""Remove the issue from the triage project.
Args:
issue_info: TriageInfo
"""
if not issue_info.in_triage_project:
return
add_card = """
mutation DeleteFromTriageProject($input: DeleteProjectCardInput!){
deleteProjectCard(input:$input) {
clientMutationId
}
}
"""
variables = {
"input": {
"cardId": issue_info.triage_project_card["id"],
}
}
logging.info("Issue %s remove from triage project", issue_info.issue["url"])
results = self.client.run_query(add_card, variables=variables)
if results.get("errors"):
message = json.dumps(results.get("errors"))
logging.error(f"There was a problem removing the issue from the triage project; errors:\n{message}\n")
return
def _add_triage_project(self, issue_info):
"""Add the issue to the triage project if needed
Args:
issue_info: IssueInfo
"""
if issue_info.in_triage_project:
logging.info("Issue %s already in triage project",
issue_info.issue["url"])
return
add_card = """
mutation AddProjectIssueCard($input: AddProjectCardInput!){
addProjectCard(input:$input) {
clientMutationId
}
}
"""
add_variables = {
"input": {
"contentId": issue_info.issue["id"],
"projectColumnId": PROJECT_CARD_ID,
}
}
results = self.client.run_query(add_card, variables=add_variables)
if results.get("errors"):
# Check if the error was because the issue was already added
ALREADY_ADDED = "Project already has the associated issue"
if not (len(results["errors"]) == 1 and
results["errors"][0]["message"] == ALREADY_ADDED):
message = json.dumps(results.get("errors"))
logging.error(f"There was a problem adding the issue to the project; errors:\n{message}\n")
return
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(message)s|%(pathname)s|%(lineno)d|'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
fire.Fire(IssueTriage)
|
[] |
[] |
[
"INPUT_NEEDS_TRIAGE_PROJECT_CARD_ID"
] |
[]
|
["INPUT_NEEDS_TRIAGE_PROJECT_CARD_ID"]
|
python
| 1 | 0 | |
plugins/broker/googlepubsub/googlepubsub.go
|
// Package googlepubsub provides a Google cloud pubsub broker
package googlepubsub
import (
"context"
"os"
"time"
"cloud.google.com/go/pubsub"
"github.com/google/uuid"
"github.com/ship-os/ship-micro/v2/broker"
"github.com/ship-os/ship-micro/v2/cmd"
log "github.com/ship-os/ship-micro/v2/logger"
"google.golang.org/api/option"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type pubsubBroker struct {
client *pubsub.Client
options broker.Options
}
// A pubsub subscriber that manages handling of messages
type subscriber struct {
options broker.SubscribeOptions
topic string
exit chan bool
sub *pubsub.Subscription
}
// A single publication received by a handler
type publication struct {
pm *pubsub.Message
m *broker.Message
topic string
err error
}
func init() {
cmd.DefaultBrokers["googlepubsub"] = NewBroker
}
func (s *subscriber) run(hdlr broker.Handler) {
if s.options.Context != nil {
if max, ok := s.options.Context.Value(maxOutstandingMessagesKey{}).(int); ok {
s.sub.ReceiveSettings.MaxOutstandingMessages = max
}
if max, ok := s.options.Context.Value(maxExtensionKey{}).(time.Duration); ok {
s.sub.ReceiveSettings.MaxExtension = max
}
}
ctx, cancel := context.WithCancel(context.Background())
for {
select {
case <-s.exit:
cancel()
return
default:
if err := s.sub.Receive(ctx, func(ctx context.Context, pm *pubsub.Message) {
// create broker message
m := &broker.Message{
Header: pm.Attributes,
Body: pm.Data,
}
// create publication
p := &publication{
pm: pm,
m: m,
topic: s.topic,
}
				// run the handler; if it returns nil we may auto ack below
p.err = hdlr(p)
if p.err == nil {
// auto ack?
if s.options.AutoAck {
p.Ack()
}
}
}); err != nil {
time.Sleep(time.Second)
continue
}
}
}
}
func (s *subscriber) Options() broker.SubscribeOptions {
return s.options
}
func (s *subscriber) Topic() string {
return s.topic
}
func (s *subscriber) Unsubscribe() error {
select {
case <-s.exit:
return nil
default:
close(s.exit)
if deleteSubscription, ok := s.options.Context.Value(deleteSubscription{}).(bool); !ok || deleteSubscription {
return s.sub.Delete(context.Background())
}
return nil
}
}
func (p *publication) Ack() error {
p.pm.Ack()
return nil
}
func (p *publication) Error() error {
return p.err
}
func (p *publication) Topic() string {
return p.topic
}
func (p *publication) Message() *broker.Message {
return p.m
}
func (b *pubsubBroker) Address() string {
return ""
}
func (b *pubsubBroker) Connect() error {
return nil
}
func (b *pubsubBroker) Disconnect() error {
return b.client.Close()
}
// Init not currently implemented
func (b *pubsubBroker) Init(opts ...broker.Option) error {
return nil
}
func (b *pubsubBroker) Options() broker.Options {
return b.options
}
// Publish checks if the topic exists and then publishes via google pubsub
func (b *pubsubBroker) Publish(topic string, msg *broker.Message, opts ...broker.PublishOption) (err error) {
t := b.client.Topic(topic)
ctx := context.Background()
m := &pubsub.Message{
ID: "m-" + uuid.New().String(),
Data: msg.Body,
Attributes: msg.Header,
}
pr := t.Publish(ctx, m)
if _, err = pr.Get(ctx); err != nil {
		// create the topic if it does not exist
		if status.Code(err) == codes.NotFound {
			log.Infof("Topic does not exist. Creating topic: %s", topic)
if t, err = b.client.CreateTopic(ctx, topic); err == nil {
_, err = t.Publish(ctx, m).Get(ctx)
}
}
}
return
}
// Subscribe registers a subscription to the given topic against the google pubsub api
func (b *pubsubBroker) Subscribe(topic string, h broker.Handler, opts ...broker.SubscribeOption) (broker.Subscriber, error) {
options := broker.SubscribeOptions{
AutoAck: true,
Queue: "q-" + uuid.New().String(),
Context: b.options.Context,
}
for _, o := range opts {
o(&options)
}
ctx := context.Background()
sub := b.client.Subscription(options.Queue)
if createSubscription, ok := b.options.Context.Value(createSubscription{}).(bool); !ok || createSubscription {
exists, err := sub.Exists(ctx)
if err != nil {
return nil, err
}
if !exists {
tt := b.client.Topic(topic)
subb, err := b.client.CreateSubscription(ctx, options.Queue, pubsub.SubscriptionConfig{
Topic: tt,
AckDeadline: time.Duration(0),
})
if err != nil {
return nil, err
}
sub = subb
}
}
subscriber := &subscriber{
options: options,
topic: topic,
exit: make(chan bool),
sub: sub,
}
go subscriber.run(h)
return subscriber, nil
}
func (b *pubsubBroker) String() string {
return "googlepubsub"
}
// NewBroker creates a new google pubsub broker
func NewBroker(opts ...broker.Option) broker.Broker {
options := broker.Options{
Context: context.Background(),
}
for _, o := range opts {
o(&options)
}
// retrieve project id
prjID, _ := options.Context.Value(projectIDKey{}).(string)
// if `GOOGLEPUBSUB_PROJECT_ID` is present, it will overwrite programmatically set projectID
if envPrjID := os.Getenv("GOOGLEPUBSUB_PROJECT_ID"); len(envPrjID) > 0 {
prjID = envPrjID
}
// retrieve client opts
cOpts, _ := options.Context.Value(clientOptionKey{}).([]option.ClientOption)
// create pubsub client
c, err := pubsub.NewClient(context.Background(), prjID, cOpts...)
if err != nil {
panic(err.Error())
}
return &pubsubBroker{
client: c,
options: options,
}
}
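// Illustrative usage (assumed, not part of the original source): with
// GOOGLEPUBSUB_PROJECT_ID exported, NewBroker targets that project even if a
// project ID was set programmatically through the options context.
//
//	os.Setenv("GOOGLEPUBSUB_PROJECT_ID", "my-gcp-project")
//	b := NewBroker()
//	defer b.Disconnect()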
|
[
"\"GOOGLEPUBSUB_PROJECT_ID\""
] |
[] |
[
"GOOGLEPUBSUB_PROJECT_ID"
] |
[]
|
["GOOGLEPUBSUB_PROJECT_ID"]
|
go
| 1 | 0 | |
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/_Algorithms/Sort/sort_matrix_diagonally.py
|
"""
Given a m * n matrix mat of integers,
sort it diagonally in ascending order
from the top-left to the bottom-right
then return the sorted array.
mat = [
[3,3,1,1],
[2,2,1,2],
[1,1,1,2]
]
Should return:
[
[1,1,1,1],
[1,2,2,2],
[1,2,3,3]
]
"""
from heapq import heappush, heappop
from typing import List
def sort_diagonally(mat: List[List[int]]) -> List[List[int]]:
# If the input is a vector, return the vector
if len(mat) == 1 or len(mat[0]) == 1:
return mat
    # A matrix with m rows and n columns has m + n - 1 diagonals; the -1
    # avoids processing the diagonal starting at the top-left corner twice
for i in range(len(mat) + len(mat[0]) - 1):
# Process the rows
if i + 1 < len(mat):
# Initialize heap, set row and column
h = []
row = len(mat) - (i + 1)
col = 0
# Traverse diagonally, and add the values to the heap
while row < len(mat):
                heappush(h, mat[row][col])
row += 1
col += 1
# Sort the diagonal
row = len(mat) - (i + 1)
col = 0
while h:
ele = heappop(h)
mat[row][col] = ele
row += 1
col += 1
else:
# Process the columns
# Initialize heap, row and column
h = []
row = 0
col = i - (len(mat) - 1)
# Traverse Diagonally
while col < len(mat[0]) and row < len(mat):
                heappush(h, mat[row][col])
row += 1
col += 1
# Sort the diagonal
row = 0
col = i - (len(mat) - 1)
while h:
ele = heappop(h)
mat[row][col] = ele
row += 1
col += 1
# Return the updated matrix
return mat
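# Minimal usage sketch (added for illustration; reproduces the example from the
# docstring above).
if __name__ == "__main__":
    example = [
        [3, 3, 1, 1],
        [2, 2, 1, 2],
        [1, 1, 1, 2],
    ]
    for sorted_row in sort_diagonally(example):
        print(sorted_row)
    # Expected output:
    # [1, 1, 1, 1]
    # [1, 2, 2, 2]
    # [1, 2, 3, 3]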
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
fandango_serv/fandango_algo/mytools/infer/predict_system.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
import cv2
import copy
import numpy as np
import time
import logging
from PIL import Image
import fandango_algo.mytools.infer.utility as utility
import fandango_algo.mytools.infer.predict_rec as predict_rec
import fandango_algo.mytools.infer.predict_det as predict_det
import fandango_algo.mytools.infer.predict_cls as predict_cls
from fandango_algo.ppocr.utils.utility import get_image_file_list, check_and_read_gif
from fandango_algo.ppocr.utils.logging import get_logger
from fandango_algo.mytools.infer.utility import draw_ocr_box_txt, get_rotate_crop_image
logger = get_logger()
class TextSystem(object):
def __init__(self, args):
if not args.show_log:
logger.setLevel(logging.INFO)
self.text_detector = predict_det.TextDetector(args)
self.text_recognizer = predict_rec.TextRecognizer(args)
self.use_angle_cls = args.use_angle_cls
self.drop_score = args.drop_score
if self.use_angle_cls:
self.text_classifier = predict_cls.TextClassifier(args)
def print_draw_crop_rec_res(self, img_crop_list, rec_res):
bbox_num = len(img_crop_list)
for bno in range(bbox_num):
cv2.imwrite("./output/img_crop_%d.jpg" % bno, img_crop_list[bno])
logger.info(bno, rec_res[bno])
def __call__(self, img, cls=True):
ori_im = img.copy()
dt_boxes, elapse = self.text_detector(img)
logger.debug("dt_boxes num : {}, elapse : {}".format(
len(dt_boxes), elapse))
if dt_boxes is None:
return None, None
img_crop_list = []
dt_boxes = sorted_boxes(dt_boxes)
for bno in range(len(dt_boxes)):
tmp_box = copy.deepcopy(dt_boxes[bno])
img_crop = get_rotate_crop_image(ori_im, tmp_box)
h, w, c = img_crop.shape
img_crop_list.append(img_crop)
if self.use_angle_cls and cls:
img_crop_list, angle_list, elapse = self.text_classifier(
img_crop_list)
logger.debug("cls num : {}, elapse : {}".format(
len(img_crop_list), elapse))
rec_res, elapse = self.text_recognizer(img_crop_list)
logger.debug("rec_res num : {}, elapse : {}".format(
len(rec_res), elapse))
# self.print_draw_crop_rec_res(img_crop_list, rec_res)
filter_boxes, filter_rec_res = [], []
        for box, rec_result in zip(dt_boxes, rec_res):
            text, score = rec_result
            if score >= self.drop_score:
                filter_boxes.append(box)
                filter_rec_res.append(rec_result)
return filter_boxes, filter_rec_res
def sorted_boxes(dt_boxes):
"""
Sort text boxes in order from top to bottom, left to right
    args:
        dt_boxes(array): detected text boxes, shape [num_boxes, 4, 2]
    return:
        sorted boxes(list of arrays), each with shape [4, 2]
"""
num_boxes = dt_boxes.shape[0]
sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))
_boxes = list(sorted_boxes)
for i in range(num_boxes - 1):
if abs(_boxes[i + 1][0][1] - _boxes[i][0][1]) < 10 and \
(_boxes[i + 1][0][0] < _boxes[i][0][0]):
tmp = _boxes[i]
_boxes[i] = _boxes[i + 1]
_boxes[i + 1] = tmp
return _boxes
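# Illustrative behaviour (assumed; shown as a comment so nothing runs on import):
#   import numpy as np
#   boxes = np.array([
#       [[120, 12], [220, 12], [220, 40], [120, 40]],   # right-hand box
#       [[10, 15], [110, 15], [110, 42], [10, 42]],     # left-hand box on the same text line
#   ], dtype=np.float32)
#   [b[0].tolist() for b in sorted_boxes(boxes)]
#   # -> [[10.0, 15.0], [120.0, 12.0]]   (the left box is ordered first)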
from fandango_algo.pdf._pdf2img import *
from fandango_algo.pdf._imgList import *
def reg(args, IMG):
imglist = IMG.img_list
text_sys = TextSystem(args)
is_visualize = True
font_path = args.vis_font_path
drop_score = args.drop_score
# warm up 10 times
if args.warmup:
img = np.random.uniform(0, 255, [640, 640, 3]).astype(np.uint8)
for i in range(10):
res = text_sys(img)
total_time = 0
cpu_mem, gpu_mem, gpu_util = 0, 0, 0
_st = time.time()
count = 0
    _l = ImgListWithTxt()  # stores the images with detection boxes drawn
    _r = ImgListWithTxt()  # stores the images with recognized text rendered
for idx, img in enumerate(imglist):
if img is None:
logger.info("error in loading image:{}".format(idx))
continue
starttime = time.time()
dt_boxes, rec_res = text_sys(img)
elapse = time.time() - starttime
total_time += elapse
print(rec_res)
logger.info(
str(idx) + " Predict time of %s: %.3fs" % (idx, elapse))
for text, score in rec_res:
logger.info("{}, {:.3f}".format(text, score))
if is_visualize:
image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
boxes = dt_boxes
txts = [rec_res[i][0] for i in range(len(rec_res))]
scores = [rec_res[i][1] for i in range(len(rec_res))]
# print(txts)
img_left, img_right = draw_ocr_box_txt(
image,
boxes,
txts,
scores,
drop_score=drop_score,
font_path=font_path)
_l.append(img_left, IMG.num2name[idx])
_l.append_txt(txts)
_r.append(img_right, IMG.num2name[idx])
_r.append_txt(txts)
# draw_img_save = "./inference_results/"
# if not os.path.exists(draw_img_save):
# os.makedirs(draw_img_save)
# cv2.imwrite(
# os.path.join(draw_img_save, os.path.basename(IMG.num[idx]+".png")),
# img_right[:, :, ::-1])
# logger.info("The visualized image saved in {}".format(
# os.path.join(draw_img_save, os.path.basename(IMG.img_dict[idx]+".png"))))
logger.info("The predict total time is {}".format(time.time() - _st))
logger.info("\nThe predict total time is {}".format(total_time))
return _l, _r
def init_params(prefix):
args = utility.parse_args()
args.image_dir = prefix + "./pdf/pdf_test/test1.pdf"
args.det_model_dir = prefix + "./inference/en_ppocr_mobile_v2.0_det_infer/"
args.rec_model_dir = prefix + "./inference/en_number_mobile_v2.0_rec_infer/"
args.cls_model_dir = prefix + "./inference/ch_ppocr_mobile_v2.0_cls_infer/"
args.vis_font_path = prefix + "./doc/fonts/simfang.ttf"
args.use_angle_cls = False
args.use_space_char = True
args.rec_char_dict_path = prefix + "./ppocr/utils/en_dict.txt"
args.use_gpu = False
return args
from fandango_algo.mytools.infer.pdf_struct import pdf_struct
def pdf2img2rec(pdf_path, prefix):
dimy = 1
    # split the PDF pages into images
pi = pdf2img_empty_cut_one()
# pi=pdf2img_4cut()
pi.set_slice(5)
pi.pdf_image(pdf_path, r"../../pdf/c/", 4, 8, 0)
# pi.pdf_image(r"./TheLittlePrince.pdf", r"./litt/", 10, 10, 0)
pi.empty_cut()
slice = pi.slice
img = pi.IMG
    # initialize inference parameters
args = init_params(prefix)
    # run recognition to get annotated images and text
_l, _r = reg(args, img)
    # merge the page slices back together
_l.set_pice(slice, dimy)
_l.setName("box")
_l.rsz()
txt = _l.output_txt()
_r.set_pice(slice, dimy)
_r.setName("text")
_r.rsz()
    # assemble the output data
output = pdf_struct()
output.add_boximg(_l.img_list)
output.add_textimg(_r.img_list)
output.set_txt(txt)
output.add_origin(pdf_path)
return output
# r=output.img2pdf("text")
# with open("box.pdf","wb") as f:
# f.write(r)
def demo(output_path, pdf_st):
    # write out the recognized text
r = pdf_st.get_txt()
with open(os.path.join(output_path, "text.txt"), "w") as f:
f.write(str(r))
# text pdf
r = pdf_st.img2pdf("text")
with open(os.path.join(output_path, "text.pdf"), "wb") as f:
f.write(r)
# box pdf
r = pdf_st.img2pdf("box")
with open(os.path.join(output_path, "box.pdf"), "wb") as f:
f.write(r)
# origin_pdf
r = pdf_st.img2pdf("origin")
with open(os.path.join(output_path, "origin.pdf"), "wb") as f:
f.write(r)
if __name__ == "__main__":
pdf = r"../../pdf/pdf_test/TheLittlePrince.pdf"
with open(pdf, "rb") as f:
c = f.read()
    # NOTE: pdf2img2rec expects (pdf_path, prefix); the prefix below is an assumed
    # relative path from this file to the fandango_algo package root.
    output = pdf2img2rec(c, "../../")
demo('./', output)
|
[] |
[] |
[
"FLAGS_allocator_strategy"
] |
[]
|
["FLAGS_allocator_strategy"]
|
python
| 1 | 0 | |
tests/integration/devfile/cmd_devfile_url_test.go
|
package devfile
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/openshift/odo/tests/helper"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("odo devfile url command tests", func() {
var componentName string
var commonVar helper.CommonVar
// This is run before every Spec (It)
var _ = BeforeEach(func() {
commonVar = helper.CommonBeforeEach()
componentName = helper.RandString(6)
helper.Chdir(commonVar.Context)
})
// This is run after every Spec (It)
var _ = AfterEach(func() {
helper.CommonAfterEach(commonVar)
})
Context("Listing urls", func() {
It("should list url after push using context", func() {
// to confirm that --context works we are using a subfolder of the context
subFolderContext := filepath.Join(commonVar.Context, helper.RandString(6))
helper.MakeDir(subFolderContext)
url1 := helper.RandString(5)
host := helper.RandString(5) + ".com"
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, "--context", subFolderContext, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), subFolderContext)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(subFolderContext, "devfile.yaml"))
stdout := helper.CmdShouldFail("odo", "url", "create", url1, "--port", "3000", "--ingress", "--context", subFolderContext)
Expect(stdout).To(ContainSubstring("host must be provided"))
stdout = helper.CmdShouldFail("odo", "url", "create", url1, "--port", "3000", "--host", host, "--ingress")
Expect(stdout).To(ContainSubstring("The current directory does not represent an odo component"))
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--ingress", "--context", subFolderContext)
stdout = helper.CmdShouldPass("odo", "push", "--context", subFolderContext)
Expect(stdout).Should(ContainSubstring(url1 + "." + host))
stdout = helper.CmdShouldPass("odo", "url", "list", "--context", subFolderContext)
helper.MatchAllInOutput(stdout, []string{url1, "Pushed", "false", "ingress"})
})
It("should list ingress url with appropriate state", func() {
url1 := helper.RandString(5)
url2 := helper.RandString(5)
host := helper.RandString(5) + ".com"
helper.CmdShouldPass("odo", "create", "nodejs", "--context", commonVar.Context, "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "9090", "--host", host, "--secure", "--ingress", "--context", commonVar.Context)
helper.CmdShouldPass("odo", "push", "--context", commonVar.Context)
stdout := helper.CmdShouldPass("odo", "url", "list", "--context", commonVar.Context)
helper.MatchAllInOutput(stdout, []string{url1, "Pushed", "true", "ingress"})
helper.CmdShouldPass("odo", "url", "delete", url1, "-f", "--context", commonVar.Context)
helper.CmdShouldPass("odo", "url", "create", url2, "--port", "8080", "--host", host, "--ingress", "--context", commonVar.Context)
stdout = helper.CmdShouldPass("odo", "url", "list", "--context", commonVar.Context)
helper.MatchAllInOutput(stdout, []string{url1, "Locally Deleted", "true", "ingress"})
helper.MatchAllInOutput(stdout, []string{url2, "Not Pushed", "false", "ingress"})
})
It("should be able to list ingress url in machine readable json format", func() {
url1 := helper.RandString(5)
host := helper.RandString(5) + ".com"
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
			// remove the endpoint that came with the devfile;
			// create an ingress instead so the test works on both OpenShift and non-OpenShift clusters
helper.CmdShouldPass("odo", "url", "delete", "3000/tcp", "-f")
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--ingress")
helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
// odo url list -o json
helper.WaitForCmdOut("odo", []string{"url", "list", "-o", "json"}, 1, true, func(output string) bool {
desiredURLListJSON := fmt.Sprintf(`{"kind":"List","apiVersion":"odo.dev/v1alpha1","metadata":{},"items":[{"kind":"url","apiVersion":"odo.dev/v1alpha1","metadata":{"name":"%s","creationTimestamp":null},"spec":{"host":"%s","port":3000,"secure": false,"path": "/", "kind":"ingress"},"status":{"state":"Pushed"}}]}`, url1, url1+"."+host)
if strings.Contains(output, url1) {
Expect(desiredURLListJSON).Should(MatchJSON(output))
return true
}
return false
})
})
})
Context("Creating urls", func() {
It("should create a URL without port flag if only one port exposed in devfile", func() {
url1 := helper.RandString(5)
host := helper.RandString(5) + ".com"
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
helper.CmdShouldPass("odo", "url", "create", url1, "--host", host, "--ingress")
stdout := helper.CmdShouldPass("odo", "url", "list")
helper.MatchAllInOutput(stdout, []string{url1, "3000", "Not Pushed"})
})
It("should create a secure URL", func() {
url1 := helper.RandString(5)
host := helper.RandString(5) + ".com"
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "9090", "--host", host, "--secure", "--ingress")
stdout := helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
helper.MatchAllInOutput(stdout, []string{"https:", url1 + "." + host})
stdout = helper.CmdShouldPass("odo", "url", "list")
helper.MatchAllInOutput(stdout, []string{"https:", url1 + "." + host, "true"})
})
It("create and delete with now flag should pass", func() {
var stdout string
url1 := helper.RandString(5)
host := helper.RandString(5) + ".com"
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
stdout = helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--now", "--ingress", "--context", commonVar.Context)
// check the env for the runMode
envOutput, err := helper.ReadFile(filepath.Join(commonVar.Context, ".odo/env/env.yaml"))
Expect(err).To(BeNil())
Expect(envOutput).To(ContainSubstring(" RunMode: run"))
helper.MatchAllInOutput(stdout, []string{"URL " + url1 + " created for component", "http:", url1 + "." + host})
stdout = helper.CmdShouldPass("odo", "url", "delete", url1, "--now", "-f", "--context", commonVar.Context)
helper.MatchAllInOutput(stdout, []string{"URL " + url1 + " successfully deleted", "Applying URL changes"})
})
It("should be able to push again twice after creating and deleting a url", func() {
var stdOut string
url1 := helper.RandString(5)
host := helper.RandString(5) + ".com"
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000", "--host", host, "--ingress")
helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
stdOut = helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
helper.DontMatchAllInOutput(stdOut, []string{"successfully deleted", "created"})
Expect(stdOut).To(ContainSubstring("URLs are synced with the cluster, no changes are required"))
helper.CmdShouldPass("odo", "url", "delete", url1, "-f")
helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
stdOut = helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
helper.DontMatchAllInOutput(stdOut, []string{"successfully deleted", "created"})
Expect(stdOut).To(ContainSubstring("URLs are synced with the cluster, no changes are required"))
})
It("should not allow creating an invalid host", func() {
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project)
stdOut := helper.CmdShouldFail("odo", "url", "create", "--host", "https://127.0.0.1:60104", "--port", "3000", "--ingress")
Expect(stdOut).To(ContainSubstring("is not a valid host name"))
})
It("should not allow using tls secret if url is not secure", func() {
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project)
stdOut := helper.CmdShouldFail("odo", "url", "create", "--tls-secret", "foo", "--port", "3000", "--ingress")
Expect(stdOut).To(ContainSubstring("TLS secret is only available for secure URLs of Ingress kind"))
})
It("should report multiple issues when it's the case", func() {
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project)
stdOut := helper.CmdShouldFail("odo", "url", "create", "--host", "https://127.0.0.1:60104", "--tls-secret", "foo", "--port", "3000", "--ingress")
Expect(stdOut).To(And(ContainSubstring("is not a valid host name"), ContainSubstring("TLS secret is only available for secure URLs of Ingress kind")))
})
It("should not allow creating under an invalid container", func() {
containerName := helper.RandString(5)
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project)
stdOut := helper.CmdShouldFail("odo", "url", "create", "--host", "com", "--port", "3000", "--container", containerName, "--ingress")
Expect(stdOut).To(ContainSubstring(fmt.Sprintf("the container specified: %s does not exist in devfile", containerName)))
})
It("should not allow creating an endpoint with same name", func() {
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
stdOut := helper.CmdShouldFail("odo", "url", "create", "3000/tcp", "--host", "com", "--port", "3000", "--ingress")
Expect(stdOut).To(ContainSubstring("url 3000/tcp already exist in devfile endpoint entry"))
})
It("should create URL with path defined in Endpoint", func() {
url1 := helper.RandString(5)
host := helper.RandString(5) + ".com"
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "8090", "--host", host, "--path", "testpath", "--ingress")
stdout := helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
helper.MatchAllInOutput(stdout, []string{url1, "/testpath", "created"})
})
It("should create URLs under different container names", func() {
url1 := helper.RandString(5)
host := helper.RandString(5) + ".com"
url2 := helper.RandString(5)
helper.CmdShouldPass("odo", "create", "java-springboot", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "springboot", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "springboot", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "8080", "--host", host, "--container", "runtime", "--ingress")
helper.CmdShouldPass("odo", "url", "create", url2, "--port", "9090", "--host", host, "--container", "tools", "--ingress")
stdout := helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
helper.MatchAllInOutput(stdout, []string{url1, url2, "created"})
})
It("should not create URLs under different container names with same port number", func() {
url1 := helper.RandString(5)
host := helper.RandString(5) + ".com"
helper.CmdShouldPass("odo", "create", "java-springboot", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "springboot", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "springboot", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
stdout := helper.CmdShouldFail("odo", "url", "create", url1, "--port", "8080", "--host", host, "--container", "tools", "--ingress")
helper.MatchAllInOutput(stdout, []string{fmt.Sprintf("cannot set URL %s under container tools", url1), "TargetPort 8080 is being used under container runtime"})
})
It("should error out on devfile flag", func() {
helper.CmdShouldFail("odo", "url", "create", "mynodejs", "--devfile", "invalid.yaml")
helper.CmdShouldFail("odo", "url", "delete", "mynodejs", "--devfile", "invalid.yaml")
})
})
Context("Testing URLs for OpenShift specific scenarios", func() {
JustBeforeEach(func() {
if os.Getenv("KUBERNETES") == "true" {
Skip("This is a OpenShift specific scenario, skipping")
}
})
It("should error out when a host is provided with a route on a openShift cluster", func() {
url1 := helper.RandString(5)
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
output := helper.CmdShouldFail("odo", "url", "create", url1, "--host", "com", "--port", "3000")
Expect(output).To(ContainSubstring("host is not supported"))
})
It("should list route and ingress urls with appropriate state", func() {
url1 := helper.RandString(5)
url2 := helper.RandString(5)
ingressurl := helper.RandString(5)
host := helper.RandString(5) + ".com"
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "9090", "--secure")
helper.CmdShouldPass("odo", "url", "create", ingressurl, "--port", "8080", "--host", host, "--ingress")
helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
helper.CmdShouldPass("odo", "url", "create", url2, "--port", "8080")
stdout := helper.CmdShouldPass("odo", "url", "list", "--context", commonVar.Context)
helper.MatchAllInOutput(stdout, []string{url1, "Pushed", "true", "route"})
helper.MatchAllInOutput(stdout, []string{url2, "Not Pushed", "false", "route"})
helper.MatchAllInOutput(stdout, []string{ingressurl, "Pushed", "false", "ingress"})
helper.CmdShouldPass("odo", "url", "delete", url1, "-f")
stdout = helper.CmdShouldPass("odo", "url", "list", "--context", commonVar.Context)
helper.MatchAllInOutput(stdout, []string{url1, "Locally Deleted", "true", "route"})
helper.MatchAllInOutput(stdout, []string{url2, "Not Pushed", "false", "route"})
helper.MatchAllInOutput(stdout, []string{ingressurl, "Pushed", "false", "ingress"})
})
It("should create a automatically route on a openShift cluster", func() {
url1 := helper.RandString(5)
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "3000")
helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
pushStdOut := helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
helper.DontMatchAllInOutput(pushStdOut, []string{"successfully deleted", "created"})
Expect(pushStdOut).To(ContainSubstring("URLs are synced with the cluster, no changes are required"))
output := helper.CmdShouldPass("odo", "url", "list", "--context", commonVar.Context)
Expect(output).Should(ContainSubstring(url1))
helper.CmdShouldPass("odo", "url", "delete", url1, "-f")
helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
pushStdOut = helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
helper.DontMatchAllInOutput(pushStdOut, []string{"successfully deleted", "created"})
Expect(pushStdOut).To(ContainSubstring("URLs are synced with the cluster, no changes are required"))
output = helper.CmdShouldPass("odo", "url", "list", "--context", commonVar.Context)
Expect(output).ShouldNot(ContainSubstring(url1))
})
It("should create a route on a openShift cluster without calling url create", func() {
helper.CmdShouldPass("odo", "create", "nodejs", "--project", commonVar.Project, componentName)
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
output := helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
helper.MatchAllInOutput(output, []string{"URL 3000-tcp", "created"})
output = helper.CmdShouldPass("odo", "url", "list", "--context", commonVar.Context)
Expect(output).Should(ContainSubstring("3000-tcp"))
})
It("should create a url for a unsupported devfile component", func() {
url1 := helper.RandString(5)
helper.CopyExample(filepath.Join("source", "python"), commonVar.Context)
helper.Chdir(commonVar.Context)
helper.CmdShouldPass("odo", "create", "python", "--project", commonVar.Project, componentName)
helper.CmdShouldPass("odo", "url", "create", url1)
helper.CmdShouldPass("odo", "push", "--project", commonVar.Project)
output := helper.CmdShouldPass("odo", "url", "list", "--context", commonVar.Context)
Expect(output).Should(ContainSubstring(url1))
})
// remove once https://github.com/openshift/odo/issues/3550 is resolved
It("should list URLs for s2i components", func() {
url1 := helper.RandString(5)
url2 := helper.RandString(5)
componentName := helper.RandString(6)
helper.CopyExample(filepath.Join("source", "nodejs"), commonVar.Context)
helper.CmdShouldPass("odo", "create", "nodejs", "--context", commonVar.Context, "--project", commonVar.Project, componentName, "--s2i")
helper.CmdShouldPass("odo", "url", "create", url1, "--port", "8080", "--context", commonVar.Context)
helper.CmdShouldPass("odo", "url", "create", url2, "--port", "8080", "--context", commonVar.Context, "--ingress", "--host", "com")
stdout := helper.CmdShouldPass("odo", "url", "list", "--context", commonVar.Context)
helper.MatchAllInOutput(stdout, []string{url1, url2})
})
})
})
|
[
"\"KUBERNETES\""
] |
[] |
[
"KUBERNETES"
] |
[]
|
["KUBERNETES"]
|
go
| 1 | 0 | |
python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.flowers as flowers
import math
import paddle.fluid as fluid
import unittest
import numpy as np
import paddle
import os
def Lenet(data, class_dim):
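    # LeNet-style CNN: two conv -> batch_norm(ReLU) -> max-pool blocks followed
    # by two fully connected layers; the final layer is a softmax over class_dim.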
conv1 = fluid.layers.conv2d(data, 32, 5, 1, act=None)
bn1 = fluid.layers.batch_norm(conv1, act='relu')
pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2)
conv2 = fluid.layers.conv2d(pool1, 50, 5, 1, act=None)
bn2 = fluid.layers.batch_norm(conv2, act='relu')
pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2)
fc1 = fluid.layers.fc(pool2, size=500, act='relu')
fc2 = fluid.layers.fc(fc1, size=class_dim, act='softmax')
return fc2
class TestFetchOp(unittest.TestCase):
def parallel_exe(self, train_inputs, seed, use_cuda):
main = fluid.Program()
startup = fluid.Program()
startup.random_seed = seed
with fluid.program_guard(main, startup):
data = fluid.layers.data(
name='image', shape=[3, 224, 224], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
out = Lenet(data, class_dim=102)
loss = fluid.layers.cross_entropy(input=out, label=label)
loss = fluid.layers.mean(loss)
opt = fluid.optimizer.Momentum(
learning_rate=0.1,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
opt.minimize(loss)
        # TODO(zcd): I found that once the memory optimizer is enabled,
        # parallel_exe doesn't fetch some variables, such as conv2d_0.b_0@GRAD and
        # conv2d_1.b_0@GRAD. Those variables should not be pruned.
        # fluid.memory_optimize(main)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup)
feeder = fluid.DataFeeder(place=place, feed_list=[data, label])
pe = fluid.ParallelExecutor(
use_cuda=use_cuda, loss_name=loss.name, main_program=main)
fetch_list = []
all_vars = main.global_block().vars
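        # Fetch every variable that is not a temporary/internal one, plus all
        # persistable variables, and check below that none comes back NaN/Inf.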
        for k, v in all_vars.items():
            if 'tmp' not in k and k[0] != '_' or v.persistable:
fetch_list.append(k)
for data in train_inputs:
ret = pe.run(fetch_list, feed=feeder.feed(data))
for i in range(len(fetch_list)):
assert not math.isnan(np.sum(ret[i])) and \
not math.isinf(np.sum(ret[i]))
def test_fetch_op(self):
tst_reader = paddle.batch(flowers.test(use_xmap=False), batch_size=16)
tst_reader_iter = tst_reader()
iters = 3
train_inputs = []
for i in range(iters):
            train_inputs.append(next(tst_reader_iter))
os.environ['CPU_NUM'] = str(4)
self.parallel_exe(train_inputs, seed=1, use_cuda=True)
self.parallel_exe(train_inputs, seed=1, use_cuda=False)
class TestFeedParallel(unittest.TestCase):
def parallel_exe(self, use_cuda, seed):
main = fluid.Program()
startup = fluid.Program()
startup.random_seed = seed
with fluid.scope_guard(fluid.core.Scope()):
with fluid.program_guard(main, startup):
data = fluid.layers.data(
name='image', shape=[3, 224, 224], dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
out = Lenet(data, class_dim=102)
loss = fluid.layers.cross_entropy(input=out, label=label)
loss = fluid.layers.mean(loss)
opt = fluid.optimizer.Momentum(
learning_rate=0.1,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
opt.minimize(loss)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=[data, label])
reader = feeder.decorate_reader(
paddle.batch(
flowers.train(), batch_size=16), multi_devices=True)
exe = fluid.Executor(place)
exe.run(startup)
pe = fluid.ParallelExecutor(
use_cuda=use_cuda, loss_name=loss.name, main_program=main)
for batch_id, data in enumerate(reader()):
loss_np = np.array(pe.run(feed=data, fetch_list=[loss.name])[0])
            print(batch_id, loss_np)
if batch_id == 2:
break
def test_feed_op(self):
os.environ['CPU_NUM'] = str(4)
self.parallel_exe(use_cuda=True, seed=1)
self.parallel_exe(use_cuda=False, seed=1)
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"CPU_NUM"
] |
[]
|
["CPU_NUM"]
|
python
| 1 | 0 | |
datalabeling/export_data.py
|
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from google.api_core.client_options import ClientOptions
# [START datalabeling_export_data_beta]
def export_data(dataset_resource_name, annotated_dataset_resource_name,
export_gcs_uri):
"""Exports a dataset from the given Google Cloud project."""
from google.cloud import datalabeling_v1beta1 as datalabeling
client = datalabeling.DataLabelingServiceClient()
# [END datalabeling_export_data_beta]
    # If set, use the test endpoint override - this will prevent tests on
    # this snippet from triggering any action by a real human.
if 'DATALABELING_ENDPOINT' in os.environ:
opts = ClientOptions(api_endpoint=os.getenv('DATALABELING_ENDPOINT'))
client = datalabeling.DataLabelingServiceClient(client_options=opts)
# [START datalabeling_export_data_beta]
gcs_destination = datalabeling.types.GcsDestination(
output_uri=export_gcs_uri, mime_type='text/csv')
output_config = datalabeling.types.OutputConfig(
gcs_destination=gcs_destination)
response = client.export_data(
dataset_resource_name,
annotated_dataset_resource_name,
output_config
)
print('Dataset ID: {}\n'.format(response.result().dataset))
print('Output config:')
print('\tGcs destination:')
print('\t\tOutput URI: {}\n'.format(
response.result().output_config.gcs_destination.output_uri))
# [END datalabeling_export_data_beta]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'--dataset-resource-name',
help='Dataset resource name. Required.',
required=True
)
parser.add_argument(
'--annotated-dataset-resource-name',
help='Annotated Dataset resource name. Required.',
required=True
)
parser.add_argument(
'--export-gcs-uri',
help='The export GCS URI. Required.',
required=True
)
args = parser.parse_args()
export_data(
args.dataset_resource_name,
args.annotated_dataset_resource_name,
args.export_gcs_uri
)
|
[] |
[] |
[
"DATALABELING_ENDPOINT"
] |
[]
|
["DATALABELING_ENDPOINT"]
|
python
| 1 | 0 | |
src/toil/test/cwl/cwlTest.py
|
# Copyright (C) 2015-2021 Regents of the University of California
# Copyright (C) 2015 Curoverse, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import unittest
import uuid
import zipfile
from io import StringIO
from mock import Mock, call
from typing import Dict, List, MutableMapping, Optional
from urllib.request import urlretrieve
import psutil
import pytest
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
from toil.cwl.utils import visit_top_cwl_class, visit_cwl_class_and_reduce, download_structure
from toil.fileStores import FileID
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.test import (ToilTest,
needs_aws_s3,
needs_cwl,
needs_docker,
needs_gridengine,
needs_kubernetes,
needs_lsf,
needs_mesos,
needs_parasol,
needs_slurm,
needs_torque,
slow)
log = logging.getLogger(__name__)
CONFORMANCE_TEST_TIMEOUT = 3600
def run_conformance_tests(workDir: str, yml: str, caching: bool = False, batchSystem: str = None,
selected_tests: str = None, selected_tags: str = None, skipped_tests: str = None,
extra_args: List[str] = [], must_support_all_features: bool = False) -> Optional[str]:
"""
Run the CWL conformance tests.
:param workDir: Directory to run tests in.
:param yml: CWL test list YML to run tests from.
:param caching: If True, use Toil file store caching.
:param batchSystem: If set, use this batch system instead of the default single_machine.
:param selected_tests: If set, use this description of test numbers to run (comma-separated numbers or ranges)
:param selected_tags: As an alternative to selected_tests, run tests with the given tags.
:param skipped_tests: Comma-separated string labels of tests to skip.
:param extra_args: Provide these extra arguments to toil-cwl-runner for each test.
:param must_support_all_features: If set, fail if some CWL optional features are unsupported.
"""
try:
cmd = ['cwltest',
'--verbose',
'--tool=toil-cwl-runner',
f'--test={yml}',
'--timeout=2400',
f'--basedir={workDir}']
if selected_tests:
cmd.append(f'-n={selected_tests}')
if selected_tags:
cmd.append(f'--tags={selected_tags}')
if skipped_tests:
cmd.append(f'-S{skipped_tests}')
args_passed_directly_to_toil = [f'--disableCaching={not caching}',
'--clean=always',
'--logDebug'] + extra_args
if 'SINGULARITY_DOCKER_HUB_MIRROR' in os.environ:
args_passed_directly_to_toil.append('--setEnv=SINGULARITY_DOCKER_HUB_MIRROR')
job_store_override = None
if batchSystem == 'kubernetes':
# Run tests in parallel on Kubernetes.
# We can throw a bunch at it at once and let Kubernetes schedule.
cmd.append('-j8')
else:
# Run tests in parallel on the local machine
cmd.append(f'-j{int(psutil.cpu_count()/2)}')
if batchSystem:
args_passed_directly_to_toil.append(f"--batchSystem={batchSystem}")
cmd.extend(['--'] + args_passed_directly_to_toil)
log.info("Running: '%s'", "' '".join(cmd))
try:
output = subprocess.check_output(cmd, cwd=workDir, stderr=subprocess.STDOUT)
finally:
if job_store_override:
# Clean up the job store we used for all the tests, if it is still there.
subprocess.run(['toil', 'clean', job_store_override])
except subprocess.CalledProcessError as e:
only_unsupported = False
# check output -- if we failed but only have unsupported features, we're okay
p = re.compile(r"(?P<failures>\d+) failures, (?P<unsupported>\d+) unsupported features")
error_log = e.output.decode('utf-8')
for line in error_log.split('\n'):
m = p.search(line)
if m:
if int(m.group("failures")) == 0 and int(m.group("unsupported")) > 0:
only_unsupported = True
break
if (not only_unsupported) or must_support_all_features:
print(error_log)
raise e
@needs_cwl
class CWLv10Test(ToilTest):
def setUp(self):
"""Runs anew before each test to create farm fresh temp dirs."""
self.outDir = f'/tmp/toil-cwl-test-{str(uuid.uuid4())}'
os.makedirs(self.outDir)
self.rootDir = self._projectRootPath()
self.cwlSpec = os.path.join(self.rootDir, 'src/toil/test/cwl/spec')
self.workDir = os.path.join(self.cwlSpec, 'v1.0')
# The latest cwl git commit hash from https://github.com/common-workflow-language/common-workflow-language.
# Update it to get the latest tests.
testhash = '6a955874ade22080b8ef962b4e0d6e408112c1ef' # Date: Tue Dec 16 2020 8:43pm PST
url = 'https://github.com/common-workflow-language/common-workflow-language/archive/%s.zip' % testhash
if not os.path.exists(self.cwlSpec):
urlretrieve(url, 'spec.zip')
with zipfile.ZipFile('spec.zip', 'r') as z:
z.extractall()
shutil.move('common-workflow-language-%s' % testhash, self.cwlSpec)
os.remove('spec.zip')
def tearDown(self):
"""Clean up outputs."""
if os.path.exists(self.outDir):
shutil.rmtree(self.outDir)
unittest.TestCase.tearDown(self)
def _tester(self, cwlfile, jobfile, expect, main_args=[], out_name="output"):
from toil.cwl import cwltoil
st = StringIO()
main_args = main_args[:]
main_args.extend(['--outdir', self.outDir,
os.path.join(self.rootDir, cwlfile), os.path.join(self.rootDir, jobfile)])
cwltoil.main(main_args, stdout=st)
out = json.loads(st.getvalue())
out[out_name].pop("http://commonwl.org/cwltool#generation", None)
out[out_name].pop("nameext", None)
out[out_name].pop("nameroot", None)
self.assertEqual(out, expect)
def _debug_worker_tester(self, cwlfile, jobfile, expect):
from toil.cwl import cwltoil
st = StringIO()
cwltoil.main(['--debugWorker', '--outdir', self.outDir,
os.path.join(self.rootDir, cwlfile),
os.path.join(self.rootDir, jobfile)], stdout=st)
out = json.loads(st.getvalue())
out["output"].pop("http://commonwl.org/cwltool#generation", None)
out["output"].pop("nameext", None)
out["output"].pop("nameroot", None)
self.assertEqual(out, expect)
def revsort(self, cwl_filename, tester_fn):
tester_fn('src/toil/test/cwl/' + cwl_filename,
'src/toil/test/cwl/revsort-job.json',
self._expected_revsort_output(self.outDir))
def download(self, inputs, tester_fn):
input_location = os.path.join('src/toil/test/cwl', inputs)
tester_fn('src/toil/test/cwl/download.cwl',
input_location,
self._expected_download_output(self.outDir))
def test_mpi(self):
from toil.cwl import cwltoil
stdout = StringIO()
main_args = ['--outdir', self.outDir,
'--enable-dev',
'--enable-ext',
'--mpi-config-file', os.path.join(self.rootDir, 'src/toil/test/cwl/mock_mpi/fake_mpi.yml'),
os.path.join(self.rootDir, 'src/toil/test/cwl/mpi_simple.cwl')]
cwltoil.main(main_args, stdout=stdout)
out = json.loads(stdout.getvalue())
with open(out.get('pids', {}).get('location')[len('file://'):], 'r') as f:
two_pids = [int(i) for i in f.read().split()]
self.assertEqual(len(two_pids), 2)
self.assertTrue(isinstance(two_pids[0], int))
self.assertTrue(isinstance(two_pids[1], int))
@needs_aws_s3
def test_s3_as_secondary_file(self):
from toil.cwl import cwltoil
stdout = StringIO()
main_args = ['--outdir', self.outDir,
os.path.join(self.rootDir, 'src/toil/test/cwl/s3_secondary_file.cwl'),
os.path.join(self.rootDir, 'src/toil/test/cwl/s3_secondary_file.json')]
cwltoil.main(main_args, stdout=stdout)
out = json.loads(stdout.getvalue())
self.assertEqual(out['output']['checksum'], 'sha1$d14dd02e354918b4776b941d154c18ebc15b9b38')
self.assertEqual(out['output']['size'], 24)
with open(out['output']['location'][len('file://'):], 'r') as f:
self.assertEqual(f.read().strip(), 'When is s4 coming out?')
def test_run_revsort(self):
self.revsort('revsort.cwl', self._tester)
def test_run_revsort2(self):
self.revsort('revsort2.cwl', self._tester)
def test_run_revsort_debug_worker(self):
self.revsort('revsort.cwl', self._debug_worker_tester)
@needs_aws_s3
def test_run_s3(self):
self.download('download_s3.json', self._tester)
def test_run_http(self):
self.download('download_http.json', self._tester)
def test_run_https(self):
self.download('download_https.json', self._tester)
@slow
def test_bioconda(self):
self._tester('src/toil/test/cwl/seqtk_seq.cwl',
'src/toil/test/cwl/seqtk_seq_job.json',
self._expected_seqtk_output(self.outDir),
main_args=["--beta-conda-dependencies"],
out_name="output1")
@needs_docker
def test_biocontainers(self):
self._tester('src/toil/test/cwl/seqtk_seq.cwl',
'src/toil/test/cwl/seqtk_seq_job.json',
self._expected_seqtk_output(self.outDir),
main_args=["--beta-use-biocontainers"],
out_name="output1")
@slow
def test_restart(self):
"""
Enable restarts with toil-cwl-runner -- run failing test, re-run correct test.
Only implemented for single machine.
"""
log.info('Running CWL Test Restart. Expecting failure, then success.')
from toil.cwl import cwltoil
from toil.jobStores.abstractJobStore import NoSuchJobStoreException
from toil.leader import FailedJobsException
outDir = self._createTempDir()
cwlDir = os.path.join(self._projectRootPath(), "src", "toil", "test", "cwl")
cmd = ['--outdir', outDir, '--jobStore', os.path.join(outDir, 'jobStore'), "--no-container",
os.path.join(cwlDir, "revsort.cwl"), os.path.join(cwlDir, "revsort-job.json")]
# create a fake rev bin that actually points to the "date" binary
cal_path = [d for d in os.environ["PATH"].split(':') if os.path.exists(os.path.join(d, 'date'))][-1]
os.symlink(os.path.join(cal_path, 'date'), f'{os.path.join(outDir, "rev")}')
def path_with_bogus_rev():
# append to the front of the PATH so that we check there first
return f'{outDir}:' + os.environ["PATH"]
orig_path = os.environ["PATH"]
# Force a failure by trying to use an incorrect version of `rev` from the PATH
os.environ["PATH"] = path_with_bogus_rev()
try:
cwltoil.main(cmd)
self.fail("Expected problem job with incorrect PATH did not fail")
except FailedJobsException:
pass
# Finish the job with a correct PATH
os.environ["PATH"] = orig_path
cwltoil.main(["--restart"] + cmd)
# Should fail because previous job completed successfully
try:
cwltoil.main(["--restart"] + cmd)
self.fail("Restart with missing directory did not fail")
except NoSuchJobStoreException:
pass
@slow
@pytest.mark.timeout(CONFORMANCE_TEST_TIMEOUT)
def test_run_conformance_with_caching(self):
self.test_run_conformance(caching=True)
@slow
@pytest.mark.timeout(CONFORMANCE_TEST_TIMEOUT)
def test_run_conformance(self, batchSystem=None, caching=False, selected_tests=None):
run_conformance_tests(workDir=self.workDir,
yml='conformance_test_v1.0.yaml',
caching=caching,
batchSystem=batchSystem,
selected_tests=selected_tests)
@slow
@needs_lsf
@unittest.skip
def test_lsf_cwl_conformance(self, **kwargs):
return self.test_run_conformance(batchSystem="lsf", **kwargs)
@slow
@needs_slurm
@unittest.skip
def test_slurm_cwl_conformance(self, **kwargs):
return self.test_run_conformance(batchSystem="slurm", **kwargs)
@slow
@needs_torque
@unittest.skip
def test_torque_cwl_conformance(self, **kwargs):
return self.test_run_conformance(batchSystem="torque", **kwargs)
@slow
@needs_gridengine
@unittest.skip
def test_gridengine_cwl_conformance(self, **kwargs):
return self.test_run_conformance(batchSystem="grid_engine", **kwargs)
@slow
@needs_mesos
@unittest.skip
def test_mesos_cwl_conformance(self, **kwargs):
return self.test_run_conformance(batchSystem="mesos", **kwargs)
@slow
@needs_parasol
@unittest.skip
def test_parasol_cwl_conformance(self, **kwargs):
return self.test_run_conformance(batchSystem="parasol", **kwargs)
@slow
@needs_kubernetes
def test_kubernetes_cwl_conformance(self, **kwargs):
return self.test_run_conformance(batchSystem="kubernetes",
# This test doesn't work with
# Singularity; see
# https://github.com/common-workflow-language/cwltool/blob/7094ede917c2d5b16d11f9231fe0c05260b51be6/conformance-test.sh#L99-L117
skipped_tests="docker_entrypoint",
**kwargs)
@slow
@needs_lsf
@unittest.skip
def test_lsf_cwl_conformance_with_caching(self):
return self.test_lsf_cwl_conformance(caching=True)
@slow
@needs_slurm
@unittest.skip
def test_slurm_cwl_conformance_with_caching(self):
return self.test_slurm_cwl_conformance(caching=True)
@slow
@needs_torque
@unittest.skip
def test_torque_cwl_conformance_with_caching(self):
return self.test_torque_cwl_conformance(caching=True)
@slow
@needs_gridengine
@unittest.skip
def test_gridengine_cwl_conformance_with_caching(self):
return self.test_gridengine_cwl_conformance(caching=True)
@slow
@needs_mesos
@unittest.skip
def test_mesos_cwl_conformance_with_caching(self):
return self.test_mesos_cwl_conformance(caching=True)
@slow
@needs_parasol
@unittest.skip
def test_parasol_cwl_conformance_with_caching(self):
return self.test_parasol_cwl_conformance(caching=True)
@slow
@needs_kubernetes
def test_kubernetes_cwl_conformance_with_caching(self):
return self.test_kubernetes_cwl_conformance(caching=True)
@staticmethod
def _expected_seqtk_output(outDir):
# Having unicode string literals isn't necessary for the assertion but
# makes for a less noisy diff in case the assertion fails.
loc = 'file://' + os.path.join(outDir, 'out')
return {
'output1': {
'location': loc,
'checksum': 'sha1$322e001e5a99f19abdce9f02ad0f02a17b5066c2',
'basename': 'out',
'class': 'File',
'size': 150}}
@staticmethod
def _expected_revsort_output(outDir):
# Having unicode string literals isn't necessary for the assertion but
# makes for a less noisy diff in case the assertion fails.
loc = 'file://' + os.path.join(outDir, 'output.txt')
return {
'output': {
'location': loc,
'basename': 'output.txt',
'size': 1111,
'class': 'File',
'checksum': 'sha1$b9214658cc453331b62c2282b772a5c063dbd284'}}
@staticmethod
def _expected_download_output(outDir):
# Having unicode string literals isn't necessary for the assertion but
# makes for a less noisy diff in case the assertion fails.
loc = 'file://' + os.path.join(outDir, 'output.txt')
return {
'output': {
'location': loc,
'basename': 'output.txt',
'size': 0,
'class': 'File',
'checksum': 'sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709'}}
@needs_cwl
class CWLv11Test(ToilTest):
@classmethod
def setUpClass(cls):
"""Runs anew before each test to create farm fresh temp dirs."""
cls.outDir = f'/tmp/toil-cwl-v1_1-test-{str(uuid.uuid4())}'
os.makedirs(cls.outDir)
cls.rootDir = cls._projectRootPath()
cls.cwlSpec = os.path.join(cls.rootDir, 'src/toil/test/cwl/spec_v11')
cls.test_yaml = os.path.join(cls.cwlSpec, 'conformance_tests.yaml')
# TODO: Use a commit zip in case someone decides to rewrite master's history?
url = 'https://github.com/common-workflow-language/cwl-v1.1.git'
commit = '664835e83eb5e57eee18a04ce7b05fb9d70d77b7'
p = subprocess.Popen(f'git clone {url} {cls.cwlSpec} && cd {cls.cwlSpec} && git checkout {commit}', shell=True)
p.communicate()
def tearDown(self):
"""Clean up outputs."""
if os.path.exists(self.outDir):
shutil.rmtree(self.outDir)
unittest.TestCase.tearDown(self)
@slow
@pytest.mark.timeout(CONFORMANCE_TEST_TIMEOUT)
def test_run_conformance(self, **kwargs):
run_conformance_tests(workDir=self.cwlSpec,
yml=self.test_yaml,
**kwargs)
@slow
@pytest.mark.timeout(CONFORMANCE_TEST_TIMEOUT)
def test_run_conformance_with_caching(self):
self.test_run_conformance(caching=True)
@slow
@needs_kubernetes
def test_kubernetes_cwl_conformance(self, **kwargs):
return self.test_run_conformance(batchSystem="kubernetes",
# These tests don't work with
# Singularity; see
# https://github.com/common-workflow-language/cwltool/blob/7094ede917c2d5b16d11f9231fe0c05260b51be6/conformance-test.sh#L99-L117
skipped_tests="docker_entrypoint,stdin_shorcut",
**kwargs)
@slow
@needs_kubernetes
def test_kubernetes_cwl_conformance_with_caching(self):
return self.test_kubernetes_cwl_conformance(caching=True)
@needs_cwl
class CWLv12Test(ToilTest):
@classmethod
def setUpClass(cls):
"""Runs anew before each test to create farm fresh temp dirs."""
cls.outDir = f'/tmp/toil-cwl-v1_2-test-{str(uuid.uuid4())}'
os.makedirs(cls.outDir)
cls.rootDir = cls._projectRootPath()
cls.cwlSpec = os.path.join(cls.rootDir, 'src/toil/test/cwl/spec_v12')
cls.test_yaml = os.path.join(cls.cwlSpec, 'conformance_tests.yaml')
# TODO: Use a commit zip in case someone decides to rewrite master's history?
url = 'https://github.com/common-workflow-language/cwl-v1.2.git'
commit = '8c3fd9d9f0209a51c5efacb1c7bc02a1164688d6'
p = subprocess.Popen(f'git clone {url} {cls.cwlSpec} && cd {cls.cwlSpec} && git checkout {commit}', shell=True)
p.communicate()
def tearDown(self):
"""Clean up outputs."""
if os.path.exists(self.outDir):
shutil.rmtree(self.outDir)
unittest.TestCase.tearDown(self)
@slow
@pytest.mark.timeout(CONFORMANCE_TEST_TIMEOUT)
def test_run_conformance(self, **kwargs):
run_conformance_tests(workDir=self.cwlSpec,
yml=self.test_yaml,
**kwargs)
@slow
@pytest.mark.timeout(CONFORMANCE_TEST_TIMEOUT)
def test_run_conformance_with_caching(self):
self.test_run_conformance(caching=True)
@slow
@pytest.mark.timeout(CONFORMANCE_TEST_TIMEOUT)
def test_run_conformance_with_in_place_update(self):
"""
Make sure that with --bypass-file-store we properly support in place
update on a single node, and that this doesn't break any other
features.
"""
self.test_run_conformance(extra_args=['--bypass-file-store'],
must_support_all_features=True)
@slow
@needs_kubernetes
def test_kubernetes_cwl_conformance(self, **kwargs):
return self.test_run_conformance(batchSystem="kubernetes",
# This test doesn't work with
# Singularity; see
# https://github.com/common-workflow-language/cwltool/blob/7094ede917c2d5b16d11f9231fe0c05260b51be6/conformance-test.sh#L99-L117
# and
# https://github.com/common-workflow-language/cwltool/issues/1441#issuecomment-826747975
skipped_tests="docker_entrypoint",
**kwargs)
@slow
@needs_kubernetes
def test_kubernetes_cwl_conformance_with_caching(self):
return self.test_kubernetes_cwl_conformance(caching=True)
def _expected_streaming_output(self, outDir):
# Having unicode string literals isn't necessary for the assertion but
# makes for a less noisy diff in case the assertion fails.
loc = "file://" + os.path.join(outDir, "output.txt")
return {
"output": {
"location": loc,
"basename": "output.txt",
"size": 24,
"class": "File",
"checksum": "sha1$d14dd02e354918b4776b941d154c18ebc15b9b38",
}
}
@needs_aws_s3
def test_streamable(self):
"""
Test that a file with 'streamable'=True is a named pipe
"""
cwlfile = "src/toil/test/cwl/stream.cwl"
jobfile = "src/toil/test/cwl/stream.json"
out_name = "output"
jobstore = f'--jobStore=aws:us-west-1:toil-stream-{uuid.uuid4()}'
from toil.cwl import cwltoil
st = StringIO()
args = [
"--outdir",
self.outDir,
jobstore,
os.path.join(self.rootDir, cwlfile),
os.path.join(self.rootDir, jobfile),
]
cwltoil.main(args, stdout=st)
out = json.loads(st.getvalue())
out[out_name].pop("http://commonwl.org/cwltool#generation", None)
out[out_name].pop("nameext", None)
out[out_name].pop("nameroot", None)
self.assertEqual(out, self._expected_streaming_output(self.outDir))
with open(out[out_name]["location"][len("file://") :], "r") as f:
self.assertEqual(f.read().strip(), "When is s4 coming out?")
@needs_cwl
class CWLSmallTests(ToilTest):
def test_usage_message(self):
"""
This is purely to ensure a (more) helpful error message is printed if a user does
not order their positional args correctly [cwl, cwl-job (json/yml/yaml), jobstore].
"""
toil = 'toil-cwl-runner'
cwl = 'test/cwl/revsort.cwl'
cwl_job_json = 'test/cwl/revsort-job.json'
jobstore = 'delete-test-toil'
random_option_1 = '--logInfo'
random_option_2 = '--disableCaching=false'
cmd_wrong_ordering_1 = [toil, cwl, cwl_job_json, jobstore, random_option_1, random_option_2]
cmd_wrong_ordering_2 = [toil, cwl, jobstore, random_option_1, random_option_2, cwl_job_json]
cmd_wrong_ordering_3 = [toil, jobstore, random_option_1, random_option_2, cwl, cwl_job_json]
for cmd in [cmd_wrong_ordering_1, cmd_wrong_ordering_2, cmd_wrong_ordering_3]:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertIn(b'Usage: toil-cwl-runner [options] example.cwl example-job.yaml', stderr)
self.assertIn(b'All positional arguments [cwl, yml_or_json] '
b'must always be specified last for toil-cwl-runner.', stderr)
def test_workflow_echo_string(self):
toil = 'toil-cwl-runner'
jobstore = f'--jobStore=file:explicit-local-jobstore-{uuid.uuid4()}'
option_1 = '--strict-memory-limit'
option_2 = '--force-docker-pull'
option_3 = '--clean=always'
cwl = os.path.join(self._projectRootPath(), 'src/toil/test/cwl/echo_string.cwl')
cmd = [toil, jobstore, option_1, option_2, option_3, cwl]
log.debug(f'Now running: {" ".join(cmd)}')
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
assert stdout == b'{}', f"Got wrong output: {stdout}\nWith error: {stderr}"
assert b'Finished toil run successfully' in stderr
assert p.returncode == 0
def test_visit_top_cwl_class(self):
structure = {
'class': 'Directory',
'listing': [
{
'class': 'Directory',
'listing': [
{'class': 'File'},
{
'class': 'File',
'secondaryFiles': [
{'class': 'Directory'},
{'class': 'File'},
{'cruft'}
]
}
]
},
{'some garbage': 'yep'},
[],
None
]
}
self.counter = 0
def increment(thing: Dict) -> None:
"""
Make sure we are at something CWL object like, and count it.
"""
self.assertIn('class', thing)
self.counter += 1
# We should stop at the root when looking for a Directory
visit_top_cwl_class(structure, ('Directory',), increment)
self.assertEqual(self.counter, 1)
# We should see the top-level files when looking for a file
self.counter = 0
visit_top_cwl_class(structure, ('File',), increment)
self.assertEqual(self.counter, 2)
# When looking for a file or a directory we should stop at the first match to either.
self.counter = 0
visit_top_cwl_class(structure, ('File', 'Directory'), increment)
self.assertEqual(self.counter, 1)
def test_visit_cwl_class_and_reduce(self):
structure = {
'class': 'Directory',
'listing': [
{
'class': 'Directory',
'listing': [
{'class': 'File'},
{
'class': 'File',
'secondaryFiles': [
{'class': 'Directory'},
{'class': 'File'},
{'cruft'}
]
}
]
},
{'some garbage': 'yep'},
[],
None
]
}
self.down_count = 0
def op_down(thing: MutableMapping) -> int:
"""
Grab the ID of the thing we are at, and count what we visit going
down.
"""
self.down_count += 1
return id(thing)
self.up_count = 0
self.up_child_count = 0
def op_up(thing: MutableMapping, down_value: int, child_results: List[str]) -> str:
"""
Check the down return value and the up return values, and count
what we visit going up and what child relationships we have.
"""
self.assertEqual(down_value, id(thing))
for res in child_results:
self.assertEqual(res, "Sentinel value!")
self.up_child_count += 1
self.up_count += 1
return "Sentinel value!"
visit_cwl_class_and_reduce(structure, ('Directory',), op_down, op_up)
self.assertEqual(self.down_count, 3)
self.assertEqual(self.up_count, 3)
# Only 2 child relationships
self.assertEqual(self.up_child_count, 2)
def test_download_structure(self) -> None:
"""
Make sure that download_structure makes the right calls to what it thinks is the file store.
"""
# Define what we would download
fid1 = FileID('afile', 10, False)
fid2 = FileID('adifferentfile', 1000, True)
# And what directory structure it would be in
structure = {
'dir1': {
'dir2': {
'f1': 'toilfile:' + fid1.pack(),
'f1again': 'toilfile:' + fid1.pack(),
'dir2sub': {}
},
'dir3': {}
},
'anotherfile': 'toilfile:' + fid2.pack()
}
# Say where to put it on the filesystem
to_dir = self._createTempDir()
# Make a fake file store
file_store = Mock(AbstractFileStore)
# These will be populated.
# TODO: This cache seems unused. Remove it?
# This maps filesystem path to CWL URI
index = {}
# This maps CWL URI to filesystem path
existing = {}
# Do the download
download_structure(file_store, index, existing, structure, to_dir)
# Check the results
# 3 files should be made
self.assertEqual(len(index), 3)
# From 2 unique URIs
self.assertEqual(len(existing), 2)
# Make sure that the index contents (path to URI) are correct
self.assertIn(os.path.join(to_dir, 'dir1/dir2/f1'), index)
self.assertIn(os.path.join(to_dir, 'dir1/dir2/f1again'), index)
self.assertIn(os.path.join(to_dir, 'anotherfile'), index)
self.assertEqual(index[os.path.join(to_dir, 'dir1/dir2/f1')], structure['dir1']['dir2']['f1'])
self.assertEqual(index[os.path.join(to_dir, 'dir1/dir2/f1again')], structure['dir1']['dir2']['f1again'])
self.assertEqual(index[os.path.join(to_dir, 'anotherfile')], structure['anotherfile'])
# And the existing contents (URI to path)
self.assertIn('toilfile:' + fid1.pack(), existing)
self.assertIn('toilfile:' + fid2.pack(), existing)
self.assertIn(existing['toilfile:' + fid1.pack()], [os.path.join(to_dir, 'dir1/dir2/f1'), os.path.join(to_dir, 'dir1/dir2/f1again')])
self.assertEqual(existing['toilfile:' + fid2.pack()], os.path.join(to_dir, 'anotherfile'))
# The directory structure should be created for real
self.assertTrue(os.path.isdir(os.path.join(to_dir, 'dir1')))
self.assertTrue(os.path.isdir(os.path.join(to_dir, 'dir1/dir2')))
self.assertTrue(os.path.isdir(os.path.join(to_dir, 'dir1/dir2/dir2sub')))
self.assertTrue(os.path.isdir(os.path.join(to_dir, 'dir1/dir3')))
# The file store should have been asked to do the download
file_store.readGlobalFile.assert_has_calls([call(fid1, os.path.join(to_dir, 'dir1/dir2/f1'), symlink=True),
call(fid1, os.path.join(to_dir, 'dir1/dir2/f1again'), symlink=True),
call(fid2, os.path.join(to_dir, 'anotherfile'), symlink=True)], any_order=True)
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
python_modules/dagster-airflow/dagster_airflow_tests/conftest.py
|
'''Test fixtures for dagster-airflow.
These make very heavy use of fixture dependency and scope. If you're unfamiliar with pytest
fixtures, read: https://docs.pytest.org/en/latest/fixture.html.
'''
# pylint doesn't understand the way that pytest constructs fixture dependencies
# pylint: disable=redefined-outer-name, unused-argument
import os
import shutil
import subprocess
import tempfile
import uuid
import docker
import pytest
from dagster import check
from dagster.utils import load_yaml_from_path, mkdir_p, pushd, script_relative_path
# Will be set in environment by pipeline.py -> tox.ini to:
# ${AWS_ACCOUNT_ID}.dkr.ecr.us-west-1.amazonaws.com/dagster-airflow-demo:${BUILDKITE_BUILD_ID}
IMAGE = os.environ.get('DAGSTER_AIRFLOW_DOCKER_IMAGE')
@pytest.fixture(scope='module')
def airflow_home():
'''Check that AIRFLOW_HOME is set, and return it'''
airflow_home_dir = os.getenv('AIRFLOW_HOME')
assert airflow_home_dir, 'No AIRFLOW_HOME set -- is airflow installed?'
airflow_home_dir = os.path.abspath(os.path.expanduser(airflow_home_dir))
return airflow_home_dir
@pytest.fixture(scope='module')
def temp_dir():
'''Context manager for temporary directories.
pytest implicitly wraps in try/except.
'''
dir_path = os.path.join('/tmp', str(uuid.uuid4()))
mkdir_p(dir_path)
yield dir_path
shutil.rmtree(dir_path)
@pytest.fixture(scope='module')
def clean_airflow_home(airflow_home):
'''Ensure that the existing contents of AIRFLOW_HOME do not interfere with test.'''
airflow_dags_path = os.path.join(airflow_home, 'dags')
# Ensure Airflow DAGs folder exists
if not os.path.exists(airflow_dags_path):
os.makedirs(airflow_dags_path)
tempdir_path = tempfile.mkdtemp()
# Move existing DAGs aside for test
dags_files = os.listdir(airflow_dags_path)
for dag_file in dags_files:
shutil.move(os.path.join(airflow_dags_path, dag_file), tempdir_path)
yield
# Clean up DAGs produced by test
shutil.rmtree(airflow_dags_path)
os.makedirs(airflow_dags_path)
# Move original DAGs back
file_paths = os.listdir(tempdir_path)
for file_path in file_paths:
shutil.move(
os.path.join(tempdir_path, file_path), os.path.join(airflow_dags_path, file_path)
)
shutil.rmtree(tempdir_path)
@pytest.fixture(scope='session')
def docker_client():
'''Instantiate a Docker Python client.'''
try:
client = docker.from_env()
client.info()
except docker.errors.APIError:
# pylint: disable=protected-access
check.failed('Couldn\'t find docker at {url} -- is it running?'.format(url=client._url('')))
return client
@pytest.fixture(scope='session')
def build_docker_image(docker_client):
with pushd(script_relative_path('test_project')):
subprocess.check_output(['./build.sh'], shell=True)
return IMAGE
@pytest.fixture(scope='session')
def docker_image(docker_client, build_docker_image):
'''Check that the airflow image exists.'''
try:
docker_client.images.get(build_docker_image)
except docker.errors.ImageNotFound:
check.failed(
'Couldn\'t find docker image {image} required for test: please run the script at '
'{script_path}'.format(
image=build_docker_image, script_path=script_relative_path('test_project/build.sh')
)
)
return build_docker_image
@pytest.fixture(scope='module')
def dags_path(airflow_home):
'''Abspath to the magic Airflow DAGs folder.'''
path = os.path.join(airflow_home, 'dags', '')
mkdir_p(os.path.abspath(path))
return path
@pytest.fixture(scope='module')
def plugins_path(airflow_home):
'''Abspath to the magic Airflow plugins folder.'''
path = os.path.join(airflow_home, 'plugins', '')
mkdir_p(os.path.abspath(path))
return path
@pytest.fixture(scope='module')
def environment_dict(s3_bucket):
env_dict = load_yaml_from_path(script_relative_path('test_project/env.yaml'))
env_dict['storage'] = {'s3': {'s3_bucket': s3_bucket}}
yield env_dict
@pytest.fixture(scope='session')
def s3_bucket():
yield 'dagster-scratch-80542c2'
|
[] |
[] |
[
"AIRFLOW_HOME",
"DAGSTER_AIRFLOW_DOCKER_IMAGE"
] |
[]
|
["AIRFLOW_HOME", "DAGSTER_AIRFLOW_DOCKER_IMAGE"]
|
python
| 2 | 0 | |
pkg/status/status.go
|
// Copyright Contributors to the Open Cluster Management project
package status
import (
"context"
"errors"
"os"
"time"
"github.com/go-kit/kit/log"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/stolostron/metrics-collector/pkg/logger"
"github.com/open-cluster-management/multicluster-monitoring-operator/pkg/apis"
oav1beta1 "github.com/open-cluster-management/multicluster-monitoring-operator/pkg/apis/observability/v1beta1"
)
const (
name = "observability-addon"
namespace = "open-cluster-management-addon-observability"
)
type StatusReport struct {
statusClient client.Client
logger log.Logger
}
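// New builds a StatusReport. In unit-test mode it uses a fake client, in
// standalone mode status reporting is disabled (nil client), otherwise a
// controller-runtime client is created for the cluster.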
func New(logger log.Logger) (*StatusReport, error) {
testMode := os.Getenv("UNIT_TEST") != ""
standaloneMode := os.Getenv("STANDALONE") == "true"
var kubeClient client.Client
if testMode {
kubeClient = fake.NewFakeClient()
} else if standaloneMode {
kubeClient = nil
} else {
config, err := clientcmd.BuildConfigFromFlags("", "")
if err != nil {
return nil, errors.New("Failed to create the kube config")
}
s := scheme.Scheme
if err := apis.AddToScheme(s); err != nil {
return nil, errors.New("Failed to add observabilityaddon into scheme")
}
kubeClient, err = client.New(config, client.Options{Scheme: s})
if err != nil {
return nil, errors.New("Failed to create the kube client")
}
}
return &StatusReport{
statusClient: kubeClient,
logger: log.With(logger, "component", "statusclient"),
}, nil
}
func (s *StatusReport) UpdateStatus(t string, r string, m string) error {
if s.statusClient == nil {
return nil
}
addon := &oav1beta1.ObservabilityAddon{}
err := s.statusClient.Get(context.TODO(), types.NamespacedName{
Name: name,
Namespace: namespace,
}, addon)
if err != nil {
logger.Log(s.logger, logger.Error, "err", err)
return err
}
update := false
found := false
conditions := []oav1beta1.StatusCondition{}
	latestC := oav1beta1.StatusCondition{}
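	// Walk the existing conditions: demote any other condition that is still
	// True, refresh the matching condition if its reason or message changed,
	// and append a brand-new condition afterwards if no matching type exists.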
for _, c := range addon.Status.Conditions {
if c.Status == metav1.ConditionTrue {
if c.Type != t {
c.Status = metav1.ConditionFalse
} else {
found = true
if c.Reason != r || c.Message != m {
c.Reason = r
c.Message = m
c.LastTransitionTime = metav1.NewTime(time.Now())
update = true
					latestC = c
continue
}
}
} else {
if c.Type == t {
found = true
c.Status = metav1.ConditionTrue
c.Reason = r
c.Message = m
c.LastTransitionTime = metav1.NewTime(time.Now())
update = true
				latestC = c
continue
}
}
conditions = append(conditions, c)
}
if update {
		conditions = append(conditions, latestC)
}
if !found {
conditions = append(conditions, oav1beta1.StatusCondition{
Type: t,
Status: metav1.ConditionTrue,
Reason: r,
Message: m,
LastTransitionTime: metav1.NewTime(time.Now()),
})
update = true
}
if update {
addon.Status.Conditions = conditions
err = s.statusClient.Status().Update(context.TODO(), addon)
if err != nil {
logger.Log(s.logger, logger.Error, "err", err)
}
return err
}
return nil
}
|
[
"\"UNIT_TEST\"",
"\"STANDALONE\""
] |
[] |
[
"STANDALONE",
"UNIT_TEST"
] |
[]
|
["STANDALONE", "UNIT_TEST"]
|
go
| 2 | 0 | |
lib/costanalyzer/costanalyzer.go
|
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0
package costanalyzer
import (
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/sirupsen/logrus"
)
const timestampFormat = "2006-01-02T15:04:05"
type nodeInfo struct {
// Legacy (records created by Arvados Node Manager with Arvados <= 1.4.3)
Properties struct {
CloudNode struct {
Price float64
Size string
} `json:"cloud_node"`
}
// Modern
ProviderType string
Price float64
Preemptible bool
}
type consumption struct {
cost float64
duration float64
}
func (c *consumption) Add(n consumption) {
c.cost += n.cost
c.duration += n.duration
}
type arrayFlags []string
func (i *arrayFlags) String() string {
return ""
}
func (i *arrayFlags) Set(value string) error {
for _, s := range strings.Split(value, ",") {
*i = append(*i, s)
}
return nil
}
func (c *command) parseFlags(prog string, args []string, logger *logrus.Logger, stderr io.Writer) (exitCode int, err error) {
var beginStr, endStr string
flags := flag.NewFlagSet("", flag.ContinueOnError)
flags.SetOutput(stderr)
flags.Usage = func() {
fmt.Fprintf(flags.Output(), `
Usage:
%s [options ...] [UUID ...]
This program analyzes the cost of Arvados container requests and calculates
the total cost across all requests. At least one UUID or a timestamp range
must be specified.
When the '-output' option is specified, a set of CSV files with cost details
will be written to the provided directory. Each file is a CSV report that lists
all the containers used to fulfill the container request, together with the
machine type and cost of each container.
When supplied with the UUID of a container request, it will calculate the
cost of that container request and all its children.
When supplied with the UUID of a collection, it will see if there is a
container_request UUID in the properties of the collection, and if so, it
will calculate the cost of that container request and all its children.
When supplied with a project UUID or when supplied with multiple container
request or collection UUIDs, it will calculate the total cost for all
supplied UUIDs.
When supplied with a 'begin' and 'end' timestamp (format:
%s), it will calculate the cost for all top-level container
requests whose containers finished during the specified interval.
The total cost calculation takes container reuse into account: if a container
was reused between several container requests, its cost will only be counted
once.
Caveats:
- This program uses the cost data from config.yml at the time of the
execution of the container, stored in the 'node.json' file in its log
collection. If the cost data was not correctly configured at the time the
container was executed, the output from this program will be incorrect.
- If a container was run on a preemptible ("spot") instance, the cost data
reported by this program may be wildly inaccurate, because it does not have
access to the spot pricing in effect for the node when the container ran. The
UUID report file that is generated when the '-output' option is specified has
a column that indicates the preemptible state of the instance that ran the
container.
- This program does not take into account overhead costs like the time spent
starting and stopping compute nodes that run containers, the cost of the
permanent cloud nodes that provide the Arvados services, the cost of data
stored in Arvados, etc.
- When provided with a project UUID, subprojects will not be considered.
In order to get the data for the UUIDs supplied, the ARVADOS_API_HOST and
ARVADOS_API_TOKEN environment variables must be set.
This program prints the total dollar amount from the aggregate cost
accounting across all provided UUIDs on stdout.
Options:
`, prog, timestampFormat)
flags.PrintDefaults()
}
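	// Example invocation (hypothetical paths and dates):
	//   costanalyzer -output /tmp/reports -begin 2021-01-01T00:00:00 -end 2021-02-01T00:00:00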
loglevel := flags.String("log-level", "info", "logging `level` (debug, info, ...)")
flags.StringVar(&c.resultsDir, "output", "", "output `directory` for the CSV reports")
flags.StringVar(&beginStr, "begin", "", fmt.Sprintf("timestamp `begin` for date range operation (format: %s)", timestampFormat))
flags.StringVar(&endStr, "end", "", fmt.Sprintf("timestamp `end` for date range operation (format: %s)", timestampFormat))
flags.BoolVar(&c.cache, "cache", true, "create and use a local disk cache of Arvados objects")
err = flags.Parse(args)
if err == flag.ErrHelp {
err = nil
exitCode = 1
return
} else if err != nil {
exitCode = 2
return
}
c.uuids = flags.Args()
if (len(beginStr) != 0 && len(endStr) == 0) || (len(beginStr) == 0 && len(endStr) != 0) {
flags.Usage()
err = fmt.Errorf("When specifying a date range, both begin and end must be specified")
exitCode = 2
return
}
if len(beginStr) != 0 {
var errB, errE error
c.begin, errB = time.Parse(timestampFormat, beginStr)
c.end, errE = time.Parse(timestampFormat, endStr)
if (errB != nil) || (errE != nil) {
flags.Usage()
err = fmt.Errorf("When specifying a date range, both begin and end must be of the format %s %+v, %+v", timestampFormat, errB, errE)
exitCode = 2
return
}
}
if (len(c.uuids) < 1) && (len(beginStr) == 0) {
flags.Usage()
err = fmt.Errorf("error: no uuid(s) provided")
exitCode = 2
return
}
lvl, err := logrus.ParseLevel(*loglevel)
if err != nil {
exitCode = 2
return
}
logger.SetLevel(lvl)
if !c.cache {
logger.Debug("Caching disabled")
}
return
}
func ensureDirectory(logger *logrus.Logger, dir string) (err error) {
statData, err := os.Stat(dir)
if os.IsNotExist(err) {
err = os.MkdirAll(dir, 0700)
if err != nil {
return fmt.Errorf("error creating directory %s: %s", dir, err.Error())
}
} else {
if !statData.IsDir() {
return fmt.Errorf("the path %s is not a directory", dir)
}
}
return
}
func addContainerLine(logger *logrus.Logger, node nodeInfo, cr arvados.ContainerRequest, container arvados.Container) (string, consumption) {
var csv string
var containerConsumption consumption
csv = cr.UUID + ","
csv += cr.Name + ","
csv += container.UUID + ","
csv += string(container.State) + ","
if container.StartedAt != nil {
csv += container.StartedAt.String() + ","
} else {
csv += ","
}
var delta time.Duration
if container.FinishedAt != nil {
csv += container.FinishedAt.String() + ","
delta = container.FinishedAt.Sub(*container.StartedAt)
csv += strconv.FormatFloat(delta.Seconds(), 'f', 3, 64) + ","
} else {
csv += ",,"
}
var price float64
var size string
if node.Properties.CloudNode.Price != 0 {
price = node.Properties.CloudNode.Price
size = node.Properties.CloudNode.Size
} else {
price = node.Price
size = node.ProviderType
}
containerConsumption.cost = delta.Seconds() / 3600 * price
containerConsumption.duration = delta.Seconds()
csv += size + "," + fmt.Sprintf("%+v", node.Preemptible) + "," + strconv.FormatFloat(price, 'f', 8, 64) + "," + strconv.FormatFloat(containerConsumption.cost, 'f', 8, 64) + "\n"
return csv, containerConsumption
}
func loadCachedObject(logger *logrus.Logger, file string, uuid string, object interface{}) (reload bool) {
reload = true
if strings.Contains(uuid, "-j7d0g-") || strings.Contains(uuid, "-4zz18-") {
// We do not cache projects or collections, they have no final state
return
}
// See if we have a cached copy of this object
_, err := os.Stat(file)
if err != nil {
return
}
data, err := ioutil.ReadFile(file)
if err != nil {
logger.Errorf("error reading %q: %s", file, err)
return
}
err = json.Unmarshal(data, &object)
if err != nil {
logger.Errorf("failed to unmarshal json: %s: %s", data, err)
return
}
// See if it is in a final state, if that makes sense
switch v := object.(type) {
case *arvados.ContainerRequest:
if v.State == arvados.ContainerRequestStateFinal {
reload = false
logger.Debugf("Loaded object %s from local cache (%s)", uuid, file)
}
case *arvados.Container:
if v.State == arvados.ContainerStateComplete || v.State == arvados.ContainerStateCancelled {
reload = false
logger.Debugf("Loaded object %s from local cache (%s)", uuid, file)
}
}
return
}
// Load an Arvados object.
func loadObject(logger *logrus.Logger, ac *arvados.Client, path string, uuid string, cache bool, object interface{}) (err error) {
file := uuid + ".json"
var reload bool
var cacheDir string
if !cache {
reload = true
} else {
homeDir, err := os.UserHomeDir()
if err != nil {
reload = true
logger.Info("Unable to determine current user home directory, not using cache")
} else {
cacheDir = homeDir + "/.cache/arvados/costanalyzer/"
err = ensureDirectory(logger, cacheDir)
if err != nil {
reload = true
logger.Infof("Unable to create cache directory at %s, not using cache: %s", cacheDir, err.Error())
} else {
reload = loadCachedObject(logger, cacheDir+file, uuid, object)
}
}
}
if !reload {
return
}
if strings.Contains(uuid, "-j7d0g-") {
err = ac.RequestAndDecode(&object, "GET", "arvados/v1/groups/"+uuid, nil, nil)
} else if strings.Contains(uuid, "-xvhdp-") {
err = ac.RequestAndDecode(&object, "GET", "arvados/v1/container_requests/"+uuid, nil, nil)
} else if strings.Contains(uuid, "-dz642-") {
err = ac.RequestAndDecode(&object, "GET", "arvados/v1/containers/"+uuid, nil, nil)
} else if strings.Contains(uuid, "-4zz18-") {
err = ac.RequestAndDecode(&object, "GET", "arvados/v1/collections/"+uuid, nil, nil)
} else {
err = fmt.Errorf("unsupported object type with UUID %q:\n %s", uuid, err)
return
}
if err != nil {
err = fmt.Errorf("error loading object with UUID %q:\n %s", uuid, err)
return
}
encoded, err := json.MarshalIndent(object, "", " ")
if err != nil {
err = fmt.Errorf("error marshaling object with UUID %q:\n %s", uuid, err)
return
}
if cacheDir != "" {
err = ioutil.WriteFile(cacheDir+file, encoded, 0644)
if err != nil {
err = fmt.Errorf("error writing file %s:\n %s", file, err)
return
}
}
return
}
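// getNode reads the 'node.json' file from the container request's log
// collection to recover the instance type and price that were in effect
// when the container ran.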
func getNode(arv *arvadosclient.ArvadosClient, ac *arvados.Client, kc *keepclient.KeepClient, cr arvados.ContainerRequest) (node nodeInfo, err error) {
if cr.LogUUID == "" {
err = errors.New("no log collection")
return
}
var collection arvados.Collection
err = ac.RequestAndDecode(&collection, "GET", "arvados/v1/collections/"+cr.LogUUID, nil, nil)
if err != nil {
err = fmt.Errorf("error getting collection: %s", err)
return
}
var fs arvados.CollectionFileSystem
fs, err = collection.FileSystem(ac, kc)
if err != nil {
err = fmt.Errorf("error opening collection as filesystem: %s", err)
return
}
var f http.File
f, err = fs.Open("node.json")
if err != nil {
err = fmt.Errorf("error opening file 'node.json' in collection %s: %s", cr.LogUUID, err)
return
}
err = json.NewDecoder(f).Decode(&node)
if err != nil {
err = fmt.Errorf("error reading file 'node.json' in collection %s: %s", cr.LogUUID, err)
return
}
return
}
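// handleProject finds all top-level container requests owned by the given
// project and aggregates the cost of each of them.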
func handleProject(logger *logrus.Logger, uuid string, arv *arvadosclient.ArvadosClient, ac *arvados.Client, kc *keepclient.KeepClient, resultsDir string, cache bool) (cost map[string]consumption, err error) {
cost = make(map[string]consumption)
var project arvados.Group
err = loadObject(logger, ac, uuid, uuid, cache, &project)
if err != nil {
return nil, fmt.Errorf("error loading object %s: %s", uuid, err.Error())
}
var childCrs map[string]interface{}
filterset := []arvados.Filter{
{
Attr: "owner_uuid",
Operator: "=",
Operand: project.UUID,
},
{
Attr: "requesting_container_uuid",
Operator: "=",
Operand: nil,
},
}
err = ac.RequestAndDecode(&childCrs, "GET", "arvados/v1/container_requests", nil, map[string]interface{}{
"filters": filterset,
"limit": 10000,
})
if err != nil {
return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
}
if value, ok := childCrs["items"]; ok {
logger.Infof("Collecting top level container requests in project %s", uuid)
items := value.([]interface{})
for _, item := range items {
itemMap := item.(map[string]interface{})
crInfo, err := generateCrInfo(logger, itemMap["uuid"].(string), arv, ac, kc, resultsDir, cache)
if err != nil {
return nil, fmt.Errorf("error generating container_request CSV: %s", err.Error())
}
for k, v := range crInfo {
cost[k] = v
}
}
} else {
logger.Infof("No top level container requests found in project %s", uuid)
}
return
}
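// generateCrInfo calculates the per-container cost for a container request
// (or for the container request referenced by a collection) and all of its
// child container requests, optionally writing a CSV report to resultsDir.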
func generateCrInfo(logger *logrus.Logger, uuid string, arv *arvadosclient.ArvadosClient, ac *arvados.Client, kc *keepclient.KeepClient, resultsDir string, cache bool) (cost map[string]consumption, err error) {
cost = make(map[string]consumption)
csv := "CR UUID,CR name,Container UUID,State,Started At,Finished At,Duration in seconds,Compute node type,Preemptible,Hourly node cost,Total cost\n"
var tmpCsv string
var total, tmpTotal consumption
logger.Debugf("Processing %s", uuid)
var crUUID = uuid
if strings.Contains(uuid, "-4zz18-") {
// This is a collection, find the associated container request (if any)
var c arvados.Collection
err = loadObject(logger, ac, uuid, uuid, cache, &c)
if err != nil {
return nil, fmt.Errorf("error loading collection object %s: %s", uuid, err)
}
value, ok := c.Properties["container_request"]
if !ok {
return nil, fmt.Errorf("error: collection %s does not have a 'container_request' property", uuid)
}
crUUID, ok = value.(string)
if !ok {
return nil, fmt.Errorf("error: collection %s does not have a 'container_request' property of the string type", uuid)
}
}
// This is a container request, find the container
var cr arvados.ContainerRequest
err = loadObject(logger, ac, crUUID, crUUID, cache, &cr)
if err != nil {
return nil, fmt.Errorf("error loading cr object %s: %s", uuid, err)
}
if len(cr.ContainerUUID) == 0 {
// Nothing to do! E.g. a CR in 'Uncommitted' state.
logger.Infof("No container associated with container request %s, skipping", crUUID)
return nil, nil
}
var container arvados.Container
err = loadObject(logger, ac, crUUID, cr.ContainerUUID, cache, &container)
if err != nil {
return nil, fmt.Errorf("error loading container object %s: %s", cr.ContainerUUID, err)
}
topNode, err := getNode(arv, ac, kc, cr)
if err != nil {
logger.Errorf("Skipping container request %s: error getting node %s: %s", cr.UUID, cr.UUID, err)
return nil, nil
}
tmpCsv, total = addContainerLine(logger, topNode, cr, container)
csv += tmpCsv
cost[container.UUID] = total
// Find all container requests that have the container we found above as requesting_container_uuid
var childCrs arvados.ContainerRequestList
filterset := []arvados.Filter{
{
Attr: "requesting_container_uuid",
Operator: "=",
Operand: container.UUID,
}}
err = ac.RequestAndDecode(&childCrs, "GET", "arvados/v1/container_requests", nil, map[string]interface{}{
"filters": filterset,
"limit": 10000,
})
if err != nil {
return nil, fmt.Errorf("error querying container_requests: %s", err.Error())
}
logger.Infof("Collecting child containers for container request %s (%s)", crUUID, container.FinishedAt)
progressTicker := time.NewTicker(5 * time.Second)
defer progressTicker.Stop()
for i, cr2 := range childCrs.Items {
select {
case <-progressTicker.C:
logger.Infof("... %d of %d", i+1, len(childCrs.Items))
default:
}
node, err := getNode(arv, ac, kc, cr2)
if err != nil {
logger.Errorf("Skipping container request %s: error getting node %s: %s", cr2.UUID, cr2.UUID, err)
continue
}
logger.Debug("Child container: " + cr2.ContainerUUID)
var c2 arvados.Container
err = loadObject(logger, ac, cr.UUID, cr2.ContainerUUID, cache, &c2)
if err != nil {
return nil, fmt.Errorf("error loading object %s: %s", cr2.ContainerUUID, err)
}
tmpCsv, tmpTotal = addContainerLine(logger, node, cr2, c2)
cost[cr2.ContainerUUID] = tmpTotal
csv += tmpCsv
total.Add(tmpTotal)
}
logger.Debug("Done collecting child containers")
csv += "TOTAL,,,,,," + strconv.FormatFloat(total.duration, 'f', 3, 64) + ",,,," + strconv.FormatFloat(total.cost, 'f', 2, 64) + "\n"
if resultsDir != "" {
// Write the resulting CSV file
fName := resultsDir + "/" + crUUID + ".csv"
err = ioutil.WriteFile(fName, []byte(csv), 0644)
if err != nil {
return nil, fmt.Errorf("error writing file with path %s: %s", fName, err.Error())
}
logger.Infof("\nUUID report in %s", fName)
}
return
}
func (c *command) costAnalyzer(prog string, args []string, logger *logrus.Logger, stdout, stderr io.Writer) (exitcode int, err error) {
exitcode, err = c.parseFlags(prog, args, logger, stderr)
if exitcode != 0 {
return
}
if c.resultsDir != "" {
err = ensureDirectory(logger, c.resultsDir)
if err != nil {
exitcode = 3
return
}
}
uuidChannel := make(chan string)
// Arvados Client setup
arv, err := arvadosclient.MakeArvadosClient()
if err != nil {
err = fmt.Errorf("error creating Arvados object: %s", err)
exitcode = 1
return
}
kc, err := keepclient.MakeKeepClient(arv)
if err != nil {
err = fmt.Errorf("error creating Keep object: %s", err)
exitcode = 1
return
}
ac := arvados.NewClientFromEnv()
// Populate uuidChannel with the requested uuid list
go func() {
defer close(uuidChannel)
for _, uuid := range c.uuids {
uuidChannel <- uuid
}
if !c.begin.IsZero() {
initialParams := arvados.ResourceListParams{
Filters: []arvados.Filter{{"container.finished_at", ">=", c.begin}, {"container.finished_at", "<", c.end}, {"requesting_container_uuid", "=", nil}},
Order: "created_at",
}
params := initialParams
for {
// This list variable must be a new one declared
// inside the loop: otherwise, items in the API
// response would get deep-merged into the items
// loaded in previous iterations.
var list arvados.ContainerRequestList
err := ac.RequestAndDecode(&list, "GET", "arvados/v1/container_requests", nil, params)
if err != nil {
logger.Errorf("Error getting container request list from Arvados API: %s", err)
break
}
if len(list.Items) == 0 {
break
}
for _, i := range list.Items {
uuidChannel <- i.UUID
}
params.Offset += len(list.Items)
}
}
}()
cost := make(map[string]consumption)
for uuid := range uuidChannel {
logger.Debugf("Considering %s", uuid)
if strings.Contains(uuid, "-j7d0g-") {
// This is a project (group)
var projectCost map[string]consumption
projectCost, err = handleProject(logger, uuid, arv, ac, kc, c.resultsDir, c.cache)
if err != nil {
exitcode = 1
return
}
// Merge the project's container costs into the aggregate map.
for k, v := range projectCost {
cost[k] = v
}
} else if strings.Contains(uuid, "-xvhdp-") || strings.Contains(uuid, "-4zz18-") {
// This is a container request
var crInfo map[string]consumption
crInfo, err = generateCrInfo(logger, uuid, arv, ac, kc, c.resultsDir, c.cache)
if err != nil {
err = fmt.Errorf("error generating CSV for uuid %s: %s", uuid, err.Error())
exitcode = 2
return
}
for k, v := range crInfo {
cost[k] = v
}
} else if strings.Contains(uuid, "-tpzed-") {
// This is a user. The "Home" project for a user is not a real project.
// It is identified by the user uuid. As such, cost analysis for the
// "Home" project is not supported by this program. Skip this uuid, but
// keep going.
logger.Errorf("cost analysis is not supported for the 'Home' project: %s", uuid)
} else {
logger.Errorf("this argument does not look like a uuid: %s", uuid)
exitcode = 3
return
}
}
if len(cost) == 0 {
logger.Info("Nothing to do!")
return
}
var csv string
csv = "# Aggregate cost accounting for uuids:\n# UUID, Duration in seconds, Total cost\n"
for _, uuid := range c.uuids {
csv += "# " + uuid + "\n"
}
var total consumption
for k, v := range cost {
csv += k + "," + strconv.FormatFloat(v.duration, 'f', 3, 64) + "," + strconv.FormatFloat(v.cost, 'f', 8, 64) + "\n"
total.Add(v)
}
csv += "TOTAL," + strconv.FormatFloat(total.duration, 'f', 3, 64) + "," + strconv.FormatFloat(total.cost, 'f', 2, 64) + "\n"
if c.resultsDir != "" {
// Write the resulting CSV file
aFile := c.resultsDir + "/" + time.Now().Format("2006-01-02-15-04-05") + "-aggregate-costaccounting.csv"
err = ioutil.WriteFile(aFile, []byte(csv), 0644)
if err != nil {
err = fmt.Errorf("error writing file with path %s: %s", aFile, err.Error())
exitcode = 1
return
}
logger.Infof("Aggregate cost accounting for all supplied uuids in %s", aFile)
}
// Output the total dollar amount on stdout
fmt.Fprintf(stdout, "%s\n", strconv.FormatFloat(total.cost, 'f', 2, 64))
return
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
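The aggregate report written by costAnalyzer above is a plain CSV: `#`-prefixed comment lines listing the requested uuids, one `uuid,duration,cost` row per container, and a closing `TOTAL` row. A minimal Python sketch for reading such a report back; the file name is only an illustration of the `<timestamp>-aggregate-costaccounting.csv` pattern used above:
import csv

def read_cost_report(path):
    """Parse an aggregate cost CSV: skip '#' comments, return (rows, total)."""
    rows, total = [], None
    with open(path, newline="") as f:
        for record in csv.reader(f):
            if not record or record[0].startswith("#"):
                continue  # header and per-uuid comment lines
            name, duration, cost = record[0], float(record[1]), float(record[2])
            if name == "TOTAL":
                total = (duration, cost)
            else:
                rows.append((name, duration, cost))
    return rows, total

rows, total = read_cost_report("2024-01-01-00-00-00-aggregate-costaccounting.csv")
if total:
    print(f"{len(rows)} containers, {total[0]:.3f} seconds, ${total[1]:.2f} total")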
src/main/java/dmo/fs/spa/db/SpaDatabasePostgres.java
|
package dmo.fs.spa.db;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import com.fasterxml.jackson.databind.JsonNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import dmo.fs.spa.utils.SpaLogin;
import dmo.fs.spa.utils.SpaLoginImpl;
import dmo.fs.utils.DodexUtil;
import io.reactivex.Completable;
import io.reactivex.Single;
import io.reactivex.disposables.Disposable;
import io.vertx.core.Future;
import io.vertx.pgclient.PgConnectOptions;
import io.vertx.reactivex.core.Promise;
import io.vertx.reactivex.core.Vertx;
import io.vertx.reactivex.pgclient.PgPool;
import io.vertx.reactivex.sqlclient.Row;
import io.vertx.reactivex.sqlclient.RowSet;
import io.vertx.sqlclient.PoolOptions;
public class SpaDatabasePostgres extends DbPostgres {
private final static Logger logger = LoggerFactory.getLogger(SpaDatabasePostgres.class.getName());
protected Disposable disposable;
protected Properties dbProperties = new Properties();
protected Map<String, String> dbOverrideMap = new ConcurrentHashMap<>();
protected Map<String, String> dbMap = new ConcurrentHashMap<>();
protected JsonNode defaultNode;
protected String webEnv = System.getenv("VERTXWEB_ENVIRONMENT");
protected DodexUtil dodexUtil = new DodexUtil();
protected PgPool pool4;
private Vertx vertx;
public SpaDatabasePostgres(Map<String, String> dbOverrideMap, Properties dbOverrideProps)
throws InterruptedException, IOException, SQLException {
super();
defaultNode = dodexUtil.getDefaultNode();
webEnv = webEnv == null || "prod".equals(webEnv) ? "prod" : "dev";
dbMap = dodexUtil.jsonNodeToMap(defaultNode, webEnv);
dbProperties = dodexUtil.mapToProperties(dbMap);
if (dbOverrideProps != null) {
this.dbProperties = dbOverrideProps;
}
if (dbOverrideMap != null) {
this.dbOverrideMap = dbOverrideMap;
}
SpaDbConfiguration.mapMerge(dbMap, dbOverrideMap);
}
public SpaDatabasePostgres() throws InterruptedException, IOException, SQLException {
super();
defaultNode = dodexUtil.getDefaultNode();
webEnv = webEnv == null || "prod".equals(webEnv) ? "prod" : "dev";
dbMap = dodexUtil.jsonNodeToMap(defaultNode, webEnv);
dbProperties = dodexUtil.mapToProperties(dbMap);
}
@Override
public Future<Void> databaseSetup() throws InterruptedException, SQLException {
// Override default credentials
// dbProperties.setProperty("user", "myUser");
// dbProperties.setProperty("password", "myPassword");
// dbProperties.setProperty("ssl", "false");
if ("dev".equals(webEnv)) {
// dbMap.put("dbname", "/myDbname"); // this wiil be merged into the default map
SpaDbConfiguration.configureTestDefaults(dbMap, dbProperties);
} else {
SpaDbConfiguration.configureDefaults(dbMap, dbProperties); // Prod
}
PoolOptions poolOptions = new PoolOptions().setMaxSize(Runtime.getRuntime().availableProcessors() * 5);
PgConnectOptions connectOptions;
connectOptions = new PgConnectOptions()
.setHost(dbMap.get("host2"))
.setPort(Integer.valueOf(dbMap.get("port")))
.setUser(dbProperties.getProperty("user"))
.setPassword(dbProperties.getProperty("password"))
.setDatabase(dbMap.get("database"))
.setSsl(Boolean.valueOf(dbProperties.getProperty("ssl")))
.setIdleTimeout(1)
// .setCachePreparedStatements(true)
;
vertx = DodexUtil.getVertx();
pool4 = PgPool.pool(vertx, connectOptions, poolOptions);
Completable completable = pool4.rxGetConnection().flatMapCompletable(conn ->
conn.rxBegin().flatMapCompletable(
tx -> conn.query(CHECKLOGINSQL).rxExecute().doOnSuccess(rows -> {
for (Row row : rows) {
if (row.getValue(0) == null) {
final String usersSql = getCreateTable("LOGIN").replace("dummy",
dbProperties.get("user").toString());
Single<RowSet<Row>> crow = conn.query(usersSql).rxExecute()
.doOnError(err -> {
logger.info(String.format("Login Table Error: %s", err.getMessage()));
}).doOnSuccess(result -> {
logger.info("Login Table Added.");
});
crow.subscribe(result -> {
//
}, err -> {
logger.info(String.format("Login Table Error: %s", err.getMessage()));
});
}
}
}).doOnError(err -> {
logger.info(String.format("Login Table Error: %s", err.getMessage()));
}).flatMapCompletable(res -> tx.rxCommit())
));
Promise<Void> setupPromise = Promise.promise();
completable.subscribe(() -> {
try {
setupSql(pool4);
setupPromise.complete();
} catch (SQLException e) {
e.printStackTrace();
}
}, err -> {
logger.info(String.format("Tables Create Error: %s", err.getMessage()));
});
return setupPromise.future();
}
@Override
public SpaLogin createSpaLogin() {
return new SpaLoginImpl();
}
@Override
@SuppressWarnings("unchecked")
public <T> T getPool4() {
return (T) pool4;
}
@Override
public Vertx getVertx() {
return vertx;
}
@Override
public void setVertx(Vertx vertx) {
this.vertx = vertx;
}
}
|
[
"\"VERTXWEB_ENVIRONMENT\""
] |
[] |
[
"VERTXWEB_ENVIRONMENT"
] |
[]
|
["VERTXWEB_ENVIRONMENT"]
|
java
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'secure_auth_rest.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
rekognitionlambda/index.py
|
#
# Lambda function to detect labels in image using Amazon Rekognition
#
import logging
import boto3
from botocore.exceptions import ClientError
import os
from urllib.parse import unquote_plus
from boto3.dynamodb.conditions import Key, Attr
import uuid
from PIL import Image
import json
thumbBucket = os.environ['RESIZEDBUCKET']
# Set the minimum confidence for Amazon Rekognition
minConfidence = 50
"""MinConfidence parameter (float) -- Specifies the minimum confidence level for the labels to return.
Amazon Rekognition doesn't return any labels with a confidence lower than this specified value.
If you specify a value of 0, all labels are returned, regardless of the default thresholds that the
model version applies."""
## Instantiate service clients outside of handler for context reuse / performance
# Constructor for our s3 client object
s3_client = boto3.client('s3')
# Constructor to create rekognition client object
rekognition_client = boto3.client('rekognition')
# Constructor for DynamoDB resource object
dynamodb = boto3.resource('dynamodb')
def handler(event, context):
print("DEBUG")
print("Lambda processing event: ", event)
# For each message (photo) get the bucket name and key
for response in event['Records']:
formatted = json.loads(response['body'])
for record in formatted['Records']:
ourBucket = record['s3']['bucket']['name']
ourKey = record['s3']['object']['key']
# For each bucket/key, retrieve labels
generateThumb(ourBucket, ourKey)
rekFunction(ourBucket, ourKey)
return
def generateThumb(ourBucket, ourKey):
# Clean the string to add the colon back into requested name
safeKey = replaceSubstringWithColon(ourKey)
# Define upload and download paths
key = unquote_plus(safeKey)
tmpkey = key.replace('/', '')
download_path = '/tmp/{}{}'.format(uuid.uuid4(), tmpkey)
upload_path = '/tmp/resized-{}'.format(tmpkey)
# Download file from s3 and store it in Lambda /tmp storage (512MB avail)
try:
s3_client.download_file(ourBucket, key, download_path)
except ClientError as e:
logging.error(e)
# Create our thumbnail using Pillow library
resize_image(download_path, upload_path)
# Upload the thumbnail to the thumbnail bucket
try:
s3_client.upload_file(upload_path, thumbBucket, safeKey)
except ClientError as e:
logging.error(e)
# Be good little citizens and clean up files in /tmp so that we don't run out of space
os.remove(upload_path)
os.remove(download_path)
return
def resize_image(image_path, resized_path):
with Image.open(image_path) as image:
image.thumbnail(tuple(x / 2 for x in image.size))
image.save(resized_path)
def rekFunction(ourBucket, ourKey):
# Clean the string to add the colon back into the requested name, which was substituted by the Amplify library.
safeKey = replaceSubstringWithColon(ourKey)
print('Currently processing the following image')
print('Bucket: ' + ourBucket + ' key name: ' + safeKey)
detectLabelsResults = {}
# Try and retrieve labels from Amazon Rekognition, using the confidence level we set in minConfidence var
try:
detectLabelsResults = rekognition_client.detect_labels(Image={'S3Object': {'Bucket':ourBucket, 'Name':safeKey}},
MaxLabels=10,
MinConfidence=minConfidence)
except ClientError as e:
logging.error(e)
# Create our array and dict for our label construction
objectsDetected = []
imageLabels = {
'image': safeKey
}
# Add all of our labels into imageLabels by iterating over response['Labels']
for label in detectLabelsResults['Labels']:
newItem = label['Name']
objectsDetected.append(newItem)
objectNum = len(objectsDetected)
itemAtt = f"object{objectNum}"
# We now have our shiny new item ready to put into DynamoDB
imageLabels[itemAtt] = newItem
# Instantiate a table resource object of our environment variable
imageLabelsTable = os.environ['TABLE']
table = dynamodb.Table(imageLabelsTable)
# Put item into table
try:
table.put_item(Item=imageLabels)
except ClientError as e:
logging.error(e)
return
# Clean the string to add the colon back into requested name
def replaceSubstringWithColon(txt):
return txt.replace("%3A", ":")
|
[] |
[] |
[
"TABLE",
"RESIZEDBUCKET"
] |
[]
|
["TABLE", "RESIZEDBUCKET"]
|
python
| 2 | 0 | |
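The Lambda handler above expects SQS messages whose body is a JSON-encoded S3 event notification. A small sketch of that nesting, useful for local reasoning or unit tests; the bucket and key names are placeholders, and actually invoking handler() would still require the RESIZEDBUCKET/TABLE variables plus AWS credentials:
import json

s3_record = {
    "s3": {
        "bucket": {"name": "example-upload-bucket"},
        "object": {"key": "private/us-east-1%3A1234/photo.jpg"},
    }
}
# SQS wraps the S3 notification: each outer record carries the inner event as a JSON string.
event = {"Records": [{"body": json.dumps({"Records": [s3_record]})}]}
# handler(event, None) would then thumbnail and label
# "private/us-east-1:1234/photo.jpg" after replaceSubstringWithColon().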
communicationsexample/settings.py
|
"""
Django settings for communicationsexample project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '152-mhd6)p6r&x(e=%k&$q5!dl8uug$f04k@7&^*6j_xpkucl@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gurucommunication',
'django_markdown2',
'anymail',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'communicationsexample.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'communicationsexample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
## Mail variables:
# SMS / TWILLIO
TWILLIO_SID = os.environ.get('TWILLIO_SID', None)
TWILLIO_AUTH_TOKEN = os.environ.get('TWILLIO_AUTH_TOKEN', None)
TWILLIO_PHONE_NUMBER = os.environ.get('TWILLIO_PHONE_NUMBER', None)
SMS_BACKEND = 'services.backends.twillio.TwillioBackend'
# EMAIL / MailGun
ANYMAIL = {
# (exact settings here depend on your ESP...)
"MAILGUN_API_KEY": os.environ.get('MAILGUN_API_KEY', None),
"MAILGUN_SENDER_DOMAIN": os.environ.get('MAILGUN_SENDER_DOMAIN', None), # your Mailgun domain, if needed
}
EMAIL_BACKEND = "anymail.backends.mailgun.MailgunBackend" # or sendgrid.SendGridBackend, or...
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL', None)
class GURU_MEDIUMS:
SMS = 'SMS'
EMAIL = 'EMAIL'
GURU_COMMUNICATIONS = {
'SMS_BACKEND': 'gurucommunication.backends.twillio.TwillioBackend',
'PREFERENCE_ORDER': ['SMS', 'EMAIL', 'NOTIFICATION'],
'DEFAULT_DELIVERY_METHOD': 'EMAIL'
}
|
[] |
[] |
[
"MAILGUN_API_KEY",
"TWILLIO_PHONE_NUMBER",
"TWILLIO_SID",
"DEFAULT_FROM_EMAIL",
"TWILLIO_AUTH_TOKEN",
"MAILGUN_SENDER_DOMAIN"
] |
[]
|
["MAILGUN_API_KEY", "TWILLIO_PHONE_NUMBER", "TWILLIO_SID", "DEFAULT_FROM_EMAIL", "TWILLIO_AUTH_TOKEN", "MAILGUN_SENDER_DOMAIN"]
|
python
| 6 | 0 | |
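With the Anymail/Mailgun backend configured above, outgoing mail is driven entirely by environment variables. A hedged sketch of a one-off delivery check; it assumes MAILGUN_API_KEY, MAILGUN_SENDER_DOMAIN and DEFAULT_FROM_EMAIL are exported, and the recipient address is a placeholder:
import os

import django
from django.conf import settings
from django.core.mail import send_mail

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "communicationsexample.settings")
django.setup()

# Routed through EMAIL_BACKEND = anymail.backends.mailgun.MailgunBackend.
send_mail(
    subject="Delivery check",
    message="Sent via the Mailgun backend configured in settings.py.",
    from_email=settings.DEFAULT_FROM_EMAIL,
    recipient_list=["[email protected]"],
)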
ansible-devel/test/integration/targets/cli/test_k_and_K.py
|
#!/usr/bin/env python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import pexpect
os.environ['ANSIBLE_NOCOLOR'] = '1'
out = pexpect.run(
'ansible -c ssh -i localhost, -u cliuser1 -e ansible_python_interpreter={0} '
'-m command -a whoami -Kkb --become-user cliuser2 localhost'.format(sys.argv[1]),
events={
'SSH password:': 'secretpassword\n',
'BECOME password': 'secretpassword\n',
},
timeout=10
)
print(out)
assert b'cliuser2' in out
|
[] |
[] |
[
"ANSIBLE_NOCOLOR"
] |
[]
|
["ANSIBLE_NOCOLOR"]
|
python
| 1 | 0 | |
ament_tools/helper.py
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import filecmp
from multiprocessing import cpu_count
import os
import re
import shlex
import shutil
import stat
from ament_tools.package_types import package_exists_at
def argparse_existing_dir(path):
if not os.path.exists(path):
raise argparse.ArgumentTypeError("Path '%s' does not exist" % path)
if not os.path.isdir(path):
raise argparse.ArgumentTypeError("Path '%s' is not a directory" % path)
return path
def argparse_existing_package(path):
path = argparse_existing_dir(path)
if not package_exists_at(path):
raise argparse.ArgumentTypeError(
"Path '%s' does not contain a package" % path)
return path
def determine_path_argument(cwd, base_path, argument, default):
if argument is None:
# if no argument is passed the default is relative to the base_path
return os.path.join(base_path, default)
# if an argument is passed it is relative to cwd (or absolute)
return os.path.abspath(os.path.join(cwd, argument))
def extract_jobs_flags(arguments):
"""
Extract make job flags from a list of other make flags, i.e. -j8 -l8.
:param arguments: string of space separated arguments which may or may not
contain make job flags
:type arguments: str
:returns: list of make jobs flags as a space separated string
:rtype: str
"""
regex = (
r'(?:^|\s)(-?(?:j|l)(?:\s*[0-9]+|\s|$))'
r'|'
r'(?:^|\s)((?:--)?(?:jobs|load-average)(?:(?:=|\s+)[0-9]+|(?:\s|$)))'
)
matches = re.findall(regex, arguments) or []
matches = [m[0] or m[1] for m in matches]
return ' '.join([m.strip() for m in matches]) if matches else None
def combine_make_flags(make_flags, args, extras):
"""
Combine make flags and arg's make job flags with make_flags in extras.
:param list make_flags: existing make_flags, extracted from args already.
:param list args: command line args with ``--make-flags ...`` extracted.
:param dict extras: extras dict to which make flags are added/extended.
"""
# Add make_flags in extras, if they exist, to verb's --make-flags
make_flags += extras.get('make_flags', [])
# Extract make job arguments from main arguments and add to make_flags
make_job_flags = extract_jobs_flags(' '.join(args))
if make_job_flags:
args = re.sub(make_job_flags, '', ' '.join(args)).split()
make_flags.extend(make_job_flags.split())
# Ensure make args will have job flags and then store make_flags in extras
extras['make_flags'] = ensure_make_job_flags(make_flags)
return args
def ensure_make_job_flags(input_make_args):
"""
Ensure that make will get correct job flags, either from args or env.
If no job flags are present and there are none in the MAKEFLAGS environment
variable, then make flags are set to the cpu_count, e.g. -j4 -l4.
:param input_make_args: list of make arguments to be handled
:type input_make_args: list
:returns: copied list of make arguments, potentially with modifications
:rtype: list
"""
make_args = list(input_make_args)
# If no -j/--jobs/-l/--load-average flags are in make_args
if not extract_jobs_flags(' '.join(make_args)):
# If -j/--jobs/-l/--load-average are in MAKEFLAGS
if extract_jobs_flags(os.environ.get('MAKEFLAGS', '')):
# Do not extend make arguments, let MAKEFLAGS set things
pass
else:
# Else extend the make_arguments to include some jobs flags
# Use the number of CPU cores
try:
jobs = cpu_count()
make_args.append('-j{0}'.format(jobs))
make_args.append('-l{0}'.format(jobs))
except NotImplementedError:
# If the number of cores cannot be determined,
# then do not extend args
pass
return make_args
def extract_argument_group(args, delimiting_option):
"""
Extract a group of arguments from a list of arguments using a delimiter.
Here is an example:
.. code-block:: python
>>> extract_argument_group(['foo', '--args', 'bar', '--baz'], '--args')
(['foo'], ['bar', '--baz'])
The group can always be ended using the double hyphen ``--``.
In order to pass a double hyphen as arguments, use three hyphens ``---``.
Any set of hyphens encountered after the delimiter, and up to ``--``, which
have three or more hyphens and are isolated, will be captured and reduced
by one hyphen.
For example:
.. code-block:: python
>> extract_argument_group(['foo',
'--args', 'bar', '--baz', '---', '--',
'--foo-option'], '--args')
(['foo', '--foo-option'], ['bar', '--baz', '--'])
In the result the ``--`` comes from the ``---`` in the input.
The ``--args`` and the corresponding ``--`` are removed entirely.
The delimiter and ``--`` terminator combination can also happen multiple
times, in which case the bodies of arguments are combined and returned in
the order they appeared.
For example:
.. code-block:: python
>> extract_argument_group(['foo',
'--args', 'ping', '--',
'bar',
'--args', 'pong', '--',
'baz',
'--args', '--'], '--args')
(['foo', 'bar', 'baz'], ['ping', 'pong'])
Note: ``--`` cannot be used as the ``delimiting_option``.
:param list args: list of strings which are ordered arguments.
:param str delimiting_option: option which denotes where to split the args.
:returns: tuple of arguments before and after the delimiter.
:rtype: tuple
:raises: ValueError if the delimiting_option is ``--``.
"""
if delimiting_option == '--':
raise ValueError("Cannot use '--' as the delimiter")
if delimiting_option not in args:
return args, []
trimmed_args = args
extracted_args = []
# Loop through all arguments extracting groups of arguments
while True:
try:
next_delimiter = trimmed_args.index(delimiting_option)
except ValueError:
# No delimiter's left in the arguments, stop looking
break
# Capture and remove args after the delimiter
tail = trimmed_args[next_delimiter + 1:]
trimmed_args = trimmed_args[:next_delimiter]
# Look for a terminator, '--'
next_terminator = None
try:
next_terminator = tail.index('--')
except ValueError:
pass
if next_terminator is None:
# No terminator, put all args in extracted_args and stop looking
extracted_args.extend(tail)
break
else:
# Terminator found, put args up, but not including terminator
# in extracted_args
extracted_args.extend(tail[:next_terminator])
# And put arguments after the terminator back in trimmed_args
# then continue looking for additional delimiters
trimmed_args.extend(tail[next_terminator + 1:])
# Iterate through extracted args and shorten tokens made of 3+ hyphens only
for i, token in enumerate(extracted_args):
# '--' should have been removed from extracted_args in the above loop
assert token != '--', "this shouldn't happen"
# Skip single hyphens
if token == '-':
continue
# Check for non-hyphen characters
if [c for c in token if c != '-']:
# contains something other than -, continue
continue
# Token is only hyphens with more than two; shorten it by one -
extracted_args[i] = token[1:]
return trimmed_args, extracted_args
def compute_deploy_destination(context, filename, dst_subfolder=''):
return os.path.join(context.install_space, dst_subfolder, filename)
def deploy_file(
context,
source_base_path,
filename,
dst_subfolder='',
executable=False,
skip_if_exists=False,
):
# copy the file if not already there and identical
source_path = os.path.join(source_base_path, filename)
# create destination folder if necessary
destination_path = compute_deploy_destination(context, filename, dst_subfolder)
# If the file exists and we should skip if we didn't install it.
if (
(os.path.exists(destination_path) or os.path.islink(destination_path)) and
skip_if_exists
):
# If the dest is not a symlink or if it is but it doesn't point to our source.
if (
not os.path.islink(destination_path) or
not os.path.samefile(source_path, destination_path)
):
# Finally if the content is not the same.
if not filecmp.cmp(source_path, destination_path):
# We (probably) didn't install it and shouldn't overwrite it.
print('-- [ament] Skipping (would overwrite):', destination_path)
return
print('-- [ament] Deploying:', destination_path)
os.makedirs(os.path.dirname(destination_path), exist_ok=True)
# remove existing file / symlink if it is not already what is intended
if os.path.exists(destination_path):
if not context.symlink_install:
if os.path.islink(destination_path) or not filecmp.cmp(source_path, destination_path):
os.remove(destination_path)
else:
if not os.path.islink(destination_path) or \
not os.path.samefile(source_path, destination_path):
# try-catch to guard against a TOCTOU error that can happen during parallel build.
try:
os.remove(destination_path)
except OSError:
pass
if not os.path.exists(destination_path):
if not context.symlink_install:
shutil.copyfile(source_path, destination_path)
else:
# while the destination might not exist it can still be a symlink
if os.path.islink(destination_path):
os.remove(destination_path)
# try-catch to guard against a TOCTOU error that can happen during parallel build
try:
os.symlink(source_path, destination_path)
except OSError:
pass
# set executable bit if necessary
if executable and not context.symlink_install:
mode = os.stat(destination_path).st_mode
new_mode = mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if new_mode != mode:
os.chmod(destination_path, new_mode)
def quote_shell_command(cmd):
if os.name != 'nt':
return ' '.join([(shlex.quote(c) if c != '&&' else c) for c in cmd])
quoted = []
for c in cmd:
if ' ' in c:
c = '"%s"' % (c.replace('"', r'\"'))
quoted.append(c)
return ' '.join(quoted)
|
[] |
[] |
[
"MAKEFLAGS"
] |
[]
|
["MAKEFLAGS"]
|
python
| 1 | 0 | |
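The jobs-flag helpers above are easiest to understand from their inputs and outputs. A short sketch, assuming ament_tools is importable; the last result depends on the local CPU count and on whether MAKEFLAGS is set:
from ament_tools.helper import ensure_make_job_flags, extract_jobs_flags

# extract_jobs_flags() pulls -j/-l/--jobs/--load-average tokens out of a
# space-separated argument string and returns None when nothing matches.
print(extract_jobs_flags("--make-flags -j8 -l8 VERBOSE=1"))  # -j8 -l8
print(extract_jobs_flags("VERBOSE=1"))                       # None

# ensure_make_job_flags() keeps explicit flags; with none present and no
# MAKEFLAGS in the environment it appends -jN -lN for N CPU cores.
print(ensure_make_job_flags(["-j2"]))        # ['-j2']
print(ensure_make_job_flags(["VERBOSE=1"]))  # e.g. ['VERBOSE=1', '-j8', '-l8']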
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/diff.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/kubectl/apply/parse"
"k8s.io/kubernetes/pkg/kubectl/apply/strategy"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
"k8s.io/utils/exec"
)
var (
diffLong = templates.LongDesc(i18n.T(`
Diff configurations specified by filename or stdin between their local,
last-applied, live and/or "merged" versions.
LOCAL and LIVE versions are diffed by default. Other available keywords
are MERGED and LAST.
Output is always YAML.
KUBERNETES_EXTERNAL_DIFF environment variable can be used to select your own
diff command. By default, the "diff" command available in your path will be
run with "-u" (unicode) and "-N" (treat new files as empty) options.`))
diffExample = templates.Examples(i18n.T(`
# Diff resources included in pod.json. By default, it will diff LOCAL and LIVE versions
kubectl alpha diff -f pod.json
# When one version is specified, diff that version against LIVE
cat service.yaml | kubectl alpha diff -f - MERGED
# Or specify both versions
kubectl alpha diff -f pod.json -f service.yaml LAST LOCAL`))
)
type DiffOptions struct {
FilenameOptions resource.FilenameOptions
}
func isValidArgument(arg string) error {
switch arg {
case "LOCAL", "LIVE", "LAST", "MERGED":
return nil
default:
return fmt.Errorf(`Invalid parameter %q, must be either "LOCAL", "LIVE", "LAST" or "MERGED"`, arg)
}
}
func parseDiffArguments(args []string) (string, string, error) {
if len(args) > 2 {
return "", "", fmt.Errorf("Invalid number of arguments: expected at most 2.")
}
// Default values
from := "LOCAL"
to := "LIVE"
if len(args) > 0 {
from = args[0]
}
if len(args) > 1 {
to = args[1]
}
if err := isValidArgument(to); err != nil {
return "", "", err
}
if err := isValidArgument(from); err != nil {
return "", "", err
}
return from, to, nil
}
func NewCmdDiff(f cmdutil.Factory, stdout, stderr io.Writer) *cobra.Command {
var options DiffOptions
diff := DiffProgram{
Exec: exec.New(),
Stdout: stdout,
Stderr: stderr,
}
cmd := &cobra.Command{
Use: "diff -f FILENAME",
Short: i18n.T("Diff different versions of configurations"),
Long: diffLong,
Example: diffExample,
Run: func(cmd *cobra.Command, args []string) {
from, to, err := parseDiffArguments(args)
cmdutil.CheckErr(err)
cmdutil.CheckErr(RunDiff(f, &diff, &options, from, to))
},
}
usage := "contains the configuration to diff"
cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
cmd.MarkFlagRequired("filename")
return cmd
}
// DiffProgram finds and runs the diff program. The value of the
// KUBERNETES_EXTERNAL_DIFF environment variable will be used as the diff
// program. By default, `diff(1)` will be used.
type DiffProgram struct {
Exec exec.Interface
Stdout io.Writer
Stderr io.Writer
}
func (d *DiffProgram) getCommand(args ...string) exec.Cmd {
diff := ""
if envDiff := os.Getenv("KUBERNETES_EXTERNAL_DIFF"); envDiff != "" {
diff = envDiff
} else {
diff = "diff"
args = append([]string{"-u", "-N"}, args...)
}
cmd := d.Exec.Command(diff, args...)
cmd.SetStdout(d.Stdout)
cmd.SetStderr(d.Stderr)
return cmd
}
// Run runs the detected diff program. `from` and `to` are the directories to diff.
func (d *DiffProgram) Run(from, to string) error {
d.getCommand(from, to).Run() // Ignore diff return code
return nil
}
// Printer is used to print an object.
type Printer struct{}
// Print the object inside the writer w.
func (p *Printer) Print(obj map[string]interface{}, w io.Writer) error {
if obj == nil {
return nil
}
data, err := yaml.Marshal(obj)
if err != nil {
return err
}
_, err = w.Write(data)
return err
}
// DiffVersion gets the proper version of objects, and aggregate them into a directory.
type DiffVersion struct {
Dir *Directory
Name string
}
// NewDiffVersion creates a new DiffVersion with the named version.
func NewDiffVersion(name string) (*DiffVersion, error) {
dir, err := CreateDirectory(name)
if err != nil {
return nil, err
}
return &DiffVersion{
Dir: dir,
Name: name,
}, nil
}
func (v *DiffVersion) getObject(obj Object) (map[string]interface{}, error) {
switch v.Name {
case "LIVE":
return obj.Live()
case "MERGED":
return obj.Merged()
case "LOCAL":
return obj.Local()
case "LAST":
return obj.Last()
}
return nil, fmt.Errorf("Unknown version: %v", v.Name)
}
// Print prints the object using the printer into a new file in the directory.
func (v *DiffVersion) Print(obj Object, printer Printer) error {
vobj, err := v.getObject(obj)
if err != nil {
return err
}
f, err := v.Dir.NewFile(obj.Name())
if err != nil {
return err
}
defer f.Close()
return printer.Print(vobj, f)
}
// Directory creates a new temp directory, and allows to easily create new files.
type Directory struct {
Name string
}
// CreateDirectory creates the actual directory on disk and returns a
// new representation of it.
func CreateDirectory(prefix string) (*Directory, error) {
name, err := ioutil.TempDir("", prefix+"-")
if err != nil {
return nil, err
}
return &Directory{
Name: name,
}, nil
}
// NewFile creates a new file in the directory.
func (d *Directory) NewFile(name string) (*os.File, error) {
return os.OpenFile(filepath.Join(d.Name, name), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700)
}
// Delete removes the directory recursively.
func (d *Directory) Delete() error {
return os.RemoveAll(d.Name)
}
// Object is an interface that lets you retrieve multiple versions of
// an object.
type Object interface {
Local() (map[string]interface{}, error)
Live() (map[string]interface{}, error)
Last() (map[string]interface{}, error)
Merged() (map[string]interface{}, error)
Name() string
}
// InfoObject is an implementation of the Object interface. It gets all
// the information from the Info object.
type InfoObject struct {
Info *resource.Info
Encoder runtime.Encoder
Parser *parse.Factory
}
var _ Object = &InfoObject{}
func (obj InfoObject) toMap(data []byte) (map[string]interface{}, error) {
m := map[string]interface{}{}
if len(data) == 0 {
return m, nil
}
err := json.Unmarshal(data, &m)
return m, err
}
func (obj InfoObject) Local() (map[string]interface{}, error) {
data, err := runtime.Encode(obj.Encoder, obj.Info.Object)
if err != nil {
return nil, err
}
return obj.toMap(data)
}
func (obj InfoObject) Live() (map[string]interface{}, error) {
if obj.Info.Object == nil {
return nil, nil // Object doesn't exist on cluster.
}
data, err := runtime.Encode(obj.Encoder, obj.Info.Object)
if err != nil {
return nil, err
}
return obj.toMap(data)
}
func (obj InfoObject) Merged() (map[string]interface{}, error) {
local, err := obj.Local()
if err != nil {
return nil, err
}
live, err := obj.Live()
if err != nil {
return nil, err
}
last, err := obj.Last()
if err != nil {
return nil, err
}
if live == nil || last == nil {
return local, nil // We probably don't have a live version, so merged is local.
}
elmt, err := obj.Parser.CreateElement(last, local, live)
if err != nil {
return nil, err
}
result, err := elmt.Merge(strategy.Create(strategy.Options{}))
return result.MergedResult.(map[string]interface{}), err
}
func (obj InfoObject) Last() (map[string]interface{}, error) {
if obj.Info.Object == nil {
return nil, nil // No object is live, return empty
}
accessor, err := meta.Accessor(obj.Info.Object)
if err != nil {
return nil, err
}
annots := accessor.GetAnnotations()
if annots == nil {
return nil, nil // Not an error, just empty.
}
return obj.toMap([]byte(annots[api.LastAppliedConfigAnnotation]))
}
func (obj InfoObject) Name() string {
return obj.Info.Name
}
// Differ creates two DiffVersion and diffs them.
type Differ struct {
From *DiffVersion
To *DiffVersion
}
func NewDiffer(from, to string) (*Differ, error) {
differ := Differ{}
var err error
differ.From, err = NewDiffVersion(from)
if err != nil {
return nil, err
}
differ.To, err = NewDiffVersion(to)
if err != nil {
differ.From.Dir.Delete()
return nil, err
}
return &differ, nil
}
// Diff diffs two versions of a specific object, and prints both versions into their directories.
func (d *Differ) Diff(obj Object, printer Printer) error {
if err := d.From.Print(obj, printer); err != nil {
return err
}
if err := d.To.Print(obj, printer); err != nil {
return err
}
return nil
}
// Run runs the diff program against both directories.
func (d *Differ) Run(diff *DiffProgram) error {
return diff.Run(d.From.Dir.Name, d.To.Dir.Name)
}
// TearDown removes both temporary directories recursively.
func (d *Differ) TearDown() {
d.From.Dir.Delete() // Ignore error
d.To.Dir.Delete() // Ignore error
}
// RunDiff uses the factory to parse file arguments, finds the versions to
// diff, finds the Info object for each file, and runs them through the
// differ.
func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions, from, to string) error {
openapi, err := f.OpenAPISchema()
if err != nil {
return err
}
parser := &parse.Factory{Resources: openapi}
differ, err := NewDiffer(from, to)
if err != nil {
return err
}
defer differ.TearDown()
printer := Printer{}
cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
if err != nil {
return err
}
r := f.NewBuilder().
Unstructured().
NamespaceParam(cmdNamespace).DefaultNamespace().
FilenameParam(enforceNamespace, &options.FilenameOptions).
Flatten().
Do()
if err := r.Err(); err != nil {
return err
}
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
if err := info.Get(); err != nil {
if !errors.IsNotFound(err) {
return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%v\nfrom server for:", info), info.Source, err)
}
info.Object = nil
}
obj := InfoObject{
Info: info,
Parser: parser,
Encoder: f.JSONEncoder(),
}
return differ.Diff(obj, printer)
})
if err != nil {
return err
}
differ.Run(diff)
return nil
}
|
[
"\"KUBERNETES_EXTERNAL_DIFF\""
] |
[] |
[
"KUBERNETES_EXTERNAL_DIFF"
] |
[]
|
["KUBERNETES_EXTERNAL_DIFF"]
|
go
| 1 | 0 | |
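As the command help above notes, the external diff program is chosen via KUBERNETES_EXTERNAL_DIFF and is handed the two temporary directories (LOCAL and LIVE by default). A sketch of driving that from Python; "colordiff" and pod.json are placeholders and kubectl is assumed to be on PATH:
import os
import subprocess

# Mirror the getCommand() lookup: point KUBERNETES_EXTERNAL_DIFF at any
# two-argument diff-like tool before invoking "kubectl alpha diff".
env = dict(os.environ, KUBERNETES_EXTERNAL_DIFF="colordiff")
subprocess.run(["kubectl", "alpha", "diff", "-f", "pod.json"], env=env, check=False)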
.docs/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# add flopy root directory to the python path
sys.path.insert(0, os.path.abspath(".."))
from flopy import __version__
# -- determine if running on readthedocs ------------------------------------
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# -- create source rst files ------------------------------------------------
cmd = "sphinx-apidoc -e -o source/ ../flopy/"
print(cmd)
os.system(cmd)
# -- programmatically create rst files --------------------------------------
cmd = ("python", "create_rstfiles.py")
print(" ".join(cmd))
os.system(" ".join(cmd))
# -- convert the tutorial scripts -------------------------------------------
if not on_rtd:
cmd = ("python", "create_tutorials.py")
print(" ".join(cmd))
os.system(" ".join(cmd))
# -- Project information -----------------------------------------------------
project = "flopy Documentation"
copyright = "2020, Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Leaf, A. T., Paulinski, S. R., Larsen, J. D., Toews, M. W., Morway, E. D., Bellino, J. C., Starn, J. J., and Fienen, M. N."
author = "Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Leaf, A. T., Paulinski, S. R., Larsen, J. D., Toews, M. W., Morway, E. D., Bellino, J. C., Starn, J. J., and Fienen, M. N."
# The version.
version = __version__
release = __version__
language = None
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"IPython.sphinxext.ipython_console_highlighting", # lowercase didn't work
"sphinx.ext.autosectionlabel",
"nbsphinx",
"nbsphinx_link",
"recommonmark",
]
# Settings for GitHub actions integration
if on_rtd:
extensions.append("rtds_action")
rtds_action_github_repo = "modflowpy/flopy"
rtds_action_path = "_notebooks"
rtds_action_artifact_prefix = "notebooks-for-"
rtds_action_github_token = os.environ.get("GITHUB_TOKEN", None)
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
source_suffix = ".rst"
# The encoding of source files.
source_encoding = "utf-8"
# The master toctree document.
master_doc = "index"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"github_url": "https://github.com/modflowpy/flopy",
"use_edit_page_button": False,
}
autosummary_generate = True
numpydoc_show_class_members = False
html_context = {
"github_user": "flopy",
"github_repo": "flopy",
"github_version": "master",
"doc_path": "doc",
}
html_css_files = [
"css/custom.css",
]
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "flopy"
html_favicon = "_images/flopylogo.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# Output file base name for HTML help builder.
htmlhelp_basename = "flopydoc"
|
[] |
[] |
[
"GITHUB_TOKEN",
"READTHEDOCS"
] |
[]
|
["GITHUB_TOKEN", "READTHEDOCS"]
|
python
| 2 | 0 | |
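conf.py above branches on READTHEDOCS: on Read the Docs it enables rtds_action (using GITHUB_TOKEN), while a local build converts the tutorials itself. A sketch of a local build with that variable explicitly cleared; it assumes Sphinx is installed and the working directory is the .docs/ folder:
import os
import subprocess

# Drop READTHEDOCS so on_rtd evaluates to False and the local code path runs.
env = {k: v for k, v in os.environ.items() if k != "READTHEDOCS"}
subprocess.run(["sphinx-build", "-b", "html", ".", "_build/html"], env=env, check=True)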
pkg/s3/mounter.go
|
package s3
import (
"fmt"
"os"
"os/exec"
"k8s.io/klog/v2"
)
// Mounter interface
type Mounter interface {
Stage(stagePath string) error
Unstage(stagePath string) error
Mount(source string, target string) error
}
// newMounter returns a new mounter
func newMounter(meta *metadata, cfg *Config) Mounter {
return &s3fsMounter{
metadata: meta,
url: cfg.Endpoint,
region: cfg.Region,
pwFileContent: cfg.AccessKeyID + ":" + cfg.SecretAccessKey,
}
}
// Implements Mounter
type s3fsMounter struct {
metadata *metadata
url string
region string
pwFileContent string
}
const (
s3fsCmd = "s3fs"
)
func (s3fs *s3fsMounter) Stage(stageTarget string) error {
return nil
}
func (s3fs *s3fsMounter) Unstage(stageTarget string) error {
return nil
}
func (s3fs *s3fsMounter) Mount(source string, target string) error {
if err := writes3fsPass(s3fs.pwFileContent); err != nil {
return err
}
args := []string{
fmt.Sprintf("%s:/%s", s3fs.metadata.Name, s3fs.metadata.FSPath),
target,
"-o", "use_path_request_style",
"-o", fmt.Sprintf("url=%s", s3fs.url),
"-o", fmt.Sprintf("endpoint=%s", s3fs.region),
"-o", "allow_other",
"-o", "mp_umask=000",
}
return fuseMount(s3fsCmd, args)
}
func writes3fsPass(pwFileContent string) error {
pwFileName := fmt.Sprintf("%s/.passwd-s3fs", os.Getenv("HOME"))
pwFile, err := os.OpenFile(pwFileName, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
_, err = pwFile.WriteString(pwFileContent)
if err != nil {
return err
}
pwFile.Close()
return nil
}
func fuseMount(command string, args []string) error {
cmd := exec.Command(command, args...)
klog.Infof("mounting fuse with command:%s with args:%s", command, args)
out, err := cmd.CombinedOutput()
if err != nil {
klog.Errorf("mounting fuse with command:%s with args:%s error:%s", command, args, string(out))
return fmt.Errorf("fuseMount command:%s with args:%s error:%s", command, args, string(out))
}
return nil
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
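For comparison, the two steps the Go mounter performs can be re-sketched in Python: write the ~/.passwd-s3fs credentials file and invoke s3fs with the same options used in Mount(). Bucket, endpoint, mount point and credentials are placeholders, and running this for real needs s3fs installed plus permission to use allow_other:
import os
import subprocess

access_key, secret_key = "AKIAEXAMPLE", "secretEXAMPLE"
pw_file = os.path.join(os.environ["HOME"], ".passwd-s3fs")
with open(pw_file, "w") as f:
    f.write(f"{access_key}:{secret_key}")
os.chmod(pw_file, 0o600)  # restrict permissions; s3fs expects a private credentials file

subprocess.run([
    "s3fs", "example-bucket:/data", "/mnt/example",
    "-o", "use_path_request_style",
    "-o", "url=https://s3.example.com",
    "-o", "endpoint=us-east-1",
    "-o", "allow_other",
    "-o", "mp_umask=000",
], check=True)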
controllers/storagecluster/reconcile.go
|
package storagecluster
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/blang/semver"
"github.com/go-logr/logr"
openshiftv1 "github.com/openshift/api/template/v1"
conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
ocsv1 "github.com/openshift/ocs-operator/api/v1"
statusutil "github.com/openshift/ocs-operator/controllers/util"
"github.com/openshift/ocs-operator/version"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// ReconcileStrategy is a string representing how we want to reconcile
// (or not) a particular resource
type ReconcileStrategy string
// StorageClassProvisionerType is a string representing StorageClass Provisioner. E.g: aws-ebs
type StorageClassProvisionerType string
type resourceManager interface {
ensureCreated(*StorageClusterReconciler, *ocsv1.StorageCluster) error
ensureDeleted(*StorageClusterReconciler, *ocsv1.StorageCluster) error
}
type ocsCephConfig struct{}
type ocsJobTemplates struct{}
const (
rookConfigMapName = "rook-config-override"
defaultRookConfigData = `
[global]
mon_osd_full_ratio = .85
mon_osd_backfillfull_ratio = .8
mon_osd_nearfull_ratio = .75
mon_max_pg_per_osd = 600
[osd]
osd_memory_target_cgroup_limit_ratio = 0.5
`
monCountOverrideEnvVar = "MON_COUNT_OVERRIDE"
// Name of MetadataPVCTemplate
metadataPVCName = "metadata"
// Name of WalPVCTemplate
walPVCName = "wal"
// ReconcileStrategyUnknown is the same as default
ReconcileStrategyUnknown ReconcileStrategy = ""
// ReconcileStrategyInit means reconcile once and ignore if it exists
ReconcileStrategyInit ReconcileStrategy = "init"
// ReconcileStrategyIgnore means never reconcile
ReconcileStrategyIgnore ReconcileStrategy = "ignore"
// ReconcileStrategyManage means always reconcile
ReconcileStrategyManage ReconcileStrategy = "manage"
// ReconcileStrategyStandalone also means never reconcile (NooBaa)
ReconcileStrategyStandalone ReconcileStrategy = "standalone"
// DeviceTypeSSD represents the DeviceType SSD
DeviceTypeSSD = "ssd"
// DeviceTypeHDD represents the DeviceType HDD
DeviceTypeHDD = "hdd"
// DeviceTypeNVMe represents the DeviceType NVMe
DeviceTypeNVMe = "nvme"
// AzureDisk represents Azure Premium Managed Disks provisioner for StorageClass
AzureDisk StorageClassProvisionerType = "kubernetes.io/azure-disk"
// EBS represents AWS EBS provisioner for StorageClass
EBS StorageClassProvisionerType = "kubernetes.io/aws-ebs"
)
var storageClusterFinalizer = "storagecluster.ocs.openshift.io"
const labelZoneRegionWithoutBeta = "failure-domain.kubernetes.io/region"
const labelZoneFailureDomainWithoutBeta = "failure-domain.kubernetes.io/zone"
const labelRookPrefix = "topology.rook.io"
var validTopologyLabelKeys = []string{
// This is the most preferred key as kubernetes recommends zone and region
// labels under this key.
corev1.LabelZoneRegionStable,
// These two are retained only to have backward compatibility; they are
// deprecated by kubernetes. If topology.kubernetes.io key has same label we
// will skip the next two from the topologyMap.
corev1.LabelZoneRegion,
labelZoneRegionWithoutBeta,
// This is the most preferred key as kubernetes recommends zone and region
// labels under this key.
corev1.LabelZoneFailureDomainStable,
// These two are retained only to have backward compatibility; they are
// deprecated by kubernetes. If topology.kubernetes.io key has same label we
// will skip the next two from the topologyMap.
corev1.LabelZoneFailureDomain,
labelZoneFailureDomainWithoutBeta,
// This is the kubernetes recommended label to select nodes.
corev1.LabelHostname,
// This label is used to assign rack based topology.
labelRookPrefix,
}
// +kubebuilder:rbac:groups=ocs.openshift.io,resources=*,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=ceph.rook.io,resources=cephclusters;cephblockpools;cephfilesystems;cephobjectstores;cephobjectstoreusers,verbs=*
// +kubebuilder:rbac:groups=noobaa.io,resources=noobaas,verbs=*
// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=*
// +kubebuilder:rbac:groups=core,resources=pods;services;endpoints;persistentvolumeclaims;events;configmaps;secrets;nodes,verbs=*
// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get
// +kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=*
// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors;prometheusrules,verbs=get;list;watch;create;update
// +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshots;volumesnapshotclasses,verbs=*
// +kubebuilder:rbac:groups=template.openshift.io,resources=templates,verbs=*
// +kubebuilder:rbac:groups=config.openshift.io,resources=infrastructures,verbs=get;list;watch
// +kubebuilder:rbac:groups=console.openshift.io,resources=consolequickstarts,verbs=*
// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch;create;update
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=*
// Reconcile reads that state of the cluster for a StorageCluster object and makes changes based on the state read
// and what is in the StorageCluster.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *StorageClusterReconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
prevLogger := r.Log
defer func() { r.Log = prevLogger }()
r.Log = r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
// Fetch the StorageCluster instance
sc := &ocsv1.StorageCluster{}
if err := r.Client.Get(context.TODO(), request.NamespacedName, sc); err != nil {
if errors.IsNotFound(err) {
r.Log.Info("No StorageCluster resource")
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
if err := r.validateStorageClusterSpec(sc, request); err != nil {
return reconcile.Result{}, err
}
// Reconcile changes to the cluster
result, reconcileError := r.reconcilePhases(sc, request)
// Apply status changes to the storagecluster
statusError := r.Client.Status().Update(context.TODO(), sc)
if statusError != nil {
r.Log.Info("Status Update Error", "StatusUpdateErr", "Could not update storagecluster status")
}
// Reconcile errors have higher priority than status update errors
if reconcileError != nil {
return result, reconcileError
} else if statusError != nil {
return result, statusError
} else {
return result, nil
}
}
func (r *StorageClusterReconciler) initializeImagesStatus(sc *ocsv1.StorageCluster) {
images := &sc.Status.Images
if images.Ceph == nil {
images.Ceph = &ocsv1.ComponentImageStatus{}
}
images.Ceph.DesiredImage = r.images.Ceph
if images.NooBaaCore == nil {
images.NooBaaCore = &ocsv1.ComponentImageStatus{}
}
images.NooBaaCore.DesiredImage = r.images.NooBaaCore
if images.NooBaaDB == nil {
images.NooBaaDB = &ocsv1.ComponentImageStatus{}
}
images.NooBaaDB.DesiredImage = r.images.NooBaaDB
}
// validateStorageClusterSpec must be called before reconciling. Any syntactic and semantic errors in the CR must be caught here.
func (r *StorageClusterReconciler) validateStorageClusterSpec(instance *ocsv1.StorageCluster, request reconcile.Request) error {
if err := versionCheck(instance, r.Log); err != nil {
r.Log.Error(err, "Failed to validate version")
r.recorder.Event(instance, statusutil.EventTypeWarning, statusutil.EventReasonValidationFailed, err.Error())
return err
}
if !instance.Spec.ExternalStorage.Enable {
if err := r.validateStorageDeviceSets(instance); err != nil {
r.Log.Error(err, "Failed to validate StorageDeviceSets")
r.recorder.Event(instance, statusutil.EventTypeWarning, statusutil.EventReasonValidationFailed, err.Error())
return err
}
}
if err := validateArbiterSpec(instance, r.Log); err != nil {
r.Log.Error(err, "Failed to validate ArbiterSpec")
r.recorder.Event(instance, statusutil.EventTypeWarning, statusutil.EventReasonValidationFailed, err.Error())
return err
}
return nil
}
func (r *StorageClusterReconciler) reconcilePhases(
instance *ocsv1.StorageCluster,
request reconcile.Request) (reconcile.Result, error) {
if instance.Spec.ExternalStorage.Enable {
r.Log.Info("Reconciling external StorageCluster")
} else {
r.Log.Info("Reconciling StorageCluster")
}
// Initialize the StatusImages section of the storagecluster CR
r.initializeImagesStatus(instance)
// Check for an active StorageCluster only when a Create request is made,
// and ignore this one if there's another active StorageCluster.
// If an Update request is made and the StorageCluster is PhaseIgnored, there is
// no need to proceed further
if instance.Status.Phase == "" {
isActive, err := r.isActiveStorageCluster(instance)
if err != nil {
r.Log.Error(err, "StorageCluster could not be reconciled. Retrying")
return reconcile.Result{}, err
}
if !isActive {
instance.Status.Phase = statusutil.PhaseIgnored
return reconcile.Result{}, nil
}
} else if instance.Status.Phase == statusutil.PhaseIgnored {
return reconcile.Result{}, nil
}
if instance.Status.Phase != statusutil.PhaseReady &&
instance.Status.Phase != statusutil.PhaseClusterExpanding &&
instance.Status.Phase != statusutil.PhaseDeleting &&
instance.Status.Phase != statusutil.PhaseConnecting {
instance.Status.Phase = statusutil.PhaseProgressing
}
// Add conditions if there are none
if instance.Status.Conditions == nil {
reason := ocsv1.ReconcileInit
message := "Initializing StorageCluster"
statusutil.SetProgressingCondition(&instance.Status.Conditions, reason, message)
}
// Check GetDeletionTimestamp to determine if the object is under deletion
if instance.GetDeletionTimestamp().IsZero() {
if !contains(instance.GetFinalizers(), storageClusterFinalizer) {
r.Log.Info("Finalizer not found for storagecluster. Adding finalizer")
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, storageClusterFinalizer)
if err := r.Client.Update(context.TODO(), instance); err != nil {
r.Log.Info("Update Error", "MetaUpdateErr", "Failed to update storagecluster with finalizer")
return reconcile.Result{}, err
}
}
if err := r.reconcileUninstallAnnotations(instance); err != nil {
return reconcile.Result{}, err
}
} else {
// The object is marked for deletion
instance.Status.Phase = statusutil.PhaseDeleting
if contains(instance.GetFinalizers(), storageClusterFinalizer) {
if err := r.deleteResources(instance); err != nil {
r.Log.Info("Uninstall in progress", "Status", err)
r.recorder.Event(instance, statusutil.EventTypeWarning, statusutil.EventReasonUninstallPending, err.Error())
return reconcile.Result{RequeueAfter: time.Second * time.Duration(1)}, nil
}
r.Log.Info("Removing finalizer")
// Once all finalizers have been removed, the object will be deleted
instance.ObjectMeta.Finalizers = remove(instance.ObjectMeta.Finalizers, storageClusterFinalizer)
if err := r.Client.Update(context.TODO(), instance); err != nil {
r.Log.Info("Update Error", "MetaUpdateErr", "Failed to remove finalizer from storagecluster")
return reconcile.Result{}, err
}
}
r.Log.Info("Object is terminated, skipping reconciliation")
return reconcile.Result{}, nil
}
if !instance.Spec.ExternalStorage.Enable {
// Get storage node topology labels
if err := r.reconcileNodeTopologyMap(instance); err != nil {
r.Log.Error(err, "Failed to set node topology map")
return reconcile.Result{}, err
}
}
// in-memory conditions should start off empty. They will only ever hold
// negative conditions (!Available, Degraded, Progressing)
r.conditions = nil
// Start with empty r.phase
r.phase = ""
var objs []resourceManager
if !instance.Spec.ExternalStorage.Enable {
// list of default ensure functions
objs = []resourceManager{
&ocsStorageClass{},
&ocsSnapshotClass{},
&ocsCephObjectStores{},
&ocsCephObjectStoreUsers{},
&ocsCephRGWRoutes{},
&ocsCephBlockPools{},
&ocsCephFilesystems{},
&ocsCephConfig{},
&ocsCephCluster{},
&ocsNoobaaSystem{},
&ocsJobTemplates{},
&ocsQuickStarts{},
}
} else {
// for external cluster, we have a different set of ensure functions
objs = []resourceManager{
&ocsExternalResources{},
&ocsCephCluster{},
&ocsSnapshotClass{},
&ocsNoobaaSystem{},
&ocsQuickStarts{},
}
}
for _, obj := range objs {
err := obj.ensureCreated(r, instance)
if r.phase == statusutil.PhaseClusterExpanding {
instance.Status.Phase = statusutil.PhaseClusterExpanding
} else if instance.Status.Phase != statusutil.PhaseReady &&
instance.Status.Phase != statusutil.PhaseConnecting {
instance.Status.Phase = statusutil.PhaseProgressing
}
if err != nil {
reason := ocsv1.ReconcileFailed
message := fmt.Sprintf("Error while reconciling: %v", err)
statusutil.SetErrorCondition(&instance.Status.Conditions, reason, message)
instance.Status.Phase = statusutil.PhaseError
// don't want to overwrite the actual reconcile failure
return reconcile.Result{}, err
}
}
// All component operators are in a happy state.
if r.conditions == nil {
r.Log.Info("No component operator reported negatively")
reason := ocsv1.ReconcileCompleted
message := ocsv1.ReconcileCompletedMessage
statusutil.SetCompleteCondition(&instance.Status.Conditions, reason, message)
// If no operator whose conditions we are watching reports an error, then it is safe
// to set readiness.
ReadinessSet()
if instance.Status.Phase != statusutil.PhaseClusterExpanding &&
!instance.Spec.ExternalStorage.Enable {
instance.Status.Phase = statusutil.PhaseReady
}
} else {
// If any component operator reports negatively we want to write that to
// the instance while preserving its lastTransitionTime.
// For example, consider a resource that has the Available condition
// type with status "False". When reconciling the resource we would
// add it to the in-memory representation of OCS's conditions (r.conditions)
// and here we are simply writing it back to the server.
// One shortcoming is that only one failure of a particular condition can be
// captured at one time (i.e. if resource1 and resource2 are both reporting !Available,
// you will only see resource2 as it updates last).
for _, condition := range r.conditions {
conditionsv1.SetStatusCondition(&instance.Status.Conditions, condition)
}
reason := ocsv1.ReconcileCompleted
message := ocsv1.ReconcileCompletedMessage
conditionsv1.SetStatusCondition(&instance.Status.Conditions, conditionsv1.Condition{
Type: ocsv1.ConditionReconcileComplete,
Status: corev1.ConditionTrue,
Reason: reason,
Message: message,
})
// If for any reason we marked ourselves !upgradeable...then unset readiness
if conditionsv1.IsStatusConditionFalse(instance.Status.Conditions, conditionsv1.ConditionUpgradeable) {
ReadinessUnset()
}
if instance.Status.Phase != statusutil.PhaseClusterExpanding &&
!instance.Spec.ExternalStorage.Enable {
if conditionsv1.IsStatusConditionTrue(instance.Status.Conditions, conditionsv1.ConditionProgressing) {
instance.Status.Phase = statusutil.PhaseProgressing
} else if conditionsv1.IsStatusConditionFalse(instance.Status.Conditions, conditionsv1.ConditionUpgradeable) {
instance.Status.Phase = statusutil.PhaseNotReady
} else {
instance.Status.Phase = statusutil.PhaseError
}
}
}
// enable metrics exporter at the end of reconcile
// this allows storagecluster to be instantiated before
// scraping metrics
if instance.Spec.MetricsExporter == nil || ReconcileStrategy(instance.Spec.MetricsExporter.ReconcileStrategy) != ReconcileStrategyIgnore {
if err := r.enableMetricsExporter(instance); err != nil {
r.Log.Error(err, "failed to reconcile metrics exporter")
return reconcile.Result{}, err
}
if err := r.enablePrometheusRules(instance.Spec.ExternalStorage.Enable); err != nil {
r.Log.Error(err, "failed to reconcile prometheus rules")
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// versionCheck populates the `.Spec.Version` field and returns an error if the
// StorageCluster version is newer than the version of the running operator
func versionCheck(sc *ocsv1.StorageCluster, reqLogger logr.Logger) error {
if sc.Spec.Version == "" {
sc.Spec.Version = version.Version
} else if sc.Spec.Version != version.Version { // check anything else only if the versions mismatch
storClustSemV1, err := semver.Make(sc.Spec.Version)
if err != nil {
reqLogger.Error(err, "Error while parsing Storage Cluster version")
return err
}
ocsSemV1, err := semver.Make(version.Version)
if err != nil {
reqLogger.Error(err, "Error while parsing OCS Operator version")
return err
}
// if the storage cluster version is higher than the invoking OCS Operator's version,
// return error
if storClustSemV1.GT(ocsSemV1) {
err = fmt.Errorf("Storage cluster version (%s) is higher than the OCS Operator version (%s)",
sc.Spec.Version, version.Version)
reqLogger.Error(err, "Incompatible Storage cluster version")
return err
}
// if the storage cluster version is less than the OCS Operator version,
// just update.
sc.Spec.Version = version.Version
}
return nil
}
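// Illustrative outcomes of versionCheck, assuming the running operator reports
// version "4.5.0" (version numbers are placeholders):
//
//	sc.Spec.Version == ""       -> field is populated with "4.5.0"
//	sc.Spec.Version == "4.4.0"  -> lower than the operator, bumped to "4.5.0"
//	sc.Spec.Version == "4.6.0"  -> higher than the operator, an error is returned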
// validateStorageDeviceSets checks the StorageDeviceSets of the given
// StorageCluster for completeness and correctness
func (r *StorageClusterReconciler) validateStorageDeviceSets(sc *ocsv1.StorageCluster) error {
for i, ds := range sc.Spec.StorageDeviceSets {
if ds.DataPVCTemplate.Spec.StorageClassName == nil || *ds.DataPVCTemplate.Spec.StorageClassName == "" {
return fmt.Errorf("failed to validate StorageDeviceSet %d: no StorageClass specified", i)
}
if ds.MetadataPVCTemplate != nil {
if ds.MetadataPVCTemplate.Spec.StorageClassName == nil || *ds.MetadataPVCTemplate.Spec.StorageClassName == "" {
return fmt.Errorf("failed to validate StorageDeviceSet %d: no StorageClass specified for metadataPVCTemplate", i)
}
}
if ds.WalPVCTemplate != nil {
if ds.WalPVCTemplate.Spec.StorageClassName == nil || *ds.WalPVCTemplate.Spec.StorageClassName == "" {
return fmt.Errorf("failed to validate StorageDeviceSet %d: no StorageClass specified for walPVCTemplate", i)
}
}
if ds.DeviceType != "" {
if (DeviceTypeSSD != strings.ToLower(ds.DeviceType)) && (DeviceTypeHDD != strings.ToLower(ds.DeviceType)) && (DeviceTypeNVMe != strings.ToLower(ds.DeviceType)) {
return fmt.Errorf("failed to validate DeviceType %q: no Device of this type", ds.DeviceType)
}
}
}
return nil
}
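// Illustrative StorageDeviceSet snippet (values are placeholders; only the
// fields checked by validateStorageDeviceSets are shown) that would pass the
// validation above:
//
//	storageDeviceSets:
//	- deviceType: ssd            # when set, must be ssd, hdd or nvme (case-insensitive)
//	  dataPVCTemplate:
//	    spec:
//	      storageClassName: gp2  # must be a non-empty StorageClass name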
// ensureCreated ensures that a ConfigMap resource exists with its Spec in
// the desired state.
func (obj *ocsCephConfig) ensureCreated(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) error {
reconcileStrategy := ReconcileStrategy(sc.Spec.ManagedResources.CephConfig.ReconcileStrategy)
if reconcileStrategy == ReconcileStrategyIgnore {
return nil
}
found := &corev1.ConfigMap{}
err := r.Client.Get(context.TODO(), types.NamespacedName{Name: rookConfigMapName, Namespace: sc.Namespace}, found)
if err == nil && reconcileStrategy == ReconcileStrategyInit {
return nil
}
ownerRef := metav1.OwnerReference{
UID: sc.UID,
APIVersion: sc.APIVersion,
Kind: sc.Kind,
Name: sc.Name,
}
cm := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: rookConfigMapName,
Namespace: sc.Namespace,
OwnerReferences: []metav1.OwnerReference{ownerRef},
},
Data: map[string]string{
"config": defaultRookConfigData,
},
}
if err != nil {
if errors.IsNotFound(err) {
r.Log.Info("Creating Ceph ConfigMap")
err = r.Client.Create(context.TODO(), cm)
if err != nil {
return err
}
}
return err
}
ownerRefFound := false
for _, ownerRef := range found.OwnerReferences {
if ownerRef.UID == sc.UID {
ownerRefFound = true
}
}
val, ok := found.Data["config"]
if !ok || val != defaultRookConfigData || !ownerRefFound {
r.Log.Info("Updating Ceph ConfigMap")
return r.Client.Update(context.TODO(), cm)
}
return nil
}
// ensureDeleted is a dummy func for the ocsCephConfig
func (obj *ocsCephConfig) ensureDeleted(r *StorageClusterReconciler, instance *ocsv1.StorageCluster) error {
return nil
}
func (r *StorageClusterReconciler) isActiveStorageCluster(instance *ocsv1.StorageCluster) (bool, error) {
storageClusterList := ocsv1.StorageClusterList{}
// instance is already marked for deletion
// do not mark it as active
if !instance.GetDeletionTimestamp().IsZero() {
return false, nil
}
err := r.Client.List(context.TODO(), &storageClusterList, client.InNamespace(instance.Namespace))
if err != nil {
return false, fmt.Errorf("Error fetching StorageClusterList. %+v", err)
}
// There is only one StorageCluster i.e. instance
if len(storageClusterList.Items) == 1 {
return true, nil
}
// There are many StorageClusters. Check if this is Active
for n, storageCluster := range storageClusterList.Items {
if storageCluster.Status.Phase != statusutil.PhaseIgnored &&
storageCluster.ObjectMeta.Name != instance.ObjectMeta.Name {
// Both StorageClusters are in creation phase
// Tiebreak using CreationTimestamp and Alphanumeric ordering
if storageCluster.Status.Phase == "" {
if storageCluster.CreationTimestamp.Before(&instance.CreationTimestamp) {
return false, nil
} else if storageCluster.CreationTimestamp.Equal(&instance.CreationTimestamp) && storageCluster.Name < instance.Name {
return false, nil
}
if n == len(storageClusterList.Items)-1 {
return true, nil
}
continue
}
return false, nil
}
}
return true, nil
}
// Checks whether a string is contained within a slice
func contains(slice []string, s string) bool {
for _, item := range slice {
if item == s {
return true
}
}
return false
}
// Removes a given string from a slice and returns the new slice
func remove(slice []string, s string) (result []string) {
for _, item := range slice {
if item == s {
continue
}
result = append(result, item)
}
return
}
// ensureCreated ensures that the osd removal job template exists
func (obj *ocsJobTemplates) ensureCreated(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) error {
osdCleanUpTemplate := &openshiftv1.Template{
ObjectMeta: metav1.ObjectMeta{
Name: "ocs-osd-removal",
Namespace: sc.Namespace,
},
}
_, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, osdCleanUpTemplate, func() error {
osdCleanUpTemplate.Objects = []runtime.RawExtension{
{
Object: newCleanupJob(sc),
},
}
osdCleanUpTemplate.Parameters = []openshiftv1.Parameter{
{
Name: "FAILED_OSD_IDS",
DisplayName: "OSD IDs",
Required: true,
Description: `
The FAILED_OSD_IDS parameter takes a comma-separated list of numerical OSD IDs
when a single job removes multiple OSDs.
If the expected comma-separated format is not used,
or an ID cannot be converted to an int,
or if an OSD ID is not found, errors will be generated in the log and no OSDs will be removed.`,
},
}
return controllerutil.SetControllerReference(sc, osdCleanUpTemplate, r.Scheme)
})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create Template: %v", err.Error())
}
return nil
}
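// Illustrative use of the resulting template (the exact `oc` invocation is an
// assumption based on standard OpenShift template processing; the operator does
// not run this command itself):
//
//	oc process ocs-osd-removal -p FAILED_OSD_IDS=0,1 | oc create -f -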
// ensureDeleted is a dummy func for the ocsJobTemplates
func (obj *ocsJobTemplates) ensureDeleted(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) error {
return nil
}
func newCleanupJob(sc *ocsv1.StorageCluster) *batchv1.Job {
labels := map[string]string{
"app": "ceph-toolbox-job",
}
// Annotation template.alpha.openshift.io/wait-for-ready ensures template readiness
annotations := map[string]string{
"template.alpha.openshift.io/wait-for-ready": "true",
}
job := &batchv1.Job{
TypeMeta: metav1.TypeMeta{
Kind: "Job",
APIVersion: "batch/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "ocs-osd-removal-job",
Namespace: sc.Namespace,
Labels: labels,
Annotations: annotations,
},
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
ServiceAccountName: "rook-ceph-system",
Volumes: []corev1.Volume{
{
Name: "ceph-conf-emptydir",
VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
},
{
Name: "rook-config",
VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
},
},
Containers: []corev1.Container{
{
Name: "operator",
Image: os.Getenv("ROOK_CEPH_IMAGE"),
Args: []string{
"ceph",
"osd",
"remove",
"--osd-ids=${FAILED_OSD_IDS}",
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "ceph-conf-emptydir",
MountPath: "/etc/ceph",
},
{
Name: "rook-config",
MountPath: "/var/lib/rook",
},
},
Env: []corev1.EnvVar{
{
Name: "ROOK_MON_ENDPOINTS",
ValueFrom: &corev1.EnvVarSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
Key: "data",
LocalObjectReference: corev1.LocalObjectReference{Name: "rook-ceph-mon-endpoints"},
},
},
},
{
Name: "POD_NAMESPACE",
Value: sc.Namespace,
},
{
Name: "ROOK_CEPH_USERNAME",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
Key: "ceph-username",
LocalObjectReference: corev1.LocalObjectReference{Name: "rook-ceph-mon"},
},
},
},
{
Name: "ROOK_CEPH_SECRET",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
Key: "ceph-secret",
LocalObjectReference: corev1.LocalObjectReference{Name: "rook-ceph-mon"},
},
},
},
{
Name: "ROOK_FSID",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
Key: "fsid",
LocalObjectReference: corev1.LocalObjectReference{Name: "rook-ceph-mon"},
},
},
},
{
Name: "ROOK_CONFIG_DIR",
Value: "/var/lib/rook",
},
{
Name: "ROOK_CEPH_CONFIG_OVERRIDE",
Value: "/etc/rook/config/override.conf",
},
{
Name: "ROOK_LOG_LEVEL",
Value: "DEBUG",
},
},
},
},
},
},
},
}
return job
}
func validateArbiterSpec(sc *ocsv1.StorageCluster, reqLogger logr.Logger) error {
if sc.Spec.Arbiter.Enable && sc.Spec.FlexibleScaling {
return fmt.Errorf("arbiter and flexibleScaling both can't be enabled")
}
if sc.Spec.Arbiter.Enable && sc.Spec.NodeTopologies.ArbiterLocation == "" {
return fmt.Errorf("arbiter is set to enable but no arbiterLocation has been provided in the Spec.NodeTopologies.ArbiterLocation")
}
return nil
}
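// Illustrative spec snippet (field names inferred from the Go struct paths
// referenced above; values are placeholders) that satisfies both checks:
//
//	spec:
//	  flexibleScaling: false
//	  arbiter:
//	    enable: true
//	  nodeTopologies:
//	    arbiterLocation: zone-c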
|
[
"\"ROOK_CEPH_IMAGE\""
] |
[] |
[
"ROOK_CEPH_IMAGE"
] |
[]
|
["ROOK_CEPH_IMAGE"]
|
go
| 1 | 0 | |
test/functional/test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
# 20 minutes represented in seconds
TRAVIS_TIMEOUT_DURATION = 20 * 60
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_labels.py',
'p2p_segwit.py',
'wallet_dump.py',
'wallet_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'wallet_disableprivatekeys.py',
'wallet_disableprivatekeys.py --usecli',
'interface_http.py',
'rpc_psbt.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'wallet_groups.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_locator.py',
'p2p_invalid_block.py',
'p2p_invalid_tx.py',
'rpc_createmultisig.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'rpc_zmq.py',
'rpc_signmessage.py',
'wallet_balance.py',
'feature_nulldummy.py',
'mempool_accept.py',
'wallet_import_rescan.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'wallet_scriptaddress2.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'rpc_getblockstats.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
'rpc_scantxoutset.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'rpc_help.py',
'feature_help.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
# vv Tests less than 20m vv
'feature_fee_estimation.py',
# vv Tests less than 5m vv
'feature_maxuploadtarget.py',
'mempool_packages.py',
'feature_dbcrash.py',
# vv Tests less than 2m vv
'feature_bip68_sequence.py',
'mining_getblocktemplate_longpoll.py',
'p2p_timeouts.py',
# vv Tests less than 60s vv
'p2p_feefilter.py',
# vv Tests less than 30s vv
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/test_runner_Ł_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub(r"\.py$", "", test) + ".py" for test in tests]
for test in tests:
if test in ALL_SCRIPTS:
test_list.append(test)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
exclude_tests = [re.sub(r"\.py$", "", test) + ".py" for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list,
config["environment"]["SRCDIR"],
config["environment"]["BUILDDIR"],
tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
)
def run_tests(test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False):
args = args or []
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "magmelldollard"]) is not None:
print("%sWARNING!%s There is already a magmelldollard process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
tests_dir = src_dir + '/test/functional/'
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
# Run tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if os.getenv('TRAVIS') == 'true' and int(time.time() - start_time) > TRAVIS_TIMEOUT_DURATION:
# In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by this test runner."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `magmelldollar-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared with `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
[] |
[] |
[
"TRAVIS"
] |
[]
|
["TRAVIS"]
|
python
| 1 | 0 | |
fylm/tests/make.py
|
# -*- coding: future_fstrings -*-
# Copyright 2018 Brandon Shelley. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, print_function
from builtins import *
import os
import io
import re
import shutil
import json
import random
# For tests on Travis, miniaturize filesizes.
# To force this in local tests, do:
# export TRAVIS=true
# export TMDB_KEY={key}
# To unset these:
# unset TRAVIS
# unset TMDB_KEY
kb = 1 if os.environ.get('TRAVIS') is not None else 1024
mb = kb * 1024
gb = mb * 1024
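# Illustrative effect of the TRAVIS switch above: locally, gb == 1024**3 bytes
# (1 GiB), so a "35 GB" mock file really reports ~35 GiB; with TRAVIS set,
# kb == 1, so gb == 1024**2 bytes (1 MiB) and the same mock file shrinks to ~35 MiB.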
class MockFilm:
def __init__(self, expected_title, expected_id, acceptable_names):
self.expected_title = expected_title
self.expected_id = expected_id
self.acceptable_names = acceptable_names
class MakeFilmsResult:
def __init__(self, all_test_films, expected, expected_no_lookup, ignored):
self.all_test_films = all_test_films
self.expected = expected
self.expected_no_lookup = expected_no_lookup
self.ignored = ignored
def make_mock_file(path, size):
# Create an empty file that appears to the system to be the size of `size`.
try:
# Try to create the folder structure for the path just in case it doesn't exist
os.makedirs(os.path.dirname(path))
except Exception as e:
pass
# Force size to be an integer
size = int(round(size))
f = open(path, 'wb')
f.seek(size)
f.write(b'\0')
f.close()
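# Illustrative use (path and size are placeholders): the call below makes a file
# that the OS reports as ~8 GB while occupying almost no disk space, because only
# a single null byte is written at the seek offset (a sparse file on most filesystems).
#   make_mock_file('files/#new/Some.Film.2018.1080p.BluRay.x264.mkv', 8 * gb)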
def make_mock_files(json_path, files_path):
global gb
global mb
all_test_films = []
expected = []
expected_no_lookup = []
ignored = []
search_dir = os.path.join(os.path.dirname(__file__), files_path)
json_path = os.path.join(os.path.dirname(__file__), os.path.basename(json_path))
# Clean up first
try:
shutil.rmtree(search_dir)
except Exception:
pass
try:
os.makedirs(search_dir)
except Exception:
pass
with io.open(json_path, mode="r", encoding="utf-8") as json_data:
test_films = json.load(json_data)['test_films']
for test_film in test_films:
# Skip this test film if the skip property is set
if 'skip' in test_film and test_film['skip'] is True:
continue
if 'dir' in test_film:
os.makedirs(os.path.join(search_dir, test_film['dir']))
acceptable_names = []
for tf in test_film['files']:
parent_dir = test_film['dir'] if 'dir' in test_film else ''
file = os.path.join(search_dir, parent_dir, tf['filename'])
if 'expect_no_lookup' in tf:
# Add the expected filename to expected_no_lookup[]:
expected_no_lookup.append(tf['expect_no_lookup'])
acceptable_names.append(tf['expect_no_lookup'])
if 'expect' in tf and tf['expect'] is not None:
# Add the expected filename to expected[] and ..no_lookup[]:
expected.append(tf['expect'])
expected_no_lookup.append(tf['expect'])
acceptable_names.append(tf['expect'])
else:
ignored.append(tf['filename'])
size_2160p = random.randrange(int(35 * gb), int(55 * gb))
size_1080p = random.randrange(int(7 * gb), int(15 * gb))
size_720p = random.randrange(int(4 * gb), int(8 * gb))
size_sd = random.randrange(int(750 * mb), int(1300 * mb))
size_sample = random.randrange(10 * mb, 50 * mb)
if (re.search(re.compile(r'\bsample', re.I), file)
or os.path.basename(file) == 'ETRG.mp4'):
size = size_sample
elif re.search(re.compile(r'720p?', re.I), file):
size = size_720p
elif re.search(re.compile(r'1080p?', re.I), file):
size = size_1080p
elif re.search(re.compile(r'(2160p?|\b4K)', re.I), file):
size = size_2160p
elif os.path.splitext(file)[1] in ['.avi', '.mp4']:
size = size_sd
else:
size = size_1080p
# Create an empty file that appears to the system to be
# a random size akin to the quality of the film.
make_mock_file(file, size)
tmdb_id = test_film['tmdb_id'] if 'tmdb_id' in test_film else None
title = test_film['title'] if 'title' in test_film else None
all_test_films.append(MockFilm(title, tmdb_id, acceptable_names))
return MakeFilmsResult(all_test_films, expected, expected_no_lookup, ignored)
if __name__ == '__main__':
make_mock_files('files.json', 'files/#new/')
|
[] |
[] |
[
"TRAVIS"
] |
[]
|
["TRAVIS"]
|
python
| 1 | 0 | |
vendor/github.com/mackerelio/golib/cmd/mackerel-github-release/main.go
|
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"github.com/github/hub/github"
"github.com/mitchellh/go-homedir"
"github.com/octokit/go-octokit/octokit"
)
const (
exitOK = iota
exitError
)
const version = "0.0.0"
func main() {
os.Exit(run(os.Args[1:]))
}
func run(argv []string) int {
remotes, err := github.Remotes()
if err != nil || len(remotes) < 1 {
log.Printf("can't detect remote repository: %#v\n", err)
return exitError
}
proj, err := remotes[0].Project()
if err != nil {
log.Printf("failed to retrieve project: %#v\n", err)
return exitError
}
fs := flag.NewFlagSet("mackerel-github-release", flag.ContinueOnError)
var (
dryRun = fs.Bool("dry-run", false, "dry-run mode")
staging = fs.Bool("staging", false, "staging release")
)
err = fs.Parse(argv)
if err != nil {
if err == flag.ErrHelp {
return exitOK
}
return exitError
}
out, err := exec.Command("gobump", "show").Output()
if err != nil {
log.Printf("failed to `gobump show`: %#v\n", err)
return exitError
}
var v struct {
Version string `json:"version"`
}
err = json.Unmarshal(out, &v)
if err != nil {
log.Printf("failed to unmarshal `gobump show`'s output: %#v\n", err)
return exitError
}
err = uploadToGithubRelease(proj, v.Version, *staging, *dryRun)
if err != nil {
log.Printf("error occured while uploading artifacts to github: %#v\n", err)
return exitError
}
return exitOK
}
var errAlreadyReleased = fmt.Errorf("the release of this version already exists at GitHub Releases, so skipping the process")
func uploadToGithubRelease(proj *github.Project, releaseVer string, staging, dryRun bool) error {
tag := "staging"
if !staging {
tag = "v" + releaseVer
}
repo, owner := proj.Name, proj.Owner
octoCli := getOctoCli()
pr, err := getReleasePullRequest(octoCli, owner, repo, releaseVer)
if err != nil {
return err
}
err = handleOldRelease(octoCli, owner, repo, tag, staging, dryRun)
if err != nil {
if err == errAlreadyReleased {
log.Println(err.Error())
return nil
}
return err
}
body := pr.Body
assets, err := collectAssets()
if err != nil {
return fmt.Errorf("error occured while collecting releasing assets: %#v", err)
}
host, err := github.CurrentConfig().PromptForHost(proj.Host)
if err != nil {
return fmt.Errorf("failed to detect github config: %#v", err)
}
gh := github.NewClientWithHost(host)
if !dryRun {
params := &github.Release{
TagName: tag,
Name: tag,
Body: body,
Prerelease: true,
}
release, err := gh.CreateRelease(proj, params)
if err != nil {
return fmt.Errorf("failed to create release: %#v", err)
}
err = uploadAssets(gh, release, assets)
if err != nil {
return err
}
if !staging {
release, err = gh.EditRelease(release, map[string]interface{}{
"prerelease": false,
})
}
}
return nil
}
func getOctoCli() *octokit.Client {
var auth octokit.AuthMethod
token := os.Getenv("GITHUB_TOKEN")
if token != "" {
auth = octokit.TokenAuth{AccessToken: token}
}
return octokit.NewClient(auth)
}
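// Illustrative invocation (binary name and flag spelling are assumptions based
// on the flag set defined in run above): with GITHUB_TOKEN exported the client
// authenticates with that token, otherwise requests are unauthenticated.
//
//	GITHUB_TOKEN=xxxx mackerel-github-release -dry-run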
func getReleasePullRequest(octoCli *octokit.Client, owner, repo, releaseVer string) (*octokit.PullRequest, error) {
releaseBranch := "bump-version-" + releaseVer
u, err := octokit.PullRequestsURL.Expand(octokit.M{"owner": owner, "repo": repo})
if err != nil {
return nil, fmt.Errorf("something went wrong while expanding pullrequest url")
}
q := u.Query()
q.Set("state", "closed")
q.Set("head", fmt.Sprintf("%s:%s", owner, releaseBranch))
u.RawQuery = q.Encode()
prs, r := octoCli.PullRequests(u).All()
if r.HasError() || len(prs) != 1 {
return nil, fmt.Errorf("failed to detect release pull request: %#v", r.Err)
}
return &prs[0], nil
}
func handleOldRelease(octoCli *octokit.Client, owner, repo, tag string, staging, dryRun bool) error {
releaseByTagURL := octokit.Hyperlink("repos/{owner}/{repo}/releases/tags/{tag}")
u, err := releaseByTagURL.Expand(octokit.M{"owner": owner, "repo": repo, "tag": tag})
if err != nil {
return fmt.Errorf("failed to build GitHub URL: %#v", err)
}
release, r := octoCli.Releases(u).Latest()
if r.Err != nil {
rerr, ok := r.Err.(*octokit.ResponseError)
if !ok {
return fmt.Errorf("failed to fetch release: %#v", r.Err)
}
if rerr.Response == nil || rerr.Response.StatusCode != http.StatusNotFound {
return fmt.Errorf("failed to fetch release: %#v", r.Err)
}
}
if release != nil {
if !staging {
return errAlreadyReleased
}
if !dryRun {
req, err := octoCli.NewRequest(release.URL)
if err != nil {
return fmt.Errorf("something went wrong: %#v", err)
}
sawyerResp := req.Request.Delete()
if sawyerResp.IsError() {
return fmt.Errorf("release deletion unsuccesful, %#v", sawyerResp.ResponseError)
}
defer sawyerResp.Body.Close()
if sawyerResp.StatusCode != http.StatusNoContent {
return fmt.Errorf("could not delete the release corresponding to tag %s", tag)
}
}
}
return nil
}
func collectAssets() (assets []string, err error) {
home, err := homedir.Dir()
if err != nil {
return nil, err
}
for _, glob := range [...]string{
home + "/rpmbuild/RPMS/*/*.rpm",
"rpmbuild/RPMS/*/*.rpm",
"packaging/*.deb",
"snapshot/*.zip",
"snapshot/*.tar.gz",
"build/*.tar.gz",
} {
files, err := filepath.Glob(glob)
if err != nil {
return nil, err
}
assets = append(assets, files...)
}
return assets, nil
}
func uploadAssets(gh *github.Client, release *github.Release, assets []string) error {
for _, asset := range assets {
_, err := gh.UploadReleaseAsset(release, asset, "")
if err != nil {
return fmt.Errorf("failed to upload asset: %s, error: %#v", asset, err)
}
}
return nil
}
|
[
"\"GITHUB_TOKEN\""
] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
go
| 1 | 0 | |
numpy/distutils/fcompiler/gnu.py
|
import re
import os
import sys
import warnings
import platform
import tempfile
import hashlib
import base64
import subprocess
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.fcompiler import FCompiler
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77', )
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
version_string = version_string[version_string.find('\n') + 1:]
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
# gfortrans may still return long version strings (``-dumpversion`` was
# an alias for ``--version``)
if len(version_string) <= 20:
# Try to find a valid version string
m = re.search(r'([0-9.]+)', version_string)
if m:
# g77 provides a longer version string that starts with GNU
# Fortran
if version_string.startswith('GNU Fortran'):
return ('g77', m.group(1))
# gfortran only outputs a version string such as #.#.#, so check
# if the match is at the start of the string
elif m.start() == 0:
return ('gfortran', m.group(1))
else:
# Output probably from --version, try harder:
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
m = re.search(
r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
# the '0' is for early g77's
return ('g77', v)
else:
# at some point in the 4.x series, the ' 95' was dropped
# from the version string
return ('gfortran', v)
# If still nothing, raise an error to make the problem easy to find.
err = 'A valid Fortran version was not found in this string:\n'
raise ValueError(err + version_string)
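# Illustrative inputs and results for gnu_version_match above (version strings
# are made up for the example, not taken from any particular compiler build):
#   'GNU Fortran (GCC) 3.4.6'     -> ('g77', '3.4.6')        # long string, 3.x series
#   'GNU Fortran 95 (GCC) 4.0.3'  -> ('gfortran', '4.0.3')   # long string containing ' 95'
#   '7.4.0'                       -> ('gfortran', '7.4.0')   # modern -dumpversion output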
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'g77':
return None
return v[1]
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "-dumpversion"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
}
module_dir_switch = None
module_include_switch = None
# Cygwin: f771: warning: -fPIC ignored for target (all code is
# position independent)
if os.name != 'nt' and sys.platform != 'cygwin':
pic_flags = ['-fPIC']
# use -mno-cygwin for g77 when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
executables[key].append('-mno-cygwin')
g2c = 'g2c'
suggested_f90_compiler = 'gnu95'
def get_flags_linker_so(self):
opt = self.linker_so[1:]
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
# Makefile used to build Python. We let distutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
# we try to get it first from sysconfig and then
# fall back to setting it to 10.9. This is a reasonable default
# even when using the official Python dist and those derived
# from it.
import sysconfig
target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if not target:
target = '10.9'
s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}'
warnings.warn(s, stacklevel=2)
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
# static library libg2c.a The linker doesn't like this. To
# ignore the problem, use the -mimpure-text flag. It isn't
# the safest thing, but seems to work. 'man gcc' says:
# ".. Instead of using -mimpure-text, you should compile all
# source code with -fpic or -fPIC."
opt.append('-mimpure-text')
return opt
def get_libgcc_dir(self):
try:
output = subprocess.check_output(self.compiler_f77 +
['-print-libgcc-file-name'])
except (OSError, subprocess.CalledProcessError):
pass
else:
output = filepath_from_subprocess_output(output)
return os.path.dirname(output)
return None
def get_libgfortran_dir(self):
if sys.platform[:5] == 'linux':
libgfortran_name = 'libgfortran.so'
elif sys.platform == 'darwin':
libgfortran_name = 'libgfortran.dylib'
else:
libgfortran_name = None
libgfortran_dir = None
if libgfortran_name:
find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)]
try:
output = subprocess.check_output(
self.compiler_f77 + find_lib_arg)
except (OSError, subprocess.CalledProcessError):
pass
else:
output = filepath_from_subprocess_output(output)
libgfortran_dir = os.path.dirname(output)
return libgfortran_dir
def get_library_dirs(self):
opt = []
if sys.platform[:5] != 'linux':
d = self.get_libgcc_dir()
if d:
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
root = os.path.join(d, *((os.pardir, ) * 4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
opt.append(d2)
opt.append(d)
# For Macports / Linux, libgfortran and libgcc are not co-located
lib_gfortran_dir = self.get_libgfortran_dir()
if lib_gfortran_dir:
opt.append(lib_gfortran_dir)
return opt
def get_libraries(self):
opt = []
d = self.get_libgcc_dir()
if d is not None:
g2c = self.g2c + '-pic'
f = self.static_lib_format % (g2c, self.static_lib_extension)
if not os.path.isfile(os.path.join(d, f)):
g2c = self.g2c
else:
g2c = self.g2c
if g2c is not None:
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
c_compiler.compiler_type == 'msvc':
opt.append('gcc')
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
v = self.get_version()
if v and v <= '3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
else:
opt = ['-O3']
opt.append('-funroll-loops')
return opt
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
from distutils import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
return []
arch_re = re.compile(r"-arch\s+(\w+)")
arch_flags = []
for arch in arch_re.findall(cflags):
arch_flags += ['-arch', arch]
return arch_flags
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
if sys.platform[:3] == 'aix' or sys.platform == 'win32':
# Linux/Solaris/Unix support RPATH, Windows and AIX do not
raise NotImplementedError
# TODO: could use -Xlinker here, if it's supported
assert "," not in dir
sep = ',' if sys.platform == 'darwin' else '='
return '-Wl,-rpath%s%s' % (sep, dir)
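# Illustrative results of runtime_library_dir_option (paths are placeholders):
#   on Linux/SunOS:  runtime_library_dir_option('/opt/lib') -> '-Wl,-rpath=/opt/lib'
#   on Darwin:       runtime_library_dir_option('/opt/lib') -> '-Wl,-rpath,/opt/lib'
#   on win32 / aix:  NotImplementedError is raised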
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
compiler_aliases = ('gfortran', )
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'gfortran':
return None
v = v[1]
if v >= '4.':
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
for key in [
'version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe'
]:
self.executables[key].append('-mno-cygwin')
return v
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_f90' : [None, "-Wall", "-g",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
}
module_dir_switch = '-J'
module_include_switch = '-I'
if sys.platform[:3] == 'aix':
executables['linker_so'].append('-lpthread')
if platform.architecture()[0][:2] == '64':
for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']:
executables[key].append('-maix64')
g2c = 'gfortran'
def _universal_flags(self, cmd):
"""Return a list of -arch flags for every supported architecture."""
if not sys.platform == 'darwin':
return []
arch_flags = []
# get arches the C compiler gets.
c_archs = self._c_arch_flags()
if "i386" in c_archs:
c_archs[c_archs.index("i386")] = "i686"
# check the arches the Fortran compiler supports, and compare with
# arch flags from C compiler
for arch in ["ppc", "i686", "x86_64", "ppc64"]:
if _can_target(cmd, arch) and arch in c_archs:
arch_flags.extend(["-arch", arch])
return arch_flags
def get_flags(self):
flags = GnuFCompiler.get_flags(self)
arch_flags = self._universal_flags(self.compiler_f90)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_flags_linker_so(self):
flags = GnuFCompiler.get_flags_linker_so(self)
arch_flags = self._universal_flags(self.linker_so)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_library_dirs(self):
opt = GnuFCompiler.get_library_dirs(self)
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, *((os.pardir, ) * 4))
path = os.path.join(root, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
# For Macports / Linux, libgfortran and libgcc are not co-located
lib_gfortran_dir = self.get_libgfortran_dir()
if lib_gfortran_dir:
opt.append(lib_gfortran_dir)
return opt
def get_libraries(self):
opt = GnuFCompiler.get_libraries(self)
if sys.platform == 'darwin':
opt.remove('cc_dynamic')
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
opt.insert(i + 1, "mingwex")
opt.insert(i + 1, "mingw32")
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
return []
return opt
def get_target(self):
try:
output = subprocess.check_output(self.compiler_f77 + ['-v'])
except (OSError, subprocess.CalledProcessError):
pass
else:
output = filepath_from_subprocess_output(output)
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
def _hash_files(self, filenames):
h = hashlib.sha1()
for fn in filenames:
with open(fn, 'rb') as f:
while True:
block = f.read(131072)
if not block:
break
h.update(block)
text = base64.b32encode(h.digest())
text = text.decode('ascii')
return text.rstrip('=')
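# Illustrative note (not in the original source): a 20-byte SHA-1 digest encodes
# to exactly 32 base32 characters with no '=' padding, so this returns a stable
# 32-character fingerprint, used below to name the wrapper DLL, e.g.
# 'lib<obj>.<hash>.gfortran-win_amd64.dll' on 64-bit Windows.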
def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir,
chained_dlls, is_archive):
"""Create a wrapper shared library for the given objects
Return an MSVC-compatible lib
"""
c_compiler = self.c_compiler
if c_compiler.compiler_type != "msvc":
raise ValueError("This method only supports MSVC")
object_hash = self._hash_files(list(objects) + list(chained_dlls))
if is_win64():
tag = 'win_amd64'
else:
tag = 'win32'
basename = 'lib' + os.path.splitext(
os.path.basename(objects[0]))[0][:8]
root_name = basename + '.' + object_hash + '.gfortran-' + tag
dll_name = root_name + '.dll'
def_name = root_name + '.def'
lib_name = root_name + '.lib'
dll_path = os.path.join(extra_dll_dir, dll_name)
def_path = os.path.join(output_dir, def_name)
lib_path = os.path.join(output_dir, lib_name)
if os.path.isfile(lib_path):
# Nothing to do
return lib_path, dll_path
if is_archive:
objects = (["-Wl,--whole-archive"] + list(objects) +
["-Wl,--no-whole-archive"])
self.link_shared_object(
objects,
dll_name,
output_dir=extra_dll_dir,
extra_postargs=list(chained_dlls) + [
'-Wl,--allow-multiple-definition',
'-Wl,--output-def,' + def_path,
'-Wl,--export-all-symbols',
'-Wl,--enable-auto-import',
'-static',
'-mlong-double-64',
])
# No PowerPC!
if is_win64():
specifier = '/MACHINE:X64'
else:
specifier = '/MACHINE:X86'
# MSVC specific code
lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier]
if not c_compiler.initialized:
c_compiler.initialize()
c_compiler.spawn([c_compiler.lib] + lib_args)
return lib_path, dll_path
def can_ccompiler_link(self, compiler):
# MSVC cannot link objects compiled by GNU fortran
return compiler.compiler_type not in ("msvc", )
def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
"""
Convert a set of object files that are not compatible with the default
linker, to a file that is compatible.
"""
if self.c_compiler.compiler_type == "msvc":
# Compile a DLL and return the lib for the DLL as
# the object. Also keep track of previous DLLs that
# we have compiled so that we can link against them.
# If there are .a archives, assume they are self-contained
# static libraries, and build separate DLLs for each
archives = []
plain_objects = []
for obj in objects:
if obj.lower().endswith('.a'):
archives.append(obj)
else:
plain_objects.append(obj)
chained_libs = []
chained_dlls = []
for archive in archives[::-1]:
lib, dll = self._link_wrapper_lib(
[archive],
output_dir,
extra_dll_dir,
chained_dlls=chained_dlls,
is_archive=True)
chained_libs.insert(0, lib)
chained_dlls.insert(0, dll)
if not plain_objects:
return chained_libs
lib, dll = self._link_wrapper_lib(
plain_objects,
output_dir,
extra_dll_dir,
chained_dlls=chained_dlls,
is_archive=False)
return [lib] + chained_libs
else:
raise ValueError("Unsupported C compiler")
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
os.close(fid)
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
return False
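# Illustrative note (not in the original source): _can_target(['gfortran'], 'x86_64')
# compiles a throwaway empty .f file with '-arch x86_64 -c' and reports True only
# if the compiler exits with status 0; the compiler name is an example.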
if __name__ == '__main__':
from distutils import log
from numpy.distutils import customized_fcompiler
log.set_verbosity(2)
print(customized_fcompiler('gnu').get_version())
try:
print(customized_fcompiler('g95').get_version())
except Exception as e:
print(e)
|
[] |
[] |
[
"MACOSX_DEPLOYMENT_TARGET"
] |
[]
|
["MACOSX_DEPLOYMENT_TARGET"]
|
python
| 1 | 0 | |
api/api_test.go
|
package api
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
const serverStartupMillis = 1000
const testUsername = "username"
const testPassword = "Super@35Secure"
const testCreateUserPassword = "examplepassword"
const testBitcoinP2PKHAddress = "1DxBaADfhTSWsevbzDghrhKSqQwsBpuM5A"
const testDomain = "example.com"
const TestNanoAddress = "xrb_3xnpp3eh6fhnfztx46ypubizd5q1fgds3dbbkp5ektwut3tumrykyx6u5qpd"
func TestAPISuccess(t *testing.T) {
cfg := Config{}
err := cfg.InitDB()
assert.Nil(t, err)
err = cfg.SetupDB()
assert.Nil(t, err)
err = cfg.db.Close()
assert.Nil(t, err)
server := Start()
defer server.Shutdown(nil)
time.Sleep(serverStartupMillis * time.Millisecond) //wait for server to start
// Create a user
url := "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/users"
params := []byte(`{
"alias": "` + testUsername + `$` + testDomain + `",
"password": "` + testPassword + `",
"create_user_password": "` + testCreateUserPassword + `"
}`)
req, err := http.NewRequest("POST", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
assert.Nil(t, err)
// Login to that user
url = "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/auth"
params = []byte(`{
"alias": "` + testUsername + "$" + testDomain + `",
"password": "` + testPassword + `"
}`)
req, err = http.NewRequest("POST", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
resp, err = client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
body, err = ioutil.ReadAll(resp.Body)
authResponse := postAuthResponse{}
err = json.Unmarshal(body, &authResponse)
assert.Nil(t, err)
// Add a Bitcoin address
url = "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/addresses"
params = []byte(`{
"address_type": 100,
"address": "` + testBitcoinP2PKHAddress + `"
}`)
req, err = http.NewRequest("PUT", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+authResponse.Token)
resp, err = client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
body, err = ioutil.ReadAll(resp.Body)
assert.Nil(t, err)
// Add a Nano address
url = "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/addresses"
params = []byte(`{
"address_type": 300,
"address": "` + TestNanoAddress + `"
}`)
req, err = http.NewRequest("PUT", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+authResponse.Token)
resp, err = client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
body, err = ioutil.ReadAll(resp.Body)
assert.Nil(t, err)
// Get an address
url = "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/addresses?alias=" + testUsername + "$" + testDomain + "&address_type=100"
req, err = http.NewRequest("GET", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
resp, err = client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
body, err = ioutil.ReadAll(resp.Body)
assert.Nil(t, err)
assert.True(t, len(body) > 0)
// Get addresses
url = "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/addresses?alias=" + testUsername + "$" + testDomain
req, err = http.NewRequest("GET", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
resp, err = client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
body, err = ioutil.ReadAll(resp.Body)
assert.Nil(t, err)
assert.True(t, len(body) > 0)
// Delete an address
url = "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/addresses/100"
req, err = http.NewRequest("DELETE", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+authResponse.Token)
resp, err = client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
// Delete an address
url = "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/addresses/300"
req, err = http.NewRequest("DELETE", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+authResponse.Token)
resp, err = client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
body, err = ioutil.ReadAll(resp.Body)
assert.Nil(t, err)
// Get address and fail
url = "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/addresses?alias=" + testUsername + "$" + testDomain + "&address_type=100"
req, err = http.NewRequest("GET", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
resp, err = client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 404, resp.StatusCode)
// Delete the user
url = "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/users"
req, err = http.NewRequest("DELETE", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+authResponse.Token)
resp, err = client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
body, err = ioutil.ReadAll(resp.Body)
assert.Nil(t, err)
// Fail the login
url = "http://127.0.0.1:" + os.Getenv("TEST_PORT") + "/v1/auth"
params = []byte(`{
"alias": "` + testUsername + "$" + testDomain + `",
"password": "` + testPassword + `"
}`)
req, err = http.NewRequest("POST", url, bytes.NewBuffer(params))
assert.Nil(t, err)
req.Header.Set("Content-Type", "application/json")
resp, err = client.Do(req)
assert.Nil(t, err)
defer resp.Body.Close()
assert.Equal(t, 400, resp.StatusCode)
}
|
[
"\"TEST_PORT\"",
"\"TEST_PORT\"",
"\"TEST_PORT\"",
"\"TEST_PORT\"",
"\"TEST_PORT\"",
"\"TEST_PORT\"",
"\"TEST_PORT\"",
"\"TEST_PORT\"",
"\"TEST_PORT\"",
"\"TEST_PORT\"",
"\"TEST_PORT\""
] |
[] |
[
"TEST_PORT"
] |
[]
|
["TEST_PORT"]
|
go
| 1 | 0 | |
setup.py
|
import distutils.command.clean
import glob
import os
import shutil
import subprocess
import sys
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import (
BuildExtension,
CppExtension,
CUDAExtension,
CUDA_HOME,
)
version = open("version.txt", "r").read().strip()
sha = "Unknown"
package_name = "torchcsprng"
cwd = os.path.dirname(os.path.abspath(__file__))
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd)
.decode("ascii")
.strip()
)
except Exception:
pass
if os.getenv("BUILD_VERSION"):
version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
version += "+" + sha[:7]
print("Building wheel {}-{}".format(package_name, version))
def write_version_file():
version_path = os.path.join(cwd, "torchcsprng", "version.py")
with open(version_path, "w") as f:
f.write("__version__ = '{}'\n".format(version))
f.write("git_version = {}\n".format(repr(sha)))
# f.write("from torchcsprng.extension import _check_cuda_version\n")
# f.write("if _check_cuda_version() > 0:\n")
# f.write(" cuda = _check_cuda_version()\n")
write_version_file()
with open("README.md", "r") as fh:
long_description = fh.read()
pytorch_dep = "torch"
if os.getenv("PYTORCH_VERSION"):
pytorch_dep += "==" + os.getenv("PYTORCH_VERSION")
requirements = [
pytorch_dep,
]
def append_flags(flags, flags_to_append):
for flag in flags_to_append:
if flag not in flags:
flags.append(flag)
return flags
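# Illustrative example (not part of the original setup.py):
# append_flags(["-O2"], ["-fopenmp", "-O2"]) returns ["-O2", "-fopenmp"]:
# each requested flag is appended only if it is not already present.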
def get_extensions():
build_cuda = torch.cuda.is_available() or os.getenv("FORCE_CUDA", "0") == "1"
module_name = "torchcsprng"
extensions_dir = os.path.join(cwd, module_name, "csrc")
openmp = "ATen parallel backend: OpenMP" in torch.__config__.parallel_info()
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
sources = main_file + source_cpu
extension = CppExtension
define_macros = []
cxx_flags = os.getenv("CXX_FLAGS", "")
if cxx_flags == "":
cxx_flags = []
else:
cxx_flags = cxx_flags.split(" ")
if openmp:
if sys.platform == "linux":
cxx_flags = append_flags(cxx_flags, ["-fopenmp"])
elif sys.platform == "win32":
cxx_flags = append_flags(cxx_flags, ["/openmp"])
# elif sys.platform == 'darwin':
# cxx_flags = append_flags(cxx_flags, ['-Xpreprocessor', '-fopenmp'])
if build_cuda:
extension = CUDAExtension
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
nvcc_flags = os.getenv("NVCC_FLAGS", "")
if nvcc_flags == "":
nvcc_flags = []
else:
nvcc_flags = nvcc_flags.split(" ")
nvcc_flags = append_flags(nvcc_flags, ["--expt-extended-lambda", "-Xcompiler"])
extra_compile_args = {
"cxx": cxx_flags,
"nvcc": nvcc_flags,
}
else:
extra_compile_args = {
"cxx": cxx_flags,
}
ext_modules = [
extension(
module_name + "._C",
sources,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
class clean(distutils.command.clean.clean):
def run(self):
with open(".gitignore", "r") as f:
ignores = f.read()
start_deleting = False
for wildcard in filter(None, ignores.split("\n")):
if (
wildcard
== "# do not change or delete this comment - `python setup.py clean` deletes everything after this line"
):
start_deleting = True
if not start_deleting:
continue
for filename in glob.glob(wildcard):
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename, ignore_errors=True)
# It's an old-style class in Python 2.7...
distutils.command.clean.clean.run(self)
setup(
# Metadata
name=package_name,
version=version,
author="Pavel Belevich",
author_email="[email protected]",
url="https://github.com/pytorch/csprng",
description="Cryptographically secure pseudorandom number generators for PyTorch",
long_description=long_description,
long_description_content_type="text/markdown",
license="BSD-3",
# Package info
packages=find_packages(exclude=("test",)),
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: C++",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
python_requires=">=3.6",
install_requires=requirements,
ext_modules=get_extensions(),
test_suite="test",
cmdclass={
"build_ext": BuildExtension,
"clean": clean,
},
)
|
[] |
[] |
[
"CXX_FLAGS",
"FORCE_CUDA",
"BUILD_VERSION",
"NVCC_FLAGS",
"PYTORCH_VERSION"
] |
[]
|
["CXX_FLAGS", "FORCE_CUDA", "BUILD_VERSION", "NVCC_FLAGS", "PYTORCH_VERSION"]
|
python
| 5 | 0 | |
upup/pkg/fi/cloudup/template_functions.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/******************************************************************************
Template Functions are what map functions in the models to internal logic in
kops. This is the point where we connect static YAML configuration to dynamic
runtime values in memory.
When defining a new function:
- Build the new function here
- Define the new function in AddTo()
dest["MyNewFunction"] = MyNewFunction // <-- Function Pointer
******************************************************************************/
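// Hedged sketch (not part of the original file), following the instructions above:
// a hypothetical new template function would be added as a method, e.g.
//	func (tf *TemplateFunctions) MyNewFunction() string { return "value" }
// and then registered inside AddTo with:
//	dest["MyNewFunction"] = tf.MyNewFunction
// MyNewFunction and its return value are illustrative only.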
package cloudup
import (
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"text/template"
"github.com/Masterminds/sprig"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
kopscontrollerconfig "k8s.io/kops/cmd/kops-controller/pkg/config"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model"
"k8s.io/kops/pkg/resources/spotinst"
"k8s.io/kops/pkg/wellknownports"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/util/pkg/env"
)
// TemplateFunctions provides a collection of methods used throughout the templates
type TemplateFunctions struct {
model.KopsModelContext
tags sets.String
}
// AddTo defines the available functions we can use in our YAML models.
// If we are trying to get a new function implemented, it MUST
// be defined here.
func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretStore) (err error) {
cluster := tf.Cluster
dest["EtcdScheme"] = tf.EtcdScheme
dest["SharedVPC"] = tf.SharedVPC
dest["ToJSON"] = tf.ToJSON
dest["UseBootstrapTokens"] = tf.UseBootstrapTokens
dest["UseEtcdTLS"] = tf.UseEtcdTLS
// Remember that we may be on a different arch from the target. Hard-code for now.
dest["replace"] = func(s, find, replace string) string {
return strings.Replace(s, find, replace, -1)
}
dest["join"] = func(a []string, sep string) string {
return strings.Join(a, sep)
}
sprigTxtFuncMap := sprig.TxtFuncMap()
dest["indent"] = sprigTxtFuncMap["indent"]
dest["ClusterName"] = tf.ClusterName
dest["HasTag"] = tf.HasTag
dest["WithDefaultBool"] = func(v *bool, defaultValue bool) bool {
if v != nil {
return *v
}
return defaultValue
}
dest["GetInstanceGroup"] = tf.GetInstanceGroup
dest["CloudTags"] = tf.CloudTagsForInstanceGroup
dest["KubeDNS"] = func() *kops.KubeDNSConfig {
return cluster.Spec.KubeDNS
}
dest["NodeLocalDNSClusterIP"] = func() string {
if cluster.Spec.KubeProxy.ProxyMode == "ipvs" {
return cluster.Spec.KubeDNS.ServerIP
}
return "__PILLAR__CLUSTER__DNS__"
}
dest["NodeLocalDNSServerIP"] = func() string {
if cluster.Spec.KubeProxy.ProxyMode == "ipvs" {
return ""
}
return cluster.Spec.KubeDNS.ServerIP
}
dest["NodeLocalDNSHealthCheck"] = func() string {
return fmt.Sprintf("%d", wellknownports.NodeLocalDNSHealthCheck)
}
dest["KopsControllerArgv"] = tf.KopsControllerArgv
dest["KopsControllerConfig"] = tf.KopsControllerConfig
dest["DnsControllerArgv"] = tf.DNSControllerArgv
dest["ExternalDnsArgv"] = tf.ExternalDNSArgv
dest["CloudControllerConfigArgv"] = tf.CloudControllerConfigArgv
// TODO: Only for GCE?
dest["EncodeGCELabel"] = gce.EncodeGCELabel
dest["Region"] = func() string {
return tf.Region
}
if featureflag.EnableExternalCloudController.Enabled() {
// will return openstack external ccm image location for current kubernetes version
dest["OpenStackCCM"] = tf.OpenStackCCM
}
dest["ProxyEnv"] = tf.ProxyEnv
dest["KopsSystemEnv"] = tf.KopsSystemEnv
dest["DO_TOKEN"] = func() string {
return os.Getenv("DIGITALOCEAN_ACCESS_TOKEN")
}
if featureflag.Spotinst.Enabled() {
if creds, err := spotinst.LoadCredentials(); err == nil {
dest["SpotinstToken"] = func() string { return creds.Token }
dest["SpotinstAccount"] = func() string { return creds.Account }
}
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Flannel != nil {
flannelBackendType := cluster.Spec.Networking.Flannel.Backend
if flannelBackendType == "" {
klog.Warningf("Defaulting flannel backend to udp (not a recommended configuration)")
flannelBackendType = "udp"
}
dest["FlannelBackendType"] = func() string { return flannelBackendType }
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Weave != nil {
weavesecretString := ""
weavesecret, _ := secretStore.Secret("weavepassword")
if weavesecret != nil {
weavesecretString, err = weavesecret.AsString()
if err != nil {
return err
}
klog.V(4).Info("Weave secret function successfully registered")
}
dest["WeaveSecret"] = func() string { return weavesecretString }
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Cilium != nil {
ciliumsecretString := ""
ciliumsecret, _ := secretStore.Secret("ciliumpassword")
if ciliumsecret != nil {
ciliumsecretString, err = ciliumsecret.AsString()
if err != nil {
return err
}
klog.V(4).Info("Cilium secret function successfully registered")
}
dest["CiliumSecret"] = func() string { return ciliumsecretString }
}
return nil
}
// ToJSON returns a json representation of the struct or on error an empty string
func (tf *TemplateFunctions) ToJSON(data interface{}) string {
encoded, err := json.Marshal(data)
if err != nil {
return ""
}
return string(encoded)
}
// EtcdScheme parses and grabs the protocol to the etcd cluster
func (tf *TemplateFunctions) EtcdScheme() string {
if tf.UseEtcdTLS() {
return "https"
}
return "http"
}
// SharedVPC is a simple helper function which makes the templates for a shared VPC clearer
func (tf *TemplateFunctions) SharedVPC() bool {
return tf.Cluster.SharedVPC()
}
// HasTag returns true if the specified tag is set
func (tf *TemplateFunctions) HasTag(tag string) bool {
_, found := tf.tags[tag]
return found
}
// GetInstanceGroup returns the instance group with the specified name
func (tf *TemplateFunctions) GetInstanceGroup(name string) (*kops.InstanceGroup, error) {
ig := tf.KopsModelContext.FindInstanceGroup(name)
if ig == nil {
return nil, fmt.Errorf("InstanceGroup %q not found", name)
}
return ig, nil
}
// CloudControllerConfigArgv returns the args to external cloud controller
func (tf *TemplateFunctions) CloudControllerConfigArgv() ([]string, error) {
cluster := tf.Cluster
if cluster.Spec.ExternalCloudControllerManager == nil {
return nil, fmt.Errorf("ExternalCloudControllerManager is nil")
}
var argv []string
if cluster.Spec.ExternalCloudControllerManager.Master != "" {
argv = append(argv, fmt.Sprintf("--master=%s", cluster.Spec.ExternalCloudControllerManager.Master))
}
if cluster.Spec.ExternalCloudControllerManager.LogLevel != 0 {
argv = append(argv, fmt.Sprintf("--v=%d", cluster.Spec.ExternalCloudControllerManager.LogLevel))
} else {
argv = append(argv, "--v=2")
}
if cluster.Spec.ExternalCloudControllerManager.CloudProvider != "" {
argv = append(argv, fmt.Sprintf("--cloud-provider=%s", cluster.Spec.ExternalCloudControllerManager.CloudProvider))
} else if cluster.Spec.CloudProvider != "" {
argv = append(argv, fmt.Sprintf("--cloud-provider=%s", cluster.Spec.CloudProvider))
} else {
return nil, fmt.Errorf("Cloud Provider is not set")
}
if cluster.Spec.ExternalCloudControllerManager.ClusterName != "" {
argv = append(argv, fmt.Sprintf("--cluster-name=%s", cluster.Spec.ExternalCloudControllerManager.ClusterName))
}
if cluster.Spec.ExternalCloudControllerManager.ClusterCIDR != "" {
argv = append(argv, fmt.Sprintf("--cluster-cidr=%s", cluster.Spec.ExternalCloudControllerManager.ClusterCIDR))
}
if cluster.Spec.ExternalCloudControllerManager.AllocateNodeCIDRs != nil {
argv = append(argv, fmt.Sprintf("--allocate-node-cidrs=%t", *cluster.Spec.ExternalCloudControllerManager.AllocateNodeCIDRs))
}
if cluster.Spec.ExternalCloudControllerManager.ConfigureCloudRoutes != nil {
argv = append(argv, fmt.Sprintf("--configure-cloud-routes=%t", *cluster.Spec.ExternalCloudControllerManager.ConfigureCloudRoutes))
}
if cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType != nil && *cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType != "" {
argv = append(argv, fmt.Sprintf("--cidr-allocator-type=%s", *cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType))
}
if cluster.Spec.ExternalCloudControllerManager.UseServiceAccountCredentials != nil {
argv = append(argv, fmt.Sprintf("--use-service-account-credentials=%t", *cluster.Spec.ExternalCloudControllerManager.UseServiceAccountCredentials))
} else {
argv = append(argv, fmt.Sprintf("--use-service-account-credentials=%t", true))
}
return argv, nil
}
// DNSControllerArgv returns the args to the DNS controller
func (tf *TemplateFunctions) DNSControllerArgv() ([]string, error) {
cluster := tf.Cluster
var argv []string
argv = append(argv, "/usr/bin/dns-controller")
// @check if the dns controller has custom configuration
if cluster.Spec.ExternalDNS == nil {
argv = append(argv, []string{"--watch-ingress=false"}...)
klog.V(4).Infof("watch-ingress=false set on dns-controller")
} else {
// @check if the watch ingress is set
var watchIngress bool
if cluster.Spec.ExternalDNS.WatchIngress != nil {
watchIngress = fi.BoolValue(cluster.Spec.ExternalDNS.WatchIngress)
}
if watchIngress {
klog.Warningln("--watch-ingress=true set on dns-controller")
klog.Warningln("this may cause problems with previously defined services: https://github.com/kubernetes/kops/issues/2496")
}
argv = append(argv, fmt.Sprintf("--watch-ingress=%t", watchIngress))
if cluster.Spec.ExternalDNS.WatchNamespace != "" {
argv = append(argv, fmt.Sprintf("--watch-namespace=%s", cluster.Spec.ExternalDNS.WatchNamespace))
}
}
if dns.IsGossipHostname(cluster.Spec.MasterInternalName) {
argv = append(argv, "--dns=gossip")
// Configuration specifically for the DNS controller gossip
if cluster.Spec.DNSControllerGossipConfig != nil {
if cluster.Spec.DNSControllerGossipConfig.Protocol != nil {
argv = append(argv, "--gossip-protocol="+*cluster.Spec.DNSControllerGossipConfig.Protocol)
}
if cluster.Spec.DNSControllerGossipConfig.Listen != nil {
argv = append(argv, "--gossip-listen="+*cluster.Spec.DNSControllerGossipConfig.Listen)
}
if cluster.Spec.DNSControllerGossipConfig.Secret != nil {
argv = append(argv, "--gossip-secret="+*cluster.Spec.DNSControllerGossipConfig.Secret)
}
if cluster.Spec.DNSControllerGossipConfig.Seed != nil {
argv = append(argv, "--gossip-seed="+*cluster.Spec.DNSControllerGossipConfig.Seed)
} else {
argv = append(argv, fmt.Sprintf("--gossip-seed=127.0.0.1:%d", wellknownports.ProtokubeGossipWeaveMesh))
}
if cluster.Spec.DNSControllerGossipConfig.Secondary != nil {
if cluster.Spec.DNSControllerGossipConfig.Secondary.Protocol != nil {
argv = append(argv, "--gossip-protocol-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Protocol)
}
if cluster.Spec.DNSControllerGossipConfig.Secondary.Listen != nil {
argv = append(argv, "--gossip-listen-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Listen)
}
if cluster.Spec.DNSControllerGossipConfig.Secondary.Secret != nil {
argv = append(argv, "--gossip-secret-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Secret)
}
if cluster.Spec.DNSControllerGossipConfig.Secondary.Seed != nil {
argv = append(argv, "--gossip-seed-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Seed)
} else {
argv = append(argv, fmt.Sprintf("--gossip-seed-secondary=127.0.0.1:%d", wellknownports.ProtokubeGossipMemberlist))
}
}
} else {
// Default to primary mesh and secondary memberlist
argv = append(argv, fmt.Sprintf("--gossip-seed=127.0.0.1:%d", wellknownports.ProtokubeGossipWeaveMesh))
argv = append(argv, "--gossip-protocol-secondary=memberlist")
argv = append(argv, fmt.Sprintf("--gossip-listen-secondary=0.0.0.0:%d", wellknownports.DNSControllerGossipMemberlist))
argv = append(argv, fmt.Sprintf("--gossip-seed-secondary=127.0.0.1:%d", wellknownports.ProtokubeGossipMemberlist))
}
} else {
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") {
argv = append(argv, "--dns=gossip")
} else {
argv = append(argv, "--dns=aws-route53")
}
case kops.CloudProviderGCE:
argv = append(argv, "--dns=google-clouddns")
case kops.CloudProviderDO:
argv = append(argv, "--dns=digitalocean")
default:
return nil, fmt.Errorf("unhandled cloudprovider %q", cluster.Spec.CloudProvider)
}
}
zone := cluster.Spec.DNSZone
if zone != "" {
if strings.Contains(zone, ".") {
// match by name
argv = append(argv, "--zone="+zone)
} else {
// match by id
argv = append(argv, "--zone=*/"+zone)
}
}
// permit wildcard updates
argv = append(argv, "--zone=*/*")
// Verbose, but not crazy logging
argv = append(argv, "-v=2")
return argv, nil
}
// KopsControllerConfig returns the yaml configuration for kops-controller
func (tf *TemplateFunctions) KopsControllerConfig() (string, error) {
cluster := tf.Cluster
config := &kopscontrollerconfig.Options{
Cloud: cluster.Spec.CloudProvider,
ConfigBase: cluster.Spec.ConfigBase,
}
// To avoid indentation problems, we marshal as json. json is a subset of yaml
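// Illustrative only (not in the original source): for a cluster on AWS with a
// config base of s3://example-state-store/example.cluster.k8s.local, the marshalled
// config is a single-line JSON object holding those two values; the exact key
// names depend on the json tags of the kops-controller Options struct.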
b, err := json.Marshal(config)
if err != nil {
return "", fmt.Errorf("failed to serialize kops-controller config: %v", err)
}
return string(b), nil
}
// KopsControllerArgv returns the args to kops-controller
func (tf *TemplateFunctions) KopsControllerArgv() ([]string, error) {
var argv []string
argv = append(argv, "/usr/bin/kops-controller")
// Verbose, but not excessive logging
argv = append(argv, "--v=2")
argv = append(argv, "--conf=/etc/kubernetes/kops-controller/config.yaml")
return argv, nil
}
func (tf *TemplateFunctions) ExternalDNSArgv() ([]string, error) {
cluster := tf.Cluster
var argv []string
cloudProvider := cluster.Spec.CloudProvider
switch kops.CloudProviderID(cloudProvider) {
case kops.CloudProviderAWS:
argv = append(argv, "--provider=aws")
case kops.CloudProviderGCE:
project := cluster.Spec.Project
argv = append(argv, "--provider=google")
argv = append(argv, "--google-project="+project)
default:
return nil, fmt.Errorf("unhandled cloudprovider %q", cluster.Spec.CloudProvider)
}
argv = append(argv, "--source=ingress")
return argv, nil
}
func (tf *TemplateFunctions) ProxyEnv() map[string]string {
cluster := tf.Cluster
envs := map[string]string{}
proxies := cluster.Spec.EgressProxy
if proxies == nil {
return envs
}
httpProxy := proxies.HTTPProxy
if httpProxy.Host != "" {
var portSuffix string
if httpProxy.Port != 0 {
portSuffix = ":" + strconv.Itoa(httpProxy.Port)
} else {
portSuffix = ""
}
url := "http://" + httpProxy.Host + portSuffix
envs["http_proxy"] = url
envs["https_proxy"] = url
}
if proxies.ProxyExcludes != "" {
envs["no_proxy"] = proxies.ProxyExcludes
envs["NO_PROXY"] = proxies.ProxyExcludes
}
return envs
}
// KopsSystemEnv builds the env vars for a system component
func (tf *TemplateFunctions) KopsSystemEnv() []corev1.EnvVar {
envMap := env.BuildSystemComponentEnvVars(&tf.Cluster.Spec)
return envMap.ToEnvVars()
}
// OpenStackCCM returns OpenStack external cloud controller manager current image
// with tag specified to k8s version
func (tf *TemplateFunctions) OpenStackCCM() string {
var tag string
parsed, err := util.ParseKubernetesVersion(tf.Cluster.Spec.KubernetesVersion)
if err != nil {
tag = "latest"
} else {
if parsed.Minor == 13 {
// The bugfix release
tag = "1.13.1"
} else {
// otherwise we always use the .0 CCM image; if needed, that can be overridden via the cluster spec
tag = fmt.Sprintf("v%d.%d.0", parsed.Major, parsed.Minor)
}
}
return fmt.Sprintf("docker.io/k8scloudprovider/openstack-cloud-controller-manager:%s", tag)
}
|
[
"\"DIGITALOCEAN_ACCESS_TOKEN\"",
"\"AWS_REGION\""
] |
[] |
[
"DIGITALOCEAN_ACCESS_TOKEN",
"AWS_REGION"
] |
[]
|
["DIGITALOCEAN_ACCESS_TOKEN", "AWS_REGION"]
|
go
| 2 | 0 | |
end_to_end/plugin_test.go
|
package end_to_end_test
import (
"fmt"
"os"
"os/exec"
path "path/filepath"
"github.com/greenplum-db/gp-common-go-libs/cluster"
"github.com/greenplum-db/gp-common-go-libs/dbconn"
"github.com/greenplum-db/gp-common-go-libs/iohelper"
"github.com/greenplum-db/gp-common-go-libs/testhelper"
"github.com/greenplum-db/gpbackup/filepath"
"github.com/greenplum-db/gpbackup/testutils"
"github.com/greenplum-db/gpbackup/utils"
. "github.com/onsi/ginkgo"
)
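// Illustrative usage note (not in the original test file): with a hypothetical
// pluginPath of "/home/gpadmin/example_plugin.bash", copyPluginToAllHosts looks up
// every segment hostname, creates the plugin directory on each segment host over
// ssh, and copies the plugin there with scp, so the same path exists on all hosts.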
func copyPluginToAllHosts(conn *dbconn.DBConn, pluginPath string) {
hostnameQuery := `SELECT DISTINCT hostname AS string FROM gp_segment_configuration WHERE content != -1`
hostnames := dbconn.MustSelectStringSlice(conn, hostnameQuery)
for _, hostname := range hostnames {
pluginDir, _ := path.Split(pluginPath)
command := exec.Command("ssh", hostname, fmt.Sprintf("mkdir -p %s", pluginDir))
mustRunCommand(command)
command = exec.Command("scp", pluginPath, fmt.Sprintf("%s:%s", hostname, pluginPath))
mustRunCommand(command)
}
}
func forceMetadataFileDownloadFromPlugin(conn *dbconn.DBConn, timestamp string) {
fpInfo := filepath.NewFilePathInfo(backupCluster, "", timestamp, filepath.GetSegPrefix(conn))
remoteOutput := backupCluster.GenerateAndExecuteCommand(
fmt.Sprintf("Removing backups on all segments for "+
"timestamp %s", timestamp), func(contentID int) string {
return fmt.Sprintf("rm -rf %s", fpInfo.GetDirForContent(contentID))
}, cluster.ON_SEGMENTS_AND_MASTER)
if remoteOutput.NumErrors != 0 {
Fail(fmt.Sprintf("Failed to remove backup directory for timestamp %s", timestamp))
}
}
var _ = Describe("End to End plugin tests", func() {
BeforeEach(func() {
end_to_end_setup()
})
AfterEach(func() {
end_to_end_teardown()
})
Describe("Single data file", func() {
It("runs gpbackup and gprestore with single-data-file flag", func() {
timestamp := gpbackup(gpbackupPath, backupHelperPath,
"--single-data-file",
"--backup-dir", backupDir)
gprestore(gprestorePath, restoreHelperPath, timestamp,
"--redirect-db", "restoredb",
"--backup-dir", backupDir)
assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
assertDataRestored(restoreConn, publicSchemaTupleCounts)
assertDataRestored(restoreConn, schema2TupleCounts)
assertArtifactsCleaned(restoreConn, timestamp)
})
It("runs gpbackup and gprestore with single-data-file flag without compression", func() {
timestamp := gpbackup(gpbackupPath, backupHelperPath,
"--single-data-file",
"--backup-dir", backupDir,
"--no-compression")
gprestore(gprestorePath, restoreHelperPath, timestamp,
"--redirect-db", "restoredb",
"--backup-dir", backupDir)
assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
assertDataRestored(restoreConn, publicSchemaTupleCounts)
assertDataRestored(restoreConn, schema2TupleCounts)
assertArtifactsCleaned(restoreConn, timestamp)
})
It("runs gpbackup and gprestore on database with all objects", func() {
testhelper.AssertQueryRuns(backupConn,
"DROP SCHEMA IF EXISTS schema2 CASCADE; DROP SCHEMA public CASCADE; CREATE SCHEMA public; DROP PROCEDURAL LANGUAGE IF EXISTS plpythonu;")
defer testutils.ExecuteSQLFile(backupConn,
"resources/test_tables_data.sql")
defer testutils.ExecuteSQLFile(backupConn,
"resources/test_tables_ddl.sql")
defer testhelper.AssertQueryRuns(backupConn,
"DROP SCHEMA IF EXISTS schema2 CASCADE; DROP SCHEMA public CASCADE; CREATE SCHEMA public; DROP PROCEDURAL LANGUAGE IF EXISTS plpythonu;")
defer testhelper.AssertQueryRuns(restoreConn,
"DROP SCHEMA IF EXISTS schema2 CASCADE; DROP SCHEMA public CASCADE; CREATE SCHEMA public; DROP PROCEDURAL LANGUAGE IF EXISTS plpythonu;")
testhelper.AssertQueryRuns(backupConn,
"CREATE ROLE testrole SUPERUSER")
defer testhelper.AssertQueryRuns(backupConn,
"DROP ROLE testrole")
testutils.ExecuteSQLFile(backupConn, "resources/gpdb4_objects.sql")
if backupConn.Version.AtLeast("5") {
testutils.ExecuteSQLFile(backupConn, "resources/gpdb5_objects.sql")
}
if backupConn.Version.AtLeast("6") {
testutils.ExecuteSQLFile(backupConn, "resources/gpdb6_objects.sql")
defer testhelper.AssertQueryRuns(backupConn,
"DROP FOREIGN DATA WRAPPER fdw CASCADE;")
defer testhelper.AssertQueryRuns(restoreConn,
"DROP FOREIGN DATA WRAPPER fdw CASCADE;")
}
if backupConn.Version.AtLeast("6.2") {
testhelper.AssertQueryRuns(backupConn,
"CREATE TABLE mview_table1(i int, j text);")
defer testhelper.AssertQueryRuns(restoreConn,
"DROP TABLE mview_table1;")
testhelper.AssertQueryRuns(backupConn,
"CREATE MATERIALIZED VIEW mview1 (i2) as select i from mview_table1;")
defer testhelper.AssertQueryRuns(restoreConn,
"DROP MATERIALIZED VIEW mview1;")
testhelper.AssertQueryRuns(backupConn,
"CREATE MATERIALIZED VIEW mview2 as select * from mview1;")
defer testhelper.AssertQueryRuns(restoreConn,
"DROP MATERIALIZED VIEW mview2;")
}
timestamp := gpbackup(gpbackupPath, backupHelperPath,
"--leaf-partition-data",
"--single-data-file")
gprestore(gprestorePath, restoreHelperPath, timestamp,
"--metadata-only",
"--redirect-db", "restoredb")
assertArtifactsCleaned(restoreConn, timestamp)
})
Context("with include filtering on restore", func() {
It("runs gpbackup and gprestore with include-table-file restore flag with a single data file", func() {
includeFile := iohelper.MustOpenFileForWriting("/tmp/include-tables.txt")
utils.MustPrintln(includeFile, "public.sales\npublic.foo\npublic.myseq1\npublic.myview1")
timestamp := gpbackup(gpbackupPath, backupHelperPath,
"--backup-dir", backupDir,
"--single-data-file")
gprestore(gprestorePath, restoreHelperPath, timestamp,
"--redirect-db", "restoredb",
"--backup-dir", backupDir,
"--include-table-file", "/tmp/include-tables.txt")
assertRelationsCreated(restoreConn, 16)
assertDataRestored(restoreConn, map[string]int{
"public.sales": 13, "public.foo": 40000})
assertArtifactsCleaned(restoreConn, timestamp)
_ = os.Remove("/tmp/include-tables.txt")
})
It("runs gpbackup and gprestore with include-schema restore flag with a single data file", func() {
timestamp := gpbackup(gpbackupPath, backupHelperPath,
"--backup-dir", backupDir,
"--single-data-file")
gprestore(gprestorePath, restoreHelperPath, timestamp,
"--redirect-db", "restoredb",
"--backup-dir", backupDir,
"--include-schema", "schema2")
assertRelationsCreated(restoreConn, 17)
assertDataRestored(restoreConn, schema2TupleCounts)
assertArtifactsCleaned(restoreConn, timestamp)
})
})
Context("with plugin", func() {
BeforeEach(func() {
skipIfOldBackupVersionBefore("1.7.0")
// FIXME: we are temporarily disabling these tests because we will be altering our backwards compatibility logic.
if useOldBackupVersion {
Skip("This test is only needed for the most recent backup versions")
}
})
It("runs gpbackup and gprestore with plugin, single-data-file, and no-compression", func() {
pluginExecutablePath := fmt.Sprintf("%s/go/src/github.com/greenplum-db/gpbackup/plugins/example_plugin.bash", os.Getenv("HOME"))
copyPluginToAllHosts(backupConn, pluginExecutablePath)
timestamp := gpbackup(gpbackupPath, backupHelperPath,
"--single-data-file",
"--no-compression",
"--plugin-config", pluginConfigPath)
forceMetadataFileDownloadFromPlugin(backupConn, timestamp)
gprestore(gprestorePath, restoreHelperPath, timestamp,
"--redirect-db", "restoredb",
"--plugin-config", pluginConfigPath)
assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
assertDataRestored(restoreConn, publicSchemaTupleCounts)
assertDataRestored(restoreConn, schema2TupleCounts)
assertArtifactsCleaned(restoreConn, timestamp)
})
It("runs gpbackup and gprestore with plugin and single-data-file", func() {
pluginExecutablePath := fmt.Sprintf("%s/go/src/github.com/greenplum-db/gpbackup/plugins/example_plugin.bash", os.Getenv("HOME"))
copyPluginToAllHosts(backupConn, pluginExecutablePath)
timestamp := gpbackup(gpbackupPath, backupHelperPath,
"--single-data-file",
"--plugin-config", pluginConfigPath)
forceMetadataFileDownloadFromPlugin(backupConn, timestamp)
gprestore(gprestorePath, restoreHelperPath, timestamp,
"--redirect-db", "restoredb",
"--plugin-config", pluginConfigPath)
assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
assertDataRestored(restoreConn, publicSchemaTupleCounts)
assertDataRestored(restoreConn, schema2TupleCounts)
assertArtifactsCleaned(restoreConn, timestamp)
})
It("runs gpbackup and gprestore with plugin and metadata-only", func() {
pluginExecutablePath := fmt.Sprintf("%s/go/src/github.com/greenplum-db/gpbackup/plugins/example_plugin.bash", os.Getenv("HOME"))
copyPluginToAllHosts(backupConn, pluginExecutablePath)
timestamp := gpbackup(gpbackupPath, backupHelperPath,
"--metadata-only",
"--plugin-config", pluginConfigPath)
forceMetadataFileDownloadFromPlugin(backupConn, timestamp)
gprestore(gprestorePath, restoreHelperPath, timestamp,
"--redirect-db", "restoredb",
"--plugin-config", pluginConfigPath)
assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
assertArtifactsCleaned(restoreConn, timestamp)
})
})
})
Describe("Multi-file Plugin", func() {
It("runs gpbackup and gprestore with plugin and no-compression", func() {
skipIfOldBackupVersionBefore("1.7.0")
// FIXME: we are temporarily disabling these tests because we will be altering our backwards compatibility logic.
if useOldBackupVersion {
Skip("This test is only needed for the most recent backup versions")
}
pluginExecutablePath := fmt.Sprintf("%s/go/src/github.com/greenplum-db/gpbackup/plugins/example_plugin.bash", os.Getenv("HOME"))
copyPluginToAllHosts(backupConn, pluginExecutablePath)
timestamp := gpbackup(gpbackupPath, backupHelperPath,
"--no-compression",
"--plugin-config", pluginConfigPath)
forceMetadataFileDownloadFromPlugin(backupConn, timestamp)
gprestore(gprestorePath, restoreHelperPath, timestamp,
"--redirect-db", "restoredb",
"--plugin-config", pluginConfigPath)
assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
assertDataRestored(restoreConn, publicSchemaTupleCounts)
assertDataRestored(restoreConn, schema2TupleCounts)
})
It("runs gpbackup and gprestore with plugin and compression", func() {
skipIfOldBackupVersionBefore("1.7.0")
// FIXME: we are temporarily disabling these tests because we will be altering our backwards compatibility logic.
if useOldBackupVersion {
Skip("This test is only needed for the most recent backup versions")
}
pluginExecutablePath := fmt.Sprintf("%s/go/src/github.com/greenplum-db/gpbackup/plugins/example_plugin.bash", os.Getenv("HOME"))
copyPluginToAllHosts(backupConn, pluginExecutablePath)
timestamp := gpbackup(gpbackupPath, backupHelperPath,
"--plugin-config", pluginConfigPath)
forceMetadataFileDownloadFromPlugin(backupConn, timestamp)
gprestore(gprestorePath, restoreHelperPath, timestamp,
"--redirect-db", "restoredb",
"--plugin-config", pluginConfigPath)
assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
assertDataRestored(restoreConn, publicSchemaTupleCounts)
assertDataRestored(restoreConn, schema2TupleCounts)
})
})
Describe("Example Plugin", func() {
It("runs example_plugin.bash with plugin_test_bench", func() {
if useOldBackupVersion {
Skip("This test is only needed for the latest backup version")
}
pluginsDir := fmt.Sprintf("%s/go/src/github.com/greenplum-db/gpbackup/plugins", os.Getenv("HOME"))
copyPluginToAllHosts(backupConn, fmt.Sprintf("%s/example_plugin.bash", pluginsDir))
command := exec.Command("bash", "-c", fmt.Sprintf("%s/plugin_test_bench.sh %s/example_plugin.bash %s/example_plugin_config.yaml", pluginsDir, pluginsDir, pluginsDir))
mustRunCommand(command)
_ = os.RemoveAll("/tmp/plugin_dest")
})
})
})
|
[
"\"HOME\"",
"\"HOME\"",
"\"HOME\"",
"\"HOME\"",
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Import Linter'
copyright = '2019 David Seddon'
author = 'David Seddon'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.2.4'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only set the theme if we're building docs locally
html_theme = 'sphinx_rtd_theme'
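# Illustrative note (not in the original conf.py): Read the Docs sets the
# READTHEDOCS environment variable to the string 'True' in its build environment,
# so on_rtd is True there and the theme above is only applied for local builds.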
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'import-linter'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'import-linter.tex', 'Import Linter Documentation',
'David Seddon', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'import-linter', 'Import Linter Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'import-linter', 'Import Linter Documentation',
author, 'Import Linter', "Lint your Python project's imports.",
'Miscellaneous'),
]
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
libs/mutraffSim/pruebaReroute.py
|
import traci
import subprocess
import sys
import os
from sumolib import checkBinary
import time
if __name__ == '__main__':
try:
sys.path.append(os.path.join(os.path.dirname(
__file__), '..', '..', '..', '..', "tools"))
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(__file__), "..", "..", "..")), "tools")) # tutorial in doc
except ImportError:
sys.exit("please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
sumoBinary = checkBinary('sumo-gui')
sumoProcess = subprocess.Popen([sumoBinary, "-c", "../prueba.sumocfg", "--remote-port", "2081", "-l", "prueba_log"], stdout=sys.stdout, stderr=sys.stderr)
traci.init(2081)
while traci.simulation.getMinExpectedNumber() > 0:
print ("time: " + str(traci.simulation.getCurrentTime()/1000))
option=raw_input("Option: ")
if option=="c":
traci.simulationStep()
elif option=="i":
id=raw_input("Vehicle ID: ")
print ("Planned route: " + str(traci.vehicle.getRoute(id)) + "\n")
edge=raw_input("Route edge ('f' to finish): ")
edge_list=[]
while edge!="f":
edge_list.append(edge)
edge=raw_input("Route edge ('f' to finish): ")
traci.vehicle.setRoute(id, edge_list)
print (traci.vehicle.getRoute(id))
print ("End of simulation")
|
[] |
[] |
[
"SUMO_HOME"
] |
[]
|
["SUMO_HOME"]
|
python
| 1 | 0 | |
others/codes/tp_batch_predict.py
|
# This script is modified from the built-in predict.py script.
# For the original code, see fasterRCNN/predict.py.
import argparse
import itertools
import os
import os.path
import shutil
import socket
import subprocess # for calling shell script
import sys
import tempfile
import time
import cv2
import numpy as np
import tensorflow as tf
import tqdm
import tensorpack.utils.viz as tpviz
from config import config as cfg
from config import finalize_configs
from data import get_eval_dataflow, get_train_dataflow
from dataset import DatasetRegistry, register_balloon, register_coco
from eval import DetectionResult, multithread_predict_dataflow, predict_image
from modeling.generalized_rcnn import ResNetC4Model, ResNetFPNModel
from tensorpack.predict import (MultiTowerOfflinePredictor, OfflinePredictor,
PredictConfig)
from tensorpack.tfutils import SmartInit, get_tf_version_tuple
from tensorpack.tfutils.export import ModelExporter
from tensorpack.utils import fs, logger
from viz import (draw_annotation, draw_final_outputs,
draw_final_outputs_blackwhite, draw_predictions,
draw_proposal_recall)
from os import walk
from shutil import copyfile
import time
import ntpath
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# do_predict(predictor, image_file)
def do_predict(pred_func, input_file):
img = cv2.imread(input_file, cv2.IMREAD_COLOR)
results = predict_image(img, pred_func) # get error from this
img_name = ntpath.basename(input_file)
final = draw_final_outputs(img, results)
viz = np.concatenate((img, final), axis=1)
if not os.path.exists(result_folder):
os.makedirs(result_folder)
cv2.imwrite(result_folder + img_name, viz)
logger.info("Inference output for {} written to {}".format(input_file, result_folder))
if __name__ == '__main__':
register_coco(cfg.DATA.BASEDIR)
MODEL = ResNetFPNModel()
finalize_configs(is_training=False)
predcfg = PredictConfig(
model=MODEL,
session_init=SmartInit("data/train_log_bam/27.01.2020_bam_old_backup/checkpoint"),
input_names=MODEL.get_inference_tensor_names()[0],
output_names=MODEL.get_inference_tensor_names()[1])
predictor = OfflinePredictor(predcfg)
source_folder = "data/forTest_BAM/old/"
result_folder = source_folder + "../" +"result_old_50000/"
f1 = []
for (dirpath, dirnames, filenames) in walk(source_folder):
f1.extend(filenames)
for img in f1:
img = source_folder + img
do_predict(predictor, img)
print("Done!")
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/api_server.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Serves the stub App Engine APIs (e.g. memcache, datastore) over HTTP.
The Remote API protocol is used for communication with the API stubs.
The APIServer can be started either as a stand-alone binary or directly from
other scripts, e.g. dev_appserver.py. When used as a stand-alone binary, the
APIServer can be launched with or without the context of a specific application.
To launch the API Server in the context of an application, launch the APIServer
in the same way as dev_appserver.py:
api_server.py [flags] <module> [<module>...]
When launching without the context of an application, a default application id
is provided, which can be overridden with the --application flag. Either of the
following are acceptable:
api_server.py [flags]
api_server.py --application=my-app-id [flags]
"""
import errno
import getpass
import itertools
import logging
import os
import pickle
import shutil
import sys
import tempfile
import threading
import time
import traceback
import urlparse
import google
import yaml
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_file_stub
from google.appengine.api import mail_stub
from google.appengine.api import request_info as request_info_lib
from google.appengine.api import urlfetch_stub
from google.appengine.api import user_service_stub
from google.appengine.api.app_identity import app_identity_stub
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.capabilities import capability_stub
from google.appengine.api.channel import channel_service_stub
from google.appengine.api.files import file_service_stub
from google.appengine.api.logservice import logservice_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.modules import modules_stub
from google.appengine.api.remote_socket import _remote_socket_stub
from google.appengine.api.search import simple_search_stub
from google.appengine.api.system import system_stub
from google.appengine.api.taskqueue import taskqueue_stub
from google.appengine.api.xmpp import xmpp_service_stub
from google.appengine.datastore import datastore_sqlite_stub
from google.appengine.datastore import datastore_stub_util
from google.appengine.datastore import datastore_v4_pb
from google.appengine.datastore import datastore_v4_stub
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import cli_parser
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import metrics
from google.appengine.tools.devappserver2 import shutdown
from google.appengine.tools.devappserver2 import wsgi_request_info
from google.appengine.tools.devappserver2 import wsgi_server
# The API lock is applied when calling API stubs that are not threadsafe.
GLOBAL_API_LOCK = threading.RLock()
# The default app id used when launching the api_server.py as a binary, without
# providing the context of a specific application.
DEFAULT_API_SERVER_APP_ID = 'dev~app_id'
# We don't want to support datastore_v4 everywhere, because users are supposed
# to use the Cloud Datastore API going forward, so we don't want to put these
# entries in remote_api_services.SERVICE_PB_MAP. But for our own implementation
# of the Cloud Datastore API we need those methods to work when an instance
# issues them, specifically the DatastoreApiServlet running as a module inside
# the app we are running. The consequence is that other app code can also
# issue datastore_v4 API requests, but since we don't document these requests
# or export them through any language bindings this is unlikely in practice.
_DATASTORE_V4_METHODS = {
'AllocateIds': (datastore_v4_pb.AllocateIdsRequest,
datastore_v4_pb.AllocateIdsResponse),
'BeginTransaction': (datastore_v4_pb.BeginTransactionRequest,
datastore_v4_pb.BeginTransactionResponse),
'Commit': (datastore_v4_pb.CommitRequest,
datastore_v4_pb.CommitResponse),
'ContinueQuery': (datastore_v4_pb.ContinueQueryRequest,
datastore_v4_pb.ContinueQueryResponse),
'Lookup': (datastore_v4_pb.LookupRequest,
datastore_v4_pb.LookupResponse),
'Rollback': (datastore_v4_pb.RollbackRequest,
datastore_v4_pb.RollbackResponse),
'RunQuery': (datastore_v4_pb.RunQueryRequest,
datastore_v4_pb.RunQueryResponse),
}
# TODO: Remove after the Files API is really gone.
_FILESAPI_USE_TRACKER = None
_FILESAPI_ENABLED = True
def enable_filesapi_tracking(request_data):
"""Turns on per-request tracking of Files API use.
Args:
request_data: An object with a set_filesapi_used(request_id) method to
track Files API use.
"""
global _FILESAPI_USE_TRACKER
_FILESAPI_USE_TRACKER = request_data
def set_filesapi_enabled(enabled):
"""Enables or disables the Files API."""
global _FILESAPI_ENABLED
_FILESAPI_ENABLED = enabled
def _execute_request(request, use_proto3=False):
"""Executes an API method call and returns the response object.
Args:
request: A remote_api_pb.Request object representing the API call e.g. a
call to memcache.Get.
use_proto3: A boolean indicating whether the request is in proto3 format.
Returns:
A ProtocolBuffer.ProtocolMessage representing the API response e.g. a
memcache_service_pb.MemcacheGetResponse.
Raises:
apiproxy_errors.CallNotFoundError: if the requested method doesn't exist.
apiproxy_errors.ApplicationError: if the API method call fails.
"""
if use_proto3:
service = request.service_name
method = request.method
if request.request_id:
request_id = request.request_id
else:
logging.error('Received a request without request_id: %s', request)
request_id = None
else:
service = request.service_name()
method = request.method()
if request.has_request_id():
request_id = request.request_id()
else:
logging.error('Received a request without request_id: %s', request)
request_id = None
service_methods = (_DATASTORE_V4_METHODS if service == 'datastore_v4'
else remote_api_services.SERVICE_PB_MAP.get(service, {}))
# We do this rather than making a new map that is a superset of
# remote_api_services.SERVICE_PB_MAP because that map is not initialized
# all in one place, so we would have to be careful about where we made
# our new map.
request_class, response_class = service_methods.get(method, (None, None))
if not request_class:
raise apiproxy_errors.CallNotFoundError('%s.%s does not exist' % (service,
method))
# TODO: Remove after the Files API is really gone.
if not _FILESAPI_ENABLED and service == 'file':
raise apiproxy_errors.CallNotFoundError(
'Files API method %s.%s is disabled. Further information: '
'https://cloud.google.com/appengine/docs/deprecations/files_api'
% (service, method))
request_data = request_class()
if use_proto3:
request_data.ParseFromString(request.request)
else:
request_data.ParseFromString(request.request())
response_data = response_class()
service_stub = apiproxy_stub_map.apiproxy.GetStub(service)
def make_request():
# TODO: Remove after the Files API is really gone.
if (_FILESAPI_USE_TRACKER is not None
and service == 'file' and request_id is not None):
_FILESAPI_USE_TRACKER.set_filesapi_used(request_id)
service_stub.MakeSyncCall(service,
method,
request_data,
response_data,
request_id)
# If the service has not declared itself as threadsafe acquire
# GLOBAL_API_LOCK.
if service_stub.THREADSAFE:
make_request()
else:
with GLOBAL_API_LOCK:
make_request()
metrics.GetMetricsLogger().LogOnceOnStop(
metrics.API_STUB_USAGE_CATEGORY,
metrics.API_STUB_USAGE_ACTION_TEMPLATE % service)
return response_data
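# Illustrative sketch (not part of the original module): how a caller might
# build a remote_api_pb.Request by hand and dispatch it through
# _execute_request(). The setter names below (set_service_name, set_method,
# set_request, set_request_id) are assumed to mirror the proto1-style read
# accessors used above (service_name(), method(), request(), request_id());
# the encoded_request argument is a hypothetical, already-encoded request PB.
def _example_dispatch_call(service, method, encoded_request, request_id):
  """Hypothetical helper for illustration only; not used by this module."""
  req = remote_api_pb.Request()
  req.set_service_name(service)
  req.set_method(method)
  req.set_request(encoded_request)
  req.set_request_id(request_id)
  # Returns the decoded response protocol buffer for the named API method.
  return _execute_request(req)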
class GRPCAPIServer(object):
"""Serves API calls over GPC."""
def __init__(self, port):
self._port = port
self._stop = False
self._server = None
def _start_server(self):
"""Starts gRPC API server."""
grpc_service_pb2 = __import__('google.appengine.tools.devappserver2.'
'grpc_service_pb2', globals(), locals(),
['grpc_service_pb2'])
class CallHandler(grpc_service_pb2.BetaCallHandlerServicer):
"""Handles gRPC method calls."""
def HandleCall(self, request, context):
# TODO: b/36590656#comment3 - Add exception handling logic here.
api_response = _execute_request(request, use_proto3=True)
response = grpc_service_pb2.Response(response=api_response.Encode())
return response
self._server = grpc_service_pb2.beta_create_CallHandler_server(
CallHandler())
# add_insecure_port() returns a positive port number when port allocation is
# successful. Otherwise it returns 0, and we handle the exception in start()
# from the caller thread.
# 'localhost' works with both ipv4 and ipv6.
self._port = self._server.add_insecure_port('localhost:' + str(self._port))
# We set GRPC_PORT as an environment variable, as it is only accessed by
# the devappserver process.
os.environ['GRPC_PORT'] = str(self._port)
if self._port:
logging.info('Starting GRPC_API_server at: http://localhost:%d',
self._port)
self._server.start()
def start(self):
with threading.Lock():
self._server_thread = threading.Thread(target=self._start_server)
self._server_thread.start()
self._server_thread.join()
if not self._port:
raise errors.GrpcPortError('Error assigning grpc api port!')
def quit(self):
logging.info('Keyboard interrupting grpc_api_server')
self._server.stop(0)
class APIServer(wsgi_server.WsgiServer):
"""Serves API calls over HTTP."""
def __init__(self, host, port, app_id, datastore_emulator_host=None):
self._app_id = app_id
self._host = host
super(APIServer, self).__init__((host, port), self)
self.set_balanced_address('localhost:8080')
self._datastore_emulator_stub = None
if datastore_emulator_host:
global grpc_proxy_util
# pylint: disable=g-import-not-at-top
# We lazy import here because grpc binaries are not always present.
from google.appengine.tools.devappserver2 import grpc_proxy_util
self._datastore_emulator_stub = grpc_proxy_util.create_stub(
datastore_emulator_host)
def start(self):
"""Start the API Server."""
super(APIServer, self).start()
logging.info('Starting API server at: http://%s:%d', self._host, self.port)
def quit(self):
cleanup_stubs()
super(APIServer, self).quit()
def set_balanced_address(self, balanced_address):
"""Sets the balanced address from the dispatcher (e.g. "localhost:8080").
This is used to enable APIs to build valid URLs.
Args:
balanced_address: string address of the balanced HTTP server.
"""
self._balanced_address = balanced_address
def _handle_POST(self, environ, start_response):
"""Handles a POST request containing a serialized remote_api_pb.Request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A start_response function with semantics defined in
PEP-333.
Returns:
A single element list containing the string body of the HTTP response.
"""
start_response('200 OK', [('Content-Type', 'application/octet-stream')])
start_time = time.time()
response = remote_api_pb.Response()
try:
request = remote_api_pb.Request()
# NOTE: Exceptions encountered when parsing the PB or handling the request
# will be propagated back to the caller the same way as exceptions raised
# by the actual API call.
if environ.get('HTTP_TRANSFER_ENCODING') == 'chunked':
# CherryPy concatenates all chunks when 'wsgi.input' is read but v3.2.2
# will not return even when all of the data in all chunks has been
# read. See: https://bitbucket.org/cherrypy/cherrypy/issue/1131.
wsgi_input = environ['wsgi.input'].read(2**32)
else:
wsgi_input = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
request.ParseFromString(wsgi_input)
service = request.service_name()
if service == 'datastore_v3' and self._datastore_emulator_stub:
response = grpc_proxy_util.make_grpc_call_from_remote_api(
self._datastore_emulator_stub, request)
else:
if request.has_request_id():
request_id = request.request_id()
service_stub = apiproxy_stub_map.apiproxy.GetStub(service)
environ['HTTP_HOST'] = self._balanced_address
op = getattr(service_stub.request_data, 'register_request_id', None)
if callable(op):
op(environ, request_id)
api_response = _execute_request(request).Encode()
response.set_response(api_response)
except Exception, e:
if isinstance(e, apiproxy_errors.ApplicationError):
level = logging.DEBUG
application_error = response.mutable_application_error()
application_error.set_code(e.application_error)
application_error.set_detail(e.error_detail)
else:
# If the runtime instance is not Python, it won't be able to unpickle
# the exception, so use a level that won't be ignored by default.
level = logging.ERROR
# Even if the runtime is Python, the exception may be unpicklable if
# it requires importing a class blocked by the sandbox so just send
# back the exception representation.
# But due to our use of the remote API, at least some apiproxy errors
# are generated in the Dev App Server main instance and not in the
# language runtime and wrapping them causes different behavior from
# prod so don't wrap them.
if not isinstance(e, apiproxy_errors.Error):
e = RuntimeError(repr(e))
# While not strictly necessary for ApplicationError, do this to limit
# differences with remote_api:handler.py.
response.set_exception(pickle.dumps(e))
logging.log(level, 'Exception while handling %s\n%s', request,
traceback.format_exc())
encoded_response = response.Encode()
logging.debug('Handled %s.%s in %0.4f',
request.service_name(),
request.method(),
time.time() - start_time)
return [encoded_response]
def _handle_GET(self, environ, start_response):
params = urlparse.parse_qs(environ['QUERY_STRING'])
rtok = params.get('rtok', ['0'])[0]
start_response('200 OK', [('Content-Type', 'text/plain')])
return [yaml.dump({'app_id': self._app_id,
'rtok': rtok})]
def __call__(self, environ, start_response):
if environ['REQUEST_METHOD'] == 'GET':
return self._handle_GET(environ, start_response)
elif environ['REQUEST_METHOD'] == 'POST':
return self._handle_POST(environ, start_response)
else:
start_response('405 Method Not Allowed', [])
return []
def create_api_server(
request_info, storage_path, options, app_id, app_root,
datastore_emulator_host=None):
"""Creates an API server.
Args:
request_info: An apiproxy_stub.RequestInfo instance used by the stubs to
lookup information about the request associated with an API call.
storage_path: A string directory for storing API stub data.
options: An instance of argparse.Namespace containing command line flags.
app_id: String representing an application ID, used for configuring paths
and string constants in API stubs.
app_root: The path to the directory containing the user's
application e.g. "/home/joe/myapp", used for locating application yaml
files, eg index.yaml for the datastore stub.
datastore_emulator_host: String, the hostname:port on which cloud datastore
emulator runs.
Returns:
An instance of APIServer.
"""
datastore_path = options.datastore_path or os.path.join(
storage_path, 'datastore.db')
logs_path = options.logs_path or os.path.join(storage_path, 'logs.db')
search_index_path = options.search_indexes_path or os.path.join(
storage_path, 'search_indexes')
blobstore_path = options.blobstore_path or os.path.join(
storage_path, 'blobs')
if options.clear_datastore:
_clear_datastore_storage(datastore_path)
if options.clear_search_indexes:
_clear_search_indexes_storage(search_index_path)
if options.auto_id_policy == datastore_stub_util.SEQUENTIAL:
logging.warn("--auto_id_policy='sequential' is deprecated. This option "
"will be removed in a future release.")
application_address = '%s' % options.host
if options.port and options.port != 80:
application_address += ':' + str(options.port)
user_login_url = '/%s?%s=%%s' % (
login.LOGIN_URL_RELATIVE, login.CONTINUE_PARAM)
user_logout_url = '%s&%s=%s' % (
user_login_url, login.ACTION_PARAM, login.LOGOUT_ACTION)
if options.datastore_consistency_policy == 'time':
consistency = datastore_stub_util.TimeBasedHRConsistencyPolicy()
elif options.datastore_consistency_policy == 'random':
consistency = datastore_stub_util.PseudoRandomHRConsistencyPolicy()
elif options.datastore_consistency_policy == 'consistent':
consistency = datastore_stub_util.PseudoRandomHRConsistencyPolicy(1.0)
else:
assert 0, ('unknown consistency policy: %r' %
options.datastore_consistency_policy)
maybe_convert_datastore_file_stub_data_to_sqlite(app_id, datastore_path)
setup_stubs(
request_data=request_info,
app_id=app_id,
application_root=app_root,
# The "trusted" flag is only relevant for Google administrative
# applications.
trusted=getattr(options, 'trusted', False),
appidentity_email_address=options.appidentity_email_address,
appidentity_private_key_path=os.path.abspath(
options.appidentity_private_key_path)
if options.appidentity_private_key_path else None,
blobstore_path=blobstore_path,
datastore_path=datastore_path,
datastore_consistency=consistency,
datastore_require_indexes=options.require_indexes,
datastore_auto_id_policy=options.auto_id_policy,
images_host_prefix='http://%s' % application_address,
logs_path=logs_path,
mail_smtp_host=options.smtp_host,
mail_smtp_port=options.smtp_port,
mail_smtp_user=options.smtp_user,
mail_smtp_password=options.smtp_password,
mail_enable_sendmail=options.enable_sendmail,
mail_show_mail_body=options.show_mail_body,
mail_allow_tls=options.smtp_allow_tls,
search_index_path=search_index_path,
taskqueue_auto_run_tasks=options.enable_task_running,
taskqueue_default_http_server=application_address,
user_login_url=user_login_url,
user_logout_url=user_logout_url,
default_gcs_bucket_name=options.default_gcs_bucket_name,
appidentity_oauth_url=options.appidentity_oauth_url)
return APIServer(options.api_host, options.api_port, app_id,
datastore_emulator_host)
def _clear_datastore_storage(datastore_path):
"""Delete the datastore storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(datastore_path):
try:
os.remove(datastore_path)
except OSError, err:
logging.warning(
'Failed to remove datastore file %r: %s', datastore_path, err)
def _clear_search_indexes_storage(search_index_path):
"""Delete the search indexes storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(search_index_path):
try:
os.remove(search_index_path)
except OSError, err:
logging.warning(
'Failed to remove search indexes file %r: %s', search_index_path, err)
def get_storage_path(path, app_id):
"""Returns a path to the directory where stub data can be stored."""
_, _, app_id = app_id.replace(':', '_').rpartition('~')
if path is None:
for path in _generate_storage_paths(app_id):
try:
os.mkdir(path, 0700)
except OSError, err:
if err.errno == errno.EEXIST:
# Check that the directory is only accessible by the current user to
# protect against an attacker creating the directory in advance in
# order to access any created files. Windows has per-user temporary
# directories and st_mode does not include per-user permission
# information so assume that it is safe.
if sys.platform == 'win32' or (
(os.stat(path).st_mode & 0777) == 0700 and os.path.isdir(path)):
return path
else:
continue
raise
else:
return path
elif not os.path.exists(path):
os.mkdir(path)
return path
elif not os.path.isdir(path):
raise IOError('the given storage path %r is a file, a directory was '
'expected' % path)
else:
return path
def _generate_storage_paths(app_id):
"""Yield an infinite sequence of possible storage paths."""
if sys.platform == 'win32':
# The temp directory is per-user on Windows so there is no reason to add
# the username to the generated directory name.
user_format = ''
else:
try:
user_name = getpass.getuser()
except Exception: # pylint: disable=broad-except
# The possible set of exceptions is not documented.
user_format = ''
else:
user_format = '.%s' % user_name
tempdir = tempfile.gettempdir()
yield os.path.join(tempdir, 'appengine.%s%s' % (app_id, user_format))
for i in itertools.count(1):
yield os.path.join(tempdir, 'appengine.%s%s.%d' % (app_id, user_format, i))
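# Illustrative sketch (not part of the original module): _generate_storage_paths
# yields an unbounded sequence, so get_storage_path() only consumes candidates
# until os.mkdir() succeeds. A hypothetical peek at the first few entries
# (itertools is already imported above) could look like:
#
#   candidates = list(itertools.islice(_generate_storage_paths('myapp'), 3))
#   # e.g. ['/tmp/appengine.myapp.jane', '/tmp/appengine.myapp.jane.1',
#   #       '/tmp/appengine.myapp.jane.2'] on POSIX for a user named "jane"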
def setup_stubs(
request_data,
app_id,
application_root,
trusted,
appidentity_email_address,
appidentity_private_key_path,
blobstore_path,
datastore_consistency,
datastore_path,
datastore_require_indexes,
datastore_auto_id_policy,
images_host_prefix,
logs_path,
mail_smtp_host,
mail_smtp_port,
mail_smtp_user,
mail_smtp_password,
mail_enable_sendmail,
mail_show_mail_body,
mail_allow_tls,
search_index_path,
taskqueue_auto_run_tasks,
taskqueue_default_http_server,
user_login_url,
user_logout_url,
default_gcs_bucket_name,
appidentity_oauth_url=None):
"""Configures the APIs hosted by this server.
Args:
request_data: An apiproxy_stub.RequestInformation instance used by the
stubs to lookup information about the request associated with an API
call.
app_id: The str application id e.g. "guestbook".
application_root: The path to the directory containing the user's
application e.g. "/home/joe/myapp".
trusted: A bool indicating if privileged APIs should be made available.
appidentity_email_address: Email address associated with a service account
that has a downloadable key. May be None for no local application
identity.
appidentity_private_key_path: Path to private key file associated with
service account (.pem format). Must be set if appidentity_email_address
is set.
blobstore_path: The path to the file that should be used for blobstore
storage.
datastore_consistency: The datastore_stub_util.BaseConsistencyPolicy to
use as the datastore consistency policy.
datastore_path: The path to the file that should be used for datastore
storage.
datastore_require_indexes: A bool indicating if the same production
datastore index requirements should be enforced i.e. if True then
a google.appengine.ext.db.NeedIndexError will be raised if a query
is executed without the required indexes.
datastore_auto_id_policy: The type of sequence from which the datastore
stub assigns auto IDs, either datastore_stub_util.SEQUENTIAL or
datastore_stub_util.SCATTERED.
images_host_prefix: The URL prefix (protocol://host:port) to prepend to
image urls on calls to images.GetUrlBase.
logs_path: Path to the file to store the logs data in.
mail_smtp_host: The SMTP hostname that should be used when sending e-mails.
If None then the mail_enable_sendmail argument is considered.
mail_smtp_port: The SMTP port number that should be used when sending
e-mails. If this value is None then mail_smtp_host must also be None.
mail_smtp_user: The username to use when authenticating with the
SMTP server. This value may be None if mail_smtp_host is also None or if
the SMTP server does not require authentication.
mail_smtp_password: The password to use when authenticating with the
SMTP server. This value may be None if mail_smtp_host or mail_smtp_user
is also None.
mail_enable_sendmail: A bool indicating if sendmail should be used when
sending e-mails. This argument is ignored if mail_smtp_host is not None.
mail_show_mail_body: A bool indicating whether the body of sent e-mails
should be written to the logs.
mail_allow_tls: A bool indicating whether TLS should be allowed when
communicating with an SMTP server. This argument is ignored if
mail_smtp_host is None.
search_index_path: The path to the file that should be used for search index
storage.
taskqueue_auto_run_tasks: A bool indicating whether taskqueue tasks should
be run automatically or if they must be manually triggered.
taskqueue_default_http_server: A str containing the address of the http
server that should be used to execute tasks.
user_login_url: A str containing the url that should be used for user login.
user_logout_url: A str containing the url that should be used for user
logout.
default_gcs_bucket_name: A str, overriding the default bucket behavior.
appidentity_oauth_url: A str containing the url to the oauth2 server to use
to authenticate the private key. If set to None, then the standard
google oauth2 server is used.
"""
identity_stub = app_identity_stub.AppIdentityServiceStub.Create(
email_address=appidentity_email_address,
private_key_path=appidentity_private_key_path,
oauth_url=appidentity_oauth_url)
if default_gcs_bucket_name is not None:
identity_stub.SetDefaultGcsBucketName(default_gcs_bucket_name)
apiproxy_stub_map.apiproxy.RegisterStub('app_identity_service', identity_stub)
blob_storage = file_blob_storage.FileBlobStorage(blobstore_path, app_id)
apiproxy_stub_map.apiproxy.RegisterStub(
'blobstore',
blobstore_stub.BlobstoreServiceStub(blob_storage,
request_data=request_data))
apiproxy_stub_map.apiproxy.RegisterStub(
'capability_service',
capability_stub.CapabilityServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'channel',
channel_service_stub.ChannelServiceStub(request_data=request_data))
apiproxy_stub_map.apiproxy.ReplaceStub(
'datastore_v3',
datastore_sqlite_stub.DatastoreSqliteStub(
app_id,
datastore_path,
datastore_require_indexes,
trusted,
root_path=application_root,
auto_id_policy=datastore_auto_id_policy,
consistency_policy=datastore_consistency))
apiproxy_stub_map.apiproxy.RegisterStub(
'datastore_v4',
datastore_v4_stub.DatastoreV4Stub(app_id))
apiproxy_stub_map.apiproxy.RegisterStub(
'file',
file_service_stub.FileServiceStub(blob_storage))
try:
from google.appengine.api.images import images_stub
except ImportError:
# We register a stub which throws a NotImplementedError for most RPCs.
from google.appengine.api.images import images_not_implemented_stub
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_not_implemented_stub.ImagesNotImplementedServiceStub(
host_prefix=images_host_prefix))
else:
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_stub.ImagesServiceStub(host_prefix=images_host_prefix))
apiproxy_stub_map.apiproxy.RegisterStub(
'logservice',
logservice_stub.LogServiceStub(logs_path=logs_path))
apiproxy_stub_map.apiproxy.RegisterStub(
'mail',
mail_stub.MailServiceStub(mail_smtp_host,
mail_smtp_port,
mail_smtp_user,
mail_smtp_password,
enable_sendmail=mail_enable_sendmail,
show_mail_body=mail_show_mail_body,
allow_tls=mail_allow_tls))
apiproxy_stub_map.apiproxy.RegisterStub(
'memcache',
memcache_stub.MemcacheServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'modules',
modules_stub.ModulesServiceStub(request_data))
apiproxy_stub_map.apiproxy.RegisterStub(
'remote_socket',
_remote_socket_stub.RemoteSocketServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'search',
simple_search_stub.SearchServiceStub(index_file=search_index_path))
apiproxy_stub_map.apiproxy.RegisterStub(
'system',
system_stub.SystemServiceStub(request_data=request_data))
apiproxy_stub_map.apiproxy.RegisterStub(
'taskqueue',
taskqueue_stub.TaskQueueServiceStub(
root_path=application_root,
auto_task_running=taskqueue_auto_run_tasks,
default_http_server=taskqueue_default_http_server,
request_data=request_data))
apiproxy_stub_map.apiproxy.GetStub('taskqueue').StartBackgroundExecution()
apiproxy_stub_map.apiproxy.RegisterStub(
'urlfetch',
urlfetch_stub.URLFetchServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'user',
user_service_stub.UserServiceStub(login_url=user_login_url,
logout_url=user_logout_url,
request_data=request_data))
apiproxy_stub_map.apiproxy.RegisterStub(
'xmpp',
xmpp_service_stub.XmppServiceStub())
def maybe_convert_datastore_file_stub_data_to_sqlite(app_id, filename):
if not os.access(filename, os.R_OK | os.W_OK):
return
try:
with open(filename, 'rb') as f:
if f.read(16) == 'SQLite format 3\x00':
return
except (IOError, OSError):
return
try:
_convert_datastore_file_stub_data_to_sqlite(app_id, filename)
except:
logging.exception('Failed to convert datastore file stub data to sqlite.')
raise
def _convert_datastore_file_stub_data_to_sqlite(app_id, datastore_path):
logging.info('Converting datastore stub data to sqlite.')
previous_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
try:
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
datastore_stub = datastore_file_stub.DatastoreFileStub(
app_id, datastore_path, trusted=True, save_changes=False)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)
entities = _fetch_all_datastore_entities()
sqlite_datastore_stub = datastore_sqlite_stub.DatastoreSqliteStub(
app_id, datastore_path + '.sqlite', trusted=True)
apiproxy_stub_map.apiproxy.ReplaceStub('datastore_v3',
sqlite_datastore_stub)
datastore.Put(entities)
sqlite_datastore_stub.Close()
finally:
apiproxy_stub_map.apiproxy.ReplaceStub('datastore_v3', previous_stub)
shutil.copy(datastore_path, datastore_path + '.filestub')
os.remove(datastore_path)
shutil.move(datastore_path + '.sqlite', datastore_path)
logging.info('Datastore conversion complete. File stub data has been backed '
'up in %s', datastore_path + '.filestub')
def _fetch_all_datastore_entities():
"""Returns all datastore entities from all namespaces as a list."""
all_entities = []
for namespace in datastore.Query('__namespace__').Run():
namespace_name = namespace.key().name()
for kind in datastore.Query('__kind__', namespace=namespace_name).Run():
all_entities.extend(
datastore.Query(kind.key().name(), namespace=namespace_name).Run())
return all_entities
def test_setup_stubs(
request_data=None,
app_id='myapp',
application_root='/tmp/root',
trusted=False,
appidentity_email_address=None,
appidentity_private_key_path=None,
# TODO: is this correct? If I'm following the flow correctly, this
# should not be a file but a directory.
blobstore_path='/dev/null',
datastore_consistency=None,
datastore_path=':memory:',
datastore_require_indexes=False,
datastore_auto_id_policy=datastore_stub_util.SCATTERED,
images_host_prefix='http://localhost:8080',
logs_path=':memory:',
mail_smtp_host='',
mail_smtp_port=25,
mail_smtp_user='',
mail_smtp_password='',
mail_enable_sendmail=False,
mail_show_mail_body=False,
mail_allow_tls=True,
search_index_path=None,
taskqueue_auto_run_tasks=False,
taskqueue_default_http_server='http://localhost:8080',
user_login_url='/_ah/login?continue=%s',
user_logout_url='/_ah/login?continue=%s',
default_gcs_bucket_name=None,
appidentity_oauth_url=None):
"""Similar to setup_stubs with reasonable test defaults and recallable."""
# Reset the stub map between requests because a stub map only allows a
# stub to be added once.
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
if datastore_consistency is None:
datastore_consistency = (
datastore_stub_util.PseudoRandomHRConsistencyPolicy())
setup_stubs(request_data,
app_id,
application_root,
trusted,
appidentity_email_address,
appidentity_private_key_path,
blobstore_path,
datastore_consistency,
datastore_path,
datastore_require_indexes,
datastore_auto_id_policy,
images_host_prefix,
logs_path,
mail_smtp_host,
mail_smtp_port,
mail_smtp_user,
mail_smtp_password,
mail_enable_sendmail,
mail_show_mail_body,
mail_allow_tls,
search_index_path,
taskqueue_auto_run_tasks,
taskqueue_default_http_server,
user_login_url,
user_logout_url,
default_gcs_bucket_name,
appidentity_oauth_url)
def cleanup_stubs():
"""Do any necessary stub cleanup e.g. saving data."""
# Saving datastore
logging.info('Applying all pending transactions and saving the datastore')
datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
datastore_stub.Write()
logging.info('Saving search indexes')
apiproxy_stub_map.apiproxy.GetStub('search').Write()
apiproxy_stub_map.apiproxy.GetStub('taskqueue').Shutdown()
def main():
"""Parses command line options and launches the API server."""
shutdown.install_signal_handlers()
options = cli_parser.create_command_line_parser(
cli_parser.API_SERVER_CONFIGURATION).parse_args()
logging.getLogger().setLevel(
constants.LOG_LEVEL_TO_PYTHON_CONSTANT[options.dev_appserver_log_level])
# Parse the application configuration if config_paths are provided, else
# provide sensible defaults.
if options.config_paths:
app_config = application_configuration.ApplicationConfiguration(
options.config_paths, options.app_id)
app_id = app_config.app_id
app_root = app_config.modules[0].application_root
else:
app_id = ('dev~' + options.app_id if
options.app_id else DEFAULT_API_SERVER_APP_ID)
app_root = tempfile.mkdtemp()
# pylint: disable=protected-access
# TODO: Rename LocalFakeDispatcher or re-implement for api_server.py.
request_info = wsgi_request_info.WSGIRequestInfo(
request_info_lib._LocalFakeDispatcher())
# pylint: enable=protected-access
server = create_api_server(
request_info=request_info,
storage_path=get_storage_path(options.storage_path, app_id),
options=options, app_id=app_id, app_root=app_root)
try:
server.start()
shutdown.wait_until_shutdown()
finally:
server.quit()
if __name__ == '__main__':
main()
|
[] |
[] |
[
"GRPC_PORT"
] |
[]
|
["GRPC_PORT"]
|
python
| 1 | 0 | |
mathgraph/tree_vertex.py
|
from mathgraph.vertex import Vertex
class TreeVertex(Vertex):
""" Tree vertex class
This class models the elements of a tree. It is a subclass of the Vertex class.
It also stores the parent and the leafs (children) of the node of the tree.
Attributes:
----------
parent : TreeVertex, optional
Parent of the vertex
"""
def __init__(self,name,weight=0,parent=None,updaters=None,height=None):
""" We set the parent and the updaters.
"""
super().__init__(name,weight,updaters)
self.parent=parent
self.leafs=[]
self.height=height
def get_parent(self):
""" Returns the parent of the vertex
"""
return self.parent
def get_leafs(self):
""" Returns the leafs of the vertex
"""
return self.leafs
def get_height(self):
""" Returns the height of the vertex
"""
return self.height
def set_parent(self,parent):
""" Set the parent of the vertex
"""
self.parent=parent
def set_height(self,h):
""" Set the height of the vertex
"""
self.height=h
def add_leafs(self,leaf):
""" Add leafs to the vertex
"""
self.leafs.append(leaf)
def delete_parent(self):
""" Delete the parent
"""
self.parent=None
def delete_leaf(self,leaf):
""" Delete a leaf
"""
self.leafs.remove(leaf)
def __repr__(self):
rep="TreeVertex (name='"+self.name+"',weight="+str(self.weight)
if self.parent:
rep+=", parent="+self.parent.get_name()
rep+=")"
return rep
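# Illustrative sketch (not part of the original module), assuming Vertex
# accepts (name, weight, updaters) as the super().__init__ call above suggests
# and exposes get_name() as used in __repr__:
#
#   root = TreeVertex('root', weight=1, height=0)
#   child = TreeVertex('child', weight=2, parent=root, height=1)
#   root.add_leafs(child)          # register child as a leaf of root
#   child.get_parent().get_name()  # -> 'root'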
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
tests/test_utils.py
|
import datetime
import time
import os
import tempfile
import shutil
from unittest import mock
from binascii import hexlify
DEFAULT_TIMESTAMP = datetime.datetime(2016, 1, 1)
DEFAULT_ISO_TIME = time.mktime(DEFAULT_TIMESTAMP.timetuple())
def mk_db_and_blob_dir():
db_dir = tempfile.mkdtemp()
blob_dir = tempfile.mkdtemp()
return db_dir, blob_dir
def rm_db_and_blob_dir(db_dir, blob_dir):
shutil.rmtree(db_dir, ignore_errors=True)
shutil.rmtree(blob_dir, ignore_errors=True)
def random_lbry_hash():
return hexlify(os.urandom(48)).decode()
def reset_time(test_case, timestamp=DEFAULT_TIMESTAMP):
iso_time = time.mktime(timestamp.timetuple())
patcher = mock.patch('time.time')
patcher.start().return_value = iso_time
test_case.addCleanup(patcher.stop)
patcher = mock.patch('lbrynet.utils.now')
patcher.start().return_value = timestamp
test_case.addCleanup(patcher.stop)
patcher = mock.patch('lbrynet.utils.utcnow')
patcher.start().return_value = timestamp
test_case.addCleanup(patcher.stop)
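# Illustrative sketch (not part of the original module): reset_time() expects a
# unittest.TestCase so the patches are undone via addCleanup. A hypothetical
# test could freeze the clock like this (unittest import assumed):
#
#   class ClockTest(unittest.TestCase):
#       def setUp(self):
#           reset_time(self)  # freezes time.time(), lbrynet.utils.now/utcnow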
def is_android():
return 'ANDROID_ARGUMENT' in os.environ # detect Android using the Kivy way
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'school.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/osutil/osutil_linux.go
|
// Copyright 2017 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
// +build !appengine
package osutil
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"unsafe"
)
// RemoveAll is similar to os.RemoveAll, but can handle more cases.
func RemoveAll(dir string) error {
files, _ := ioutil.ReadDir(dir)
for _, f := range files {
name := filepath.Join(dir, f.Name())
if f.IsDir() {
RemoveAll(name)
}
fn := []byte(name + "\x00")
syscall.Syscall(syscall.SYS_UMOUNT2, uintptr(unsafe.Pointer(&fn[0])), syscall.MNT_FORCE, 0)
}
if err := os.RemoveAll(dir); err != nil {
removeImmutable(dir)
return os.RemoveAll(dir)
}
return nil
}
func SystemMemorySize() uint64 {
var info syscall.Sysinfo_t
syscall.Sysinfo(&info)
return uint64(info.Totalram) //nolint:unconvert
}
func removeImmutable(fname string) error {
// Reset FS_XFLAG_IMMUTABLE/FS_XFLAG_APPEND.
fd, err := syscall.Open(fname, syscall.O_RDONLY, 0)
if err != nil {
return err
}
defer syscall.Close(fd)
flags := 0
var cmd uint64 // FS_IOC_SETFLAGS
switch runtime.GOARCH {
case "386", "arm":
cmd = 1074030082
case "amd64", "arm64":
cmd = 1074292226
case "ppc64le", "mips64le":
cmd = 2148034050
default:
panic("unknown arch")
}
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(&flags)))
return errno
}
func Sandbox(cmd *exec.Cmd, user, net bool) error {
enabled, uid, gid, err := initSandbox()
if err != nil || !enabled {
return err
}
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = new(syscall.SysProcAttr)
}
if net {
cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWNET | syscall.CLONE_NEWIPC |
syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWPID
}
if user {
cmd.SysProcAttr.Credential = &syscall.Credential{
Uid: uid,
Gid: gid,
}
}
return nil
}
func SandboxChown(file string) error {
enabled, uid, gid, err := initSandbox()
if err != nil || !enabled {
return err
}
return os.Chown(file, int(uid), int(gid))
}
var (
sandboxOnce sync.Once
sandboxEnabled = true
sandboxUsername = "syzkaller"
sandboxUID = ^uint32(0)
sandboxGID = ^uint32(0)
)
func initSandbox() (bool, uint32, uint32, error) {
sandboxOnce.Do(func() {
if syscall.Getuid() != 0 || os.Getenv("SYZ_DISABLE_SANDBOXING") == "yes" {
sandboxEnabled = false
return
}
uid, err := usernameToID("-u")
if err != nil {
return
}
gid, err := usernameToID("-g")
if err != nil {
return
}
sandboxUID = uid
sandboxGID = gid
})
if sandboxEnabled && sandboxUID == ^uint32(0) {
return false, 0, 0, fmt.Errorf("user %q is not found, can't sandbox command", sandboxUsername)
}
return sandboxEnabled, sandboxUID, sandboxGID, nil
}
func usernameToID(what string) (uint32, error) {
out, err := RunCmd(time.Minute, "", "id", what, sandboxUsername)
if err != nil {
return 0, err
}
str := strings.Trim(string(out), " \t\n")
id, err := strconv.ParseUint(str, 10, 32)
if err != nil {
return 0, err
}
return uint32(id), nil
}
func setPdeathsig(cmd *exec.Cmd) {
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = new(syscall.SysProcAttr)
}
cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
// We will kill the whole process group.
cmd.SysProcAttr.Setpgid = true
}
func killPgroup(cmd *exec.Cmd) {
syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)
}
func prolongPipe(r, w *os.File) {
for sz := 128 << 10; sz <= 2<<20; sz *= 2 {
syscall.Syscall(syscall.SYS_FCNTL, w.Fd(), syscall.F_SETPIPE_SZ, uintptr(sz))
}
}
|
[
"\"SYZ_DISABLE_SANDBOXING\""
] |
[] |
[
"SYZ_DISABLE_SANDBOXING"
] |
[]
|
["SYZ_DISABLE_SANDBOXING"]
|
go
| 1 | 0 | |
test/context.go
|
/*
* Copyright 2018 Venafi, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test
import (
"os"
)
type Context struct {
TPPurl string
TPPuser string
TPPPassword string
TPPaccessToken string
TPPZone string
TPPZoneRestricted string
TPPZoneECDSA string
TPPRefreshToken string
ClientID string
CloudUrl string
CloudAPIkey string
CloudZone string
CloudZoneRestricted string
}
func GetEnvContext() *Context {
//TODO: should rewrite to our standard variable names, TPPURL, TPPUSER etc
c := &Context{}
c.TPPurl = os.Getenv("TPP_URL")
c.TPPuser = os.Getenv("TPP_USER")
c.TPPPassword = os.Getenv("TPP_PASSWORD")
c.ClientID = os.Getenv("CLIENT_ID")
c.TPPZone = os.Getenv("TPP_ZONE")
c.TPPZoneRestricted = os.Getenv("TPP_ZONE_RESTRICTED")
c.TPPZoneECDSA = os.Getenv("TPP_ZONE_ECDSA")
c.CloudUrl = os.Getenv("CLOUD_URL")
c.CloudAPIkey = os.Getenv("CLOUD_APIKEY")
c.CloudZone = os.Getenv("CLOUD_ZONE")
c.CloudZoneRestricted = os.Getenv("CLOUD_ZONE_RESTRICTED")
return c
}
|
[
"\"TPP_URL\"",
"\"TPP_USER\"",
"\"TPP_PASSWORD\"",
"\"CLIENT_ID\"",
"\"TPP_ZONE\"",
"\"TPP_ZONE_RESTRICTED\"",
"\"TPP_ZONE_ECDSA\"",
"\"CLOUD_URL\"",
"\"CLOUD_APIKEY\"",
"\"CLOUD_ZONE\"",
"\"CLOUD_ZONE_RESTRICTED\""
] |
[] |
[
"CLOUD_URL",
"CLOUD_ZONE_RESTRICTED",
"TPP_URL",
"CLOUD_APIKEY",
"TPP_USER",
"TPP_ZONE_RESTRICTED",
"CLIENT_ID",
"CLOUD_ZONE",
"TPP_PASSWORD",
"TPP_ZONE",
"TPP_ZONE_ECDSA"
] |
[]
|
["CLOUD_URL", "CLOUD_ZONE_RESTRICTED", "TPP_URL", "CLOUD_APIKEY", "TPP_USER", "TPP_ZONE_RESTRICTED", "CLIENT_ID", "CLOUD_ZONE", "TPP_PASSWORD", "TPP_ZONE", "TPP_ZONE_ECDSA"]
|
go
| 11 | 0 | |
config/environment.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Generic configuration for the project."""
import os
# Define the application base directory
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
DATA_PATH = os.path.join(BASE_DIR, 'data')
# Add the data/ directory if it doesn't exist.
if not os.path.exists(DATA_PATH):
os.makedirs(DATA_PATH)
# the PRODUCTION and DEVELOPMENT environment variables are set in uwsgi.conf
# on the webcompat server. If they're not set, they will default to None
PRODUCTION = os.environ.get('PRODUCTION')
# locally, we refer to this as STAGING
STAGING = os.environ.get('DEVELOPMENT')
# Are we serving the app from localhost?
LOCALHOST = not PRODUCTION and not STAGING
if PRODUCTION:
GITHUB_CLIENT_ID = os.environ.get('PROD_GITHUB_CLIENT_ID')
GITHUB_CLIENT_SECRET = os.environ.get('PROD_GITHUB_CLIENT_SECRET')
GITHUB_CALLBACK_URL = os.environ.get('PROD_GITHUB_CALLBACK_URL')
HOOK_SECRET_KEY = os.environ.get('HOOK_SECRET_KEY')
ISSUES_REPO_URI = 'webcompat/web-bugs/issues'
OAUTH_TOKEN = os.environ.get('PROD_OAUTH_TOKEN')
SECRET_KEY = os.environ.get('PROD_SECRET_KEY')
UPLOADS_DEFAULT_DEST = os.environ.get('PROD_UPLOADS_DEFAULT_DEST')
UPLOADS_DEFAULT_URL = os.environ.get('PROD_UPLOADS_DEFAULT_URL')
if STAGING:
GITHUB_CLIENT_ID = os.environ.get('STAGING_GITHUB_CLIENT_ID')
GITHUB_CLIENT_SECRET = os.environ.get('STAGING_GITHUB_CLIENT_SECRET')
GITHUB_CALLBACK_URL = os.environ.get('STAGING_GITHUB_CALLBACK_URL')
HOOK_SECRET_KEY = os.environ.get('HOOK_SECRET_KEY')
ISSUES_REPO_URI = 'webcompat/webcompat-tests/issues'
OAUTH_TOKEN = os.environ.get('STAGING_OAUTH_TOKEN')
SECRET_KEY = os.environ.get('STAGING_SECRET_KEY')
UPLOADS_DEFAULT_DEST = os.environ.get('STAGING_UPLOADS_DEFAULT_DEST')
UPLOADS_DEFAULT_URL = os.environ.get('STAGING_UPLOADS_DEFAULT_URL')
# see secrets.py.example for the rest of the config values that need
# to be modified for localhost development
if LOCALHOST:
ISSUES_REPO_URI = 'webcompat/webcompat-tests/issues'
UPLOADS_DEFAULT_DEST = BASE_DIR + '/uploads/'
UPLOADS_DEFAULT_URL = 'http://localhost:5000/uploads/'
# BUG STATUS
# The id will be initialized when the app is started.
STATUSES = {
u'needstriage': {'id': 0, 'order': 1, 'state': 'open'},
u'needsdiagnosis': {'id': 0, 'order': 2, 'state': 'open'},
u'needscontact': {'id': 0, 'order': 3, 'state': 'open'},
u'contactready': {'id': 0, 'order': 4, 'state': 'open'},
u'sitewait': {'id': 0, 'order': 5, 'state': 'open'},
u'duplicate': {'id': 0, 'order': 1, 'state': 'closed'},
u'fixed': {'id': 0, 'order': 2, 'state': 'closed'},
u'incomplete': {'id': 0, 'order': 3, 'state': 'closed'},
u'invalid': {'id': 0, 'order': 4, 'state': 'closed'},
u'non-compat': {'id': 0, 'order': 5, 'state': 'closed'},
u'wontfix': {'id': 0, 'order': 6, 'state': 'closed'},
u'worksforme': {'id': 0, 'order': 7, 'state': 'closed'}}
# We don't need to compute this for every request.
OPEN_STATUSES = [status for status in STATUSES
if STATUSES[status]['state'] == 'open']
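# Illustrative note (not part of the original config): with the STATUSES table
# above, OPEN_STATUSES contains the five labels whose state is 'open', i.e.
# needstriage, needsdiagnosis, needscontact, contactready and sitewait
# (dict iteration order is not guaranteed on Python 2, so treat any ordering
# as incidental).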
# Messages Configuration
CSS_FIX_ME = """
This resource doesn't exist anymore.
See https://github.com/webcompat/css-fixme/
for more details."""
IS_BLACKLISTED_DOMAIN = (u'Anonymous reporting for domain {0} '
'is temporarily disabled. Please contact '
'[email protected] '
'for more details.')
SHOW_RATE_LIMIT = """
All those moments will be lost in time…
like tears in rain…
Time to die.
– Blade Runner
This resource doesn't exist anymore."""
WELL_KNOWN_ALL = """
Sorry dear bot,
the route /.well-known/{subpath} doesn't exist.
Nothing behind me, everything ahead of me, as is ever so on the road.
- Jack Kerouac, On the Road."""
WELL_KNOWN_SECURITY = """Contact: mailto:[email protected]
Contact: mailto:[email protected]
"""
# AB setup
# Comma separated list of user IDs to exempt from experiments
AB_EXEMPT_USERS = os.environ.get('AB_EXEMPT_USERS', '').split(',')
|
[] |
[] |
[
"PRODUCTION",
"HOOK_SECRET_KEY",
"STAGING_SECRET_KEY",
"STAGING_GITHUB_CLIENT_ID",
"AB_EXEMPT_USERS",
"STAGING_GITHUB_CALLBACK_URL",
"PROD_UPLOADS_DEFAULT_DEST",
"PROD_GITHUB_CALLBACK_URL",
"STAGING_UPLOADS_DEFAULT_URL",
"STAGING_GITHUB_CLIENT_SECRET",
"PROD_UPLOADS_DEFAULT_URL",
"PROD_GITHUB_CLIENT_SECRET",
"STAGING_OAUTH_TOKEN",
"STAGING_UPLOADS_DEFAULT_DEST",
"PROD_GITHUB_CLIENT_ID",
"PROD_OAUTH_TOKEN",
"PROD_SECRET_KEY",
"DEVELOPMENT"
] |
[]
|
["PRODUCTION", "HOOK_SECRET_KEY", "STAGING_SECRET_KEY", "STAGING_GITHUB_CLIENT_ID", "AB_EXEMPT_USERS", "STAGING_GITHUB_CALLBACK_URL", "PROD_UPLOADS_DEFAULT_DEST", "PROD_GITHUB_CALLBACK_URL", "STAGING_UPLOADS_DEFAULT_URL", "STAGING_GITHUB_CLIENT_SECRET", "PROD_UPLOADS_DEFAULT_URL", "PROD_GITHUB_CLIENT_SECRET", "STAGING_OAUTH_TOKEN", "STAGING_UPLOADS_DEFAULT_DEST", "PROD_GITHUB_CLIENT_ID", "PROD_OAUTH_TOKEN", "PROD_SECRET_KEY", "DEVELOPMENT"]
|
python
| 18 | 0 | |
cmd/ddltest/ddl_test.go
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddltest
import (
"database/sql"
"database/sql/driver"
"flag"
"fmt"
"math/rand"
"os"
"os/exec"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/daiguadaidai/parser/model"
"github.com/daiguadaidai/parser/terror"
"github.com/daiguadaidai/tidb/domain"
"github.com/daiguadaidai/tidb/kv"
"github.com/daiguadaidai/tidb/session"
"github.com/daiguadaidai/tidb/sessionctx"
"github.com/daiguadaidai/tidb/store"
"github.com/daiguadaidai/tidb/store/tikv"
"github.com/daiguadaidai/tidb/table"
"github.com/daiguadaidai/tidb/types"
"github.com/daiguadaidai/tidb/util/logutil"
_ "github.com/go-sql-driver/mysql"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
zaplog "github.com/pingcap/log"
log "github.com/sirupsen/logrus"
goctx "golang.org/x/net/context"
)
func TestDDL(t *testing.T) {
CustomVerboseFlag = true
TestingT(t)
}
var (
etcd = flag.String("etcd", "127.0.0.1:2379", "etcd path")
tidbIP = flag.String("tidb_ip", "127.0.0.1", "tidb-server ip address")
tikvPath = flag.String("tikv_path", "", "tikv path")
lease = flag.Int("lease", 1, "DDL schema lease time, seconds")
serverNum = flag.Int("server_num", 3, "Maximum running tidb server")
startPort = flag.Int("start_port", 5000, "First tidb-server listening port")
statusPort = flag.Int("status_port", 8000, "First tidb-server status port")
logLevel = flag.String("L", "error", "log level")
ddlServerLogLevel = flag.String("ddl_log_level", "debug", "DDL server log level")
dataNum = flag.Int("n", 100, "minimal test dataset for a table")
enableRestart = flag.Bool("enable_restart", true, "whether random restart servers for tests")
)
var _ = Suite(&TestDDLSuite{})
type server struct {
*exec.Cmd
logFP *os.File
db *sql.DB
addr string
}
type TestDDLSuite struct {
store kv.Storage
dom *domain.Domain
storePath string
s session.Session
ctx sessionctx.Context
m sync.Mutex
procs []*server
wg sync.WaitGroup
quit chan struct{}
retryCount int
}
func (s *TestDDLSuite) SetUpSuite(c *C) {
logutil.InitLogger(&logutil.LogConfig{Config: zaplog.Config{Level: *logLevel}})
s.quit = make(chan struct{})
var err error
s.store, err = store.New(fmt.Sprintf("tikv://%s%s", *etcd, *tikvPath))
c.Assert(err, IsNil)
// Make sure the schema lease of this session is equal to other TiDB servers'.
session.SetSchemaLease(time.Duration(*lease) * time.Second)
dom, err := session.BootstrapSession(s.store)
c.Assert(err, IsNil)
s.dom = dom
s.s, err = session.CreateSession(s.store)
c.Assert(err, IsNil)
s.ctx = s.s.(sessionctx.Context)
goCtx := goctx.Background()
_, err = s.s.Execute(goCtx, "create database if not exists test_ddl")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "use test_ddl")
c.Assert(err, IsNil)
s.Bootstrap(c)
// Stop current DDL worker, so that we can't be the owner now.
err = domain.GetDomain(s.ctx).DDL().Stop()
c.Assert(err, IsNil)
addEnvPath("..")
// Start multi tidb servers
s.procs = make([]*server, *serverNum)
// Set server restart retry count.
s.retryCount = 5
createLogFiles(c, *serverNum)
err = s.startServers()
c.Assert(err, IsNil)
s.wg.Add(1)
go s.restartServerRegularly()
}
// restartServerRegularly restarts a tidb server regularly.
func (s *TestDDLSuite) restartServerRegularly() {
defer s.wg.Done()
var err error
after := *lease * (6 + randomIntn(6))
for {
select {
case <-time.After(time.Duration(after) * time.Second):
if *enableRestart {
err = s.restartServerRand()
if err != nil {
log.Fatalf("restartServerRand failed, err %v", errors.ErrorStack(err))
}
}
case <-s.quit:
return
}
}
}
func (s *TestDDLSuite) TearDownSuite(c *C) {
close(s.quit)
s.wg.Wait()
s.dom.Close()
// TODO: Remove these logs after testing.
quitCh := make(chan struct{})
go func() {
select {
case <-time.After(100 * time.Second):
buf := make([]byte, 2<<20)
size := runtime.Stack(buf, true)
log.Errorf("%s", buf[:size])
case <-quitCh:
}
}()
err := s.store.Close()
c.Assert(err, IsNil)
close(quitCh)
err = s.stopServers()
c.Assert(err, IsNil)
}
func (s *TestDDLSuite) startServers() (err error) {
s.m.Lock()
defer s.m.Unlock()
for i := 0; i < len(s.procs); i++ {
if s.procs[i] != nil {
continue
}
// Open log file.
logFP, err := os.OpenFile(fmt.Sprintf("%s%d", logFilePrefix, i), os.O_RDWR, 0766)
if err != nil {
return errors.Trace(err)
}
s.procs[i], err = s.startServer(i, logFP)
if err != nil {
return errors.Trace(err)
}
}
return nil
}
func (s *TestDDLSuite) killServer(proc *os.Process) error {
// Make sure this tidb is killed, and it makes the next tidb that has the same port as this one start quickly.
err := proc.Kill()
if err != nil {
log.Errorf("kill server failed err %v", err)
return errors.Trace(err)
}
_, err = proc.Wait()
if err != nil {
log.Errorf("kill server, wait failed err %v", err)
return errors.Trace(err)
}
time.Sleep(1 * time.Second)
return nil
}
func (s *TestDDLSuite) stopServers() error {
s.m.Lock()
defer s.m.Unlock()
for i := 0; i < len(s.procs); i++ {
if s.procs[i] != nil {
err := s.killServer(s.procs[i].Process)
if err != nil {
return errors.Trace(err)
}
s.procs[i] = nil
}
}
return nil
}
var logFilePrefix = "tidb_log_file_"
func createLogFiles(c *C, length int) {
for i := 0; i < length; i++ {
fp, err := os.Create(fmt.Sprintf("%s%d", logFilePrefix, i))
if err != nil {
c.Assert(err, IsNil)
}
fp.Close()
}
}
func (s *TestDDLSuite) startServer(i int, fp *os.File) (*server, error) {
cmd := exec.Command("ddltest_tidb-server",
"--store=tikv",
fmt.Sprintf("-L=%s", *ddlServerLogLevel),
fmt.Sprintf("--path=%s%s", *etcd, *tikvPath),
fmt.Sprintf("-P=%d", *startPort+i),
fmt.Sprintf("--status=%d", *statusPort+i),
fmt.Sprintf("--lease=%d", *lease))
cmd.Stderr = fp
cmd.Stdout = fp
err := cmd.Start()
if err != nil {
return nil, errors.Trace(err)
}
time.Sleep(500 * time.Millisecond)
// Make sure tidb server process is started.
ps := fmt.Sprintf("ps -aux|grep ddltest_tidb|grep %d", *startPort+i)
output, _ := exec.Command("sh", "-c", ps).Output()
if !strings.Contains(string(output), "ddltest_tidb-server") {
time.Sleep(1 * time.Second)
}
// Open database.
var db *sql.DB
addr := fmt.Sprintf("%s:%d", *tidbIP, *startPort+i)
sleepTime := time.Millisecond * 250
startTime := time.Now()
for i := 0; i < s.retryCount; i++ {
db, err = sql.Open("mysql", fmt.Sprintf("root@(%s)/test_ddl", addr))
if err != nil {
log.Warnf("open addr %v failed, retry count %d err %v", addr, i, err)
continue
}
err = db.Ping()
if err == nil {
break
}
log.Warnf("ping addr %v failed, retry count %d err %v", addr, i, err)
db.Close()
time.Sleep(sleepTime)
sleepTime += sleepTime
}
if err != nil {
log.Errorf("restart server addr %v failed %v, take time %v", addr, err, time.Since(startTime))
return nil, errors.Trace(err)
}
db.SetMaxOpenConns(10)
_, err = db.Exec("use test_ddl")
if err != nil {
return nil, errors.Trace(err)
}
log.Infof("start server %s ok %v", addr, err)
return &server{
Cmd: cmd,
db: db,
addr: addr,
logFP: fp,
}, nil
}
func (s *TestDDLSuite) restartServerRand() error {
i := rand.Intn(*serverNum)
s.m.Lock()
defer s.m.Unlock()
if s.procs[i] == nil {
return nil
}
server := s.procs[i]
s.procs[i] = nil
log.Warnf("begin to restart %s", server.addr)
err := s.killServer(server.Process)
if err != nil {
return errors.Trace(err)
}
s.procs[i], err = s.startServer(i, server.logFP)
return errors.Trace(err)
}
func isRetryError(err error) bool {
if err == nil {
return false
}
if terror.ErrorEqual(err, driver.ErrBadConn) ||
strings.Contains(err.Error(), "connection refused") ||
strings.Contains(err.Error(), "getsockopt: connection reset by peer") ||
strings.Contains(err.Error(), "KV error safe to retry") ||
strings.Contains(err.Error(), "try again later") {
return true
}
// TODO: Check the specific columns number.
if strings.Contains(err.Error(), "Column count doesn't match value count at row") {
log.Warnf("err is %v", err)
return false
}
log.Errorf("err is %v, can not retry", err)
return false
}
func (s *TestDDLSuite) exec(query string, args ...interface{}) (sql.Result, error) {
for {
server := s.getServer()
r, err := server.db.Exec(query, args...)
if isRetryError(err) {
log.Errorf("exec %s in server %s err %v, retry", query, err, server.addr)
continue
}
return r, err
}
}
func (s *TestDDLSuite) mustExec(c *C, query string, args ...interface{}) sql.Result {
r, err := s.exec(query, args...)
if err != nil {
log.Fatalf("[mustExec fail]query - %v %v, error - %v", query, args, err)
}
return r
}
func (s *TestDDLSuite) execInsert(c *C, query string, args ...interface{}) sql.Result {
for {
r, err := s.exec(query, args...)
if err == nil {
return r
}
if *enableRestart {
// If random server restarts are enabled, we should ignore duplicate key errors.
if strings.Contains(err.Error(), "Duplicate entry") &&
strings.Contains(err.Error(), "for key") {
return r
}
}
log.Fatalf("[execInsert fail]query - %v %v, error - %v", query, args, err)
}
}
func (s *TestDDLSuite) query(query string, args ...interface{}) (*sql.Rows, error) {
for {
server := s.getServer()
r, err := server.db.Query(query, args...)
if isRetryError(err) {
log.Errorf("query %s in server %s err %v, retry", query, err, server.addr)
continue
}
return r, err
}
}
func (s *TestDDLSuite) mustQuery(c *C, query string, args ...interface{}) *sql.Rows {
r, err := s.query(query, args...)
if err != nil {
log.Fatalf("[mustQuery fail]query - %v %v, error - %v", query, args, err)
}
return r
}
func (s *TestDDLSuite) getServer() *server {
s.m.Lock()
defer s.m.Unlock()
for i := 0; i < 20; i++ {
i := rand.Intn(*serverNum)
if s.procs[i] != nil {
return s.procs[i]
}
}
log.Fatalf("try to get server too many times")
return nil
}
// runDDL executes the DDL query, returns a channel so that you can use it to wait DDL finished.
func (s *TestDDLSuite) runDDL(sql string) chan error {
done := make(chan error, 1)
go func() {
_, err := s.s.Execute(goctx.Background(), sql)
// We must wait 2 * lease time to guarantee all servers update the schema.
if err == nil {
time.Sleep(time.Duration(*lease) * time.Second * 2)
}
done <- err
}()
return done
}
func (s *TestDDLSuite) getTable(c *C, name string) table.Table {
tbl, err := domain.GetDomain(s.ctx).InfoSchema().TableByName(model.NewCIStr("test_ddl"), model.NewCIStr(name))
c.Assert(err, IsNil)
return tbl
}
func dumpRows(c *C, rows *sql.Rows) [][]interface{} {
cols, err := rows.Columns()
c.Assert(err, IsNil)
var ay [][]interface{}
for rows.Next() {
v := make([]interface{}, len(cols))
for i := range v {
v[i] = new(interface{})
}
err = rows.Scan(v...)
c.Assert(err, IsNil)
for i := range v {
v[i] = *(v[i].(*interface{}))
}
ay = append(ay, v)
}
rows.Close()
c.Assert(rows.Err(), IsNil, Commentf("%v", ay))
return ay
}
func matchRows(c *C, rows *sql.Rows, expected [][]interface{}) {
ay := dumpRows(c, rows)
c.Assert(len(ay), Equals, len(expected), Commentf("%v", expected))
for i := range ay {
match(c, ay[i], expected[i]...)
}
}
func match(c *C, row []interface{}, expected ...interface{}) {
c.Assert(len(row), Equals, len(expected))
for i := range row {
if row[i] == nil {
c.Assert(expected[i], IsNil)
continue
}
got, err := types.ToString(row[i])
c.Assert(err, IsNil)
need, err := types.ToString(expected[i])
c.Assert(err, IsNil)
c.Assert(got, Equals, need)
}
}
func (s *TestDDLSuite) Bootstrap(c *C) {
goCtx := goctx.Background()
// Initialize test data, you must use session to do it
_, err := s.s.Execute(goCtx, "use test_ddl")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "drop table if exists test_index, test_column, test_insert, test_conflict_insert, "+
"test_update, test_conflict_update, test_delete, test_conflict_delete, test_mixed, test_inc")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "create table test_index (c int, c1 bigint, c2 double, c3 varchar(256), primary key(c))")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "create table test_column (c1 int, c2 int, primary key(c1))")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "create table test_insert (c1 int, c2 int, primary key(c1))")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "create table test_conflict_insert (c1 int, c2 int, primary key(c1))")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "create table test_update (c1 int, c2 int, primary key(c1))")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "create table test_conflict_update (c1 int, c2 int, primary key(c1))")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "create table test_delete (c1 int, c2 int, primary key(c1))")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "create table test_conflict_delete (c1 int, c2 int, primary key(c1))")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "create table test_mixed (c1 int, c2 int, primary key(c1))")
c.Assert(err, IsNil)
_, err = s.s.Execute(goCtx, "create table test_inc (c1 int, c2 int, primary key(c1))")
c.Assert(err, IsNil)
}
func (s *TestDDLSuite) TestSimple(c *C) {
done := s.runDDL("create table if not exists test_simple (c1 int, c2 int, c3 int)")
err := <-done
c.Assert(err, IsNil)
_, err = s.exec("insert into test_simple values (1, 1, 1)")
c.Assert(err, IsNil)
rows, err := s.query("select c1 from test_simple limit 1")
c.Assert(err, IsNil)
matchRows(c, rows, [][]interface{}{{1}})
done = s.runDDL("drop table if exists test_simple")
err = <-done
c.Assert(err, IsNil)
}
func (s *TestDDLSuite) TestSimpleInsert(c *C) {
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(c, fmt.Sprintf("insert into test_insert values (%d, %d)", k, k))
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleInsert][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_insert")
handles := make(map[int64]struct{})
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
handles[h] = struct{}{}
c.Assert(data[0].GetValue(), Equals, data[1].GetValue())
return true, nil
})
c.Assert(err, IsNil)
c.Assert(handles, HasLen, rowCount, Commentf("%d %d", len(handles), rowCount))
}
func (s *TestDDLSuite) TestSimpleConflictInsert(c *C) {
var mu sync.Mutex
keysMap := make(map[int64]int64)
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < batch; j++ {
k := randomNum(rowCount)
s.exec(fmt.Sprintf("insert into test_conflict_insert values (%d, %d)", k, k))
mu.Lock()
keysMap[int64(k)] = int64(k)
mu.Unlock()
}
}()
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleConflictInsert][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_conflict_insert")
handles := make(map[int64]struct{})
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
handles[h] = struct{}{}
c.Assert(keysMap, HasKey, data[0].GetValue())
c.Assert(data[0].GetValue(), Equals, data[1].GetValue())
return true, nil
})
c.Assert(err, IsNil)
c.Assert(len(handles), Equals, len(keysMap))
}
func (s *TestDDLSuite) TestSimpleUpdate(c *C) {
var mu sync.Mutex
keysMap := make(map[int64]int64)
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(c, fmt.Sprintf("insert into test_update values (%d, %d)", k, k))
v := randomNum(rowCount)
s.mustExec(c, fmt.Sprintf("update test_update set c2 = %d where c1 = %d", v, k))
mu.Lock()
keysMap[int64(k)] = int64(v)
mu.Unlock()
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleUpdate][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_update")
handles := make(map[int64]struct{})
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
handles[h] = struct{}{}
key := data[0].GetInt64()
c.Assert(data[1].GetValue(), Equals, keysMap[key])
return true, nil
})
c.Assert(err, IsNil)
c.Assert(handles, HasLen, rowCount)
}
func (s *TestDDLSuite) TestSimpleConflictUpdate(c *C) {
var mu sync.Mutex
keysMap := make(map[int64]int64)
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(c, fmt.Sprintf("insert into test_conflict_update values (%d, %d)", k, k))
mu.Lock()
keysMap[int64(k)] = int64(k)
mu.Unlock()
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleConflictUpdate][Insert][Time Cost]%v\n", end.Sub(start))
start = time.Now()
defaultValue := int64(-1)
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < batch; j++ {
k := randomNum(rowCount)
s.mustExec(c, fmt.Sprintf("update test_conflict_update set c2 = %d where c1 = %d", defaultValue, k))
mu.Lock()
keysMap[int64(k)] = int64(defaultValue)
mu.Unlock()
}
}()
}
wg.Wait()
end = time.Now()
fmt.Printf("[TestSimpleConflictUpdate][Update][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_conflict_update")
handles := make(map[int64]struct{})
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
handles[h] = struct{}{}
c.Assert(keysMap, HasKey, data[0].GetValue())
if !reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) && !reflect.DeepEqual(data[1].GetValue(), defaultValue) {
log.Fatalf("[TestSimpleConflictUpdate fail]Bad row: %v", data)
}
return true, nil
})
c.Assert(err, IsNil)
c.Assert(handles, HasLen, rowCount)
}
func (s *TestDDLSuite) TestSimpleDelete(c *C) {
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(c, fmt.Sprintf("insert into test_delete values (%d, %d)", k, k))
s.mustExec(c, fmt.Sprintf("delete from test_delete where c1 = %d", k))
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleDelete][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_delete")
handles := make(map[int64]struct{})
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
handles[h] = struct{}{}
return true, nil
})
c.Assert(err, IsNil)
c.Assert(handles, HasLen, 0)
}
func (s *TestDDLSuite) TestSimpleConflictDelete(c *C) {
var mu sync.Mutex
keysMap := make(map[int64]int64)
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(c, fmt.Sprintf("insert into test_conflict_delete values (%d, %d)", k, k))
mu.Lock()
keysMap[int64(k)] = int64(k)
mu.Unlock()
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleConflictDelete][Insert][Time Cost]%v\n", end.Sub(start))
start = time.Now()
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := randomNum(rowCount)
s.mustExec(c, fmt.Sprintf("delete from test_conflict_delete where c1 = %d", k))
mu.Lock()
delete(keysMap, int64(k))
mu.Unlock()
}
}(i)
}
wg.Wait()
end = time.Now()
fmt.Printf("[TestSimpleConflictDelete][Delete][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_conflict_delete")
handles := make(map[int64]struct{})
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
handles[h] = struct{}{}
c.Assert(keysMap, HasKey, data[0].GetValue())
return true, nil
})
c.Assert(err, IsNil)
c.Assert(len(handles), Equals, len(keysMap))
}
func (s *TestDDLSuite) TestSimpleMixed(c *C) {
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(c, fmt.Sprintf("insert into test_mixed values (%d, %d)", k, k))
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleMixed][Insert][Time Cost]%v\n", end.Sub(start))
start = time.Now()
rowID := int64(rowCount)
defaultValue := int64(-1)
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < batch; j++ {
key := atomic.AddInt64(&rowID, 1)
s.execInsert(c, fmt.Sprintf("insert into test_mixed values (%d, %d)", key, key))
key = int64(randomNum(rowCount))
s.mustExec(c, fmt.Sprintf("update test_mixed set c2 = %d where c1 = %d", defaultValue, key))
key = int64(randomNum(rowCount))
s.mustExec(c, fmt.Sprintf("delete from test_mixed where c1 = %d", key))
}
}()
}
wg.Wait()
end = time.Now()
fmt.Printf("[TestSimpleMixed][Mixed][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_mixed")
updateCount := int64(0)
insertCount := int64(0)
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
if reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) {
insertCount++
} else if reflect.DeepEqual(data[1].GetValue(), defaultValue) && data[0].GetInt64() < int64(rowCount) {
updateCount++
} else {
log.Fatalf("[TestSimpleMixed fail]invalid row: %v", data)
}
return true, nil
})
c.Assert(err, IsNil)
deleteCount := atomic.LoadInt64(&rowID) - insertCount - updateCount
c.Assert(insertCount, Greater, int64(0))
c.Assert(updateCount, Greater, int64(0))
c.Assert(deleteCount, Greater, int64(0))
}
func (s *TestDDLSuite) TestSimpleInc(c *C) {
workerNum := 10
rowCount := 1000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(c, fmt.Sprintf("insert into test_inc values (%d, %d)", k, k))
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleInc][Insert][Time Cost]%v\n", end.Sub(start))
start = time.Now()
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < batch; j++ {
s.mustExec(c, fmt.Sprintf("update test_inc set c2 = c2 + 1 where c1 = 0"))
}
}()
}
wg.Wait()
end = time.Now()
fmt.Printf("[TestSimpleInc][Update][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
c.Assert(err, IsNil)
tbl := s.getTable(c, "test_inc")
err = tbl.IterRecords(ctx, tbl.FirstKey(), tbl.Cols(), func(h int64, data []types.Datum, cols []*table.Column) (bool, error) {
if reflect.DeepEqual(data[0].GetValue(), int64(0)) {
if *enableRestart {
c.Assert(data[1].GetValue(), GreaterEqual, int64(rowCount))
} else {
c.Assert(data[1].GetValue(), Equals, int64(rowCount))
}
} else {
c.Assert(data[0].GetValue(), Equals, data[1].GetValue())
}
return true, nil
})
c.Assert(err, IsNil)
}
// addEnvPath appends newPath to $PATH.
func addEnvPath(newPath string) {
os.Setenv("PATH", fmt.Sprintf("%s%c%s", os.Getenv("PATH"), os.PathListSeparator, newPath))
}
func init() {
rand.Seed(time.Now().UnixNano())
store.Register("tikv", tikv.Driver{})
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.conf;
import com.google.common.base.Joiner;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.conf.Validator.PatternSet;
import org.apache.hadoop.hive.conf.Validator.RangeValidator;
import org.apache.hadoop.hive.conf.Validator.RatioValidator;
import org.apache.hadoop.hive.conf.Validator.SizeValidator;
import org.apache.hadoop.hive.conf.Validator.StringSet;
import org.apache.hadoop.hive.conf.Validator.TimeValidator;
import org.apache.hadoop.hive.conf.Validator.WritableDirectoryValidator;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Shell;
import org.apache.hive.common.HiveCompat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.login.LoginException;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URL;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Hive Configuration.
*/
public class HiveConf extends Configuration {
protected String hiveJar;
protected Properties origProp;
protected String auxJars;
private static final Logger l4j = LoggerFactory.getLogger(HiveConf.class);
private static boolean loadMetastoreConfig = false;
private static boolean loadHiveServer2Config = false;
private static URL hiveDefaultURL = null;
private static URL hiveSiteURL = null;
private static URL hivemetastoreSiteUrl = null;
private static URL hiveServer2SiteUrl = null;
private static byte[] confVarByteArray = null;
private static final Map<String, ConfVars> vars = new HashMap<String, ConfVars>();
private static final Map<String, ConfVars> metaConfs = new HashMap<String, ConfVars>();
private final List<String> restrictList = new ArrayList<String>();
private final Set<String> hiddenSet = new HashSet<String>();
private Pattern modWhiteListPattern = null;
private volatile boolean isSparkConfigUpdated = false;
private static final int LOG_PREFIX_LENGTH = 64;
public boolean getSparkConfigUpdated() {
return isSparkConfigUpdated;
}
public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
this.isSparkConfigUpdated = isSparkConfigUpdated;
}
public interface EncoderDecoder<K, V> {
V encode(K key);
K decode(V value);
}
public static class URLEncoderDecoder implements EncoderDecoder<String, String> {
private static final String UTF_8 = "UTF-8";
@Override
public String encode(String key) {
try {
return URLEncoder.encode(key, UTF_8);
} catch (UnsupportedEncodingException e) {
return key;
}
}
@Override
public String decode(String value) {
try {
return URLDecoder.decode(value, UTF_8);
} catch (UnsupportedEncodingException e) {
return value;
}
}
}
public static class EncoderDecoderFactory {
public static final URLEncoderDecoder URL_ENCODER_DECODER = new URLEncoderDecoder();
}
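  // Illustrative sketch only (exampleUrlRoundTrip is a hypothetical helper, not part of
  // the original class): it shows how the shared URL_ENCODER_DECODER instance is meant to
  // be used. decode() reverses encode(); on an UnsupportedEncodingException both methods
  // simply fall back to returning their input unchanged.
  private static String exampleUrlRoundTrip(String rawValue) {
    String encoded = EncoderDecoderFactory.URL_ENCODER_DECODER.encode(rawValue);
    return EncoderDecoderFactory.URL_ENCODER_DECODER.decode(encoded);
  }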
static {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
if (classLoader == null) {
classLoader = HiveConf.class.getClassLoader();
}
hiveDefaultURL = classLoader.getResource("hive-default.xml");
// Look for hive-site.xml on the CLASSPATH and log its location if found.
hiveSiteURL = findConfigFile(classLoader, "hive-site.xml", true);
hivemetastoreSiteUrl = findConfigFile(classLoader, "hivemetastore-site.xml", false);
hiveServer2SiteUrl = findConfigFile(classLoader, "hiveserver2-site.xml", false);
for (ConfVars confVar : ConfVars.values()) {
vars.put(confVar.varname, confVar);
}
Set<String> llapDaemonConfVarsSetLocal = new LinkedHashSet<>();
populateLlapDaemonVarsSet(llapDaemonConfVarsSetLocal);
llapDaemonVarsSet = Collections.unmodifiableSet(llapDaemonConfVarsSetLocal);
}
private static URL findConfigFile(ClassLoader classLoader, String name, boolean doLog) {
URL result = classLoader.getResource(name);
if (result == null) {
String confPath = System.getenv("HIVE_CONF_DIR");
result = checkConfigFile(new File(confPath, name));
if (result == null) {
String homePath = System.getenv("HIVE_HOME");
String nameInConf = "conf" + File.pathSeparator + name;
result = checkConfigFile(new File(homePath, nameInConf));
if (result == null) {
URI jarUri = null;
try {
jarUri = HiveConf.class.getProtectionDomain().getCodeSource().getLocation().toURI();
} catch (Throwable e) {
if (l4j.isInfoEnabled()) {
l4j.info("Cannot get jar URI", e);
}
System.err.println("Cannot get jar URI: " + e.getMessage());
}
result = checkConfigFile(new File(new File(jarUri).getParentFile(), nameInConf));
}
}
}
if (doLog && l4j.isInfoEnabled()) {
l4j.info("Found configuration file " + result);
}
return result;
}
private static URL checkConfigFile(File f) {
try {
return (f.exists() && f.isFile()) ? f.toURI().toURL() : null;
} catch (Throwable e) {
if (l4j.isInfoEnabled()) {
l4j.info("Error looking for config " + f, e);
}
System.err.println("Error looking for config " + f + ": " + e.getMessage());
return null;
}
}
@InterfaceAudience.Private
public static final String PREFIX_LLAP = "llap.";
@InterfaceAudience.Private
public static final String PREFIX_HIVE_LLAP = "hive.llap.";
/**
* Metastore related options that the db is initialized against. When a conf
* var in this list is changed, the metastore instance for the CLI will
* be recreated so that the change will take effect.
*/
public static final HiveConf.ConfVars[] metaVars = {
HiveConf.ConfVars.METASTOREWAREHOUSE,
HiveConf.ConfVars.REPLDIR,
HiveConf.ConfVars.METASTOREURIS,
HiveConf.ConfVars.METASTORE_SERVER_PORT,
HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES,
HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME,
HiveConf.ConfVars.METASTOREPWD,
HiveConf.ConfVars.METASTORECONNECTURLHOOK,
HiveConf.ConfVars.METASTORECONNECTURLKEY,
HiveConf.ConfVars.METASTORESERVERMINTHREADS,
HiveConf.ConfVars.METASTORESERVERMAXTHREADS,
HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE,
HiveConf.ConfVars.METASTORE_INT_ORIGINAL,
HiveConf.ConfVars.METASTORE_INT_ARCHIVED,
HiveConf.ConfVars.METASTORE_INT_EXTRACTED,
HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE,
HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL,
HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL,
HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE,
HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES,
HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE,
HiveConf.ConfVars.METASTORE_VALIDATE_TABLES,
HiveConf.ConfVars.METASTORE_DATANUCLEUS_INIT_COL_INFO,
HiveConf.ConfVars.METASTORE_VALIDATE_COLUMNS,
HiveConf.ConfVars.METASTORE_VALIDATE_CONSTRAINTS,
HiveConf.ConfVars.METASTORE_STORE_MANAGER_TYPE,
HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL,
HiveConf.ConfVars.METASTORE_TRANSACTION_ISOLATION,
HiveConf.ConfVars.METASTORE_CACHE_LEVEL2,
HiveConf.ConfVars.METASTORE_CACHE_LEVEL2_TYPE,
HiveConf.ConfVars.METASTORE_IDENTIFIER_FACTORY,
HiveConf.ConfVars.METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK,
HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS,
HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX,
HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ,
HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION,
HiveConf.ConfVars.METASTORE_EVENT_MESSAGE_FACTORY,
HiveConf.ConfVars.METASTORE_FILTER_HOOK,
HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS,
HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS,
HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX,
HiveConf.ConfVars.METASTORE_INIT_HOOKS,
HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
HiveConf.ConfVars.HMSHANDLERATTEMPTS,
HiveConf.ConfVars.HMSHANDLERINTERVAL,
HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF,
HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
HiveConf.ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS,
HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
HiveConf.ConfVars.USERS_IN_ADMIN_ROLE,
HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
HiveConf.ConfVars.HIVE_TXN_MANAGER,
HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES,
HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE,
HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX,
HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER,
HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_FPP,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL,
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
HiveConf.ConfVars.METASTORE_FASTPATH,
HiveConf.ConfVars.METASTORE_HBASE_CATALOG_CACHE_SIZE,
HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE,
HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY,
HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
HiveConf.ConfVars.METASTORE_HBASE_CACHE_TIME_TO_LIVE,
HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_WRITER_WAIT,
HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_READER_WAIT,
HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_FULL,
HiveConf.ConfVars.METASTORE_HBASE_CACHE_CLEAN_UNTIL,
HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS,
HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES,
HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_MEMORY_TTL,
HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY,
HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_HBASE_TTL,
HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS
};
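  // Illustrative sketch only (isMetaVarExample is a hypothetical helper, not part of the
  // original class): one way a caller could check whether a variable is among the
  // metastore-affecting metaVars listed above, e.g. to decide whether the CLI's metastore
  // instance needs to be recreated after a configuration change.
  private static boolean isMetaVarExample(ConfVars candidate) {
    for (ConfVars metaVar : metaVars) {
      if (metaVar == candidate) {   // enum constants can be compared by reference
        return true;
      }
    }
    return false;
  }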
/**
* User configurable Metastore vars
*/
public static final HiveConf.ConfVars[] metaConfVars = {
HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL,
HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL,
HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
HiveConf.ConfVars.METASTORE_CAPABILITY_CHECK
};
static {
for (ConfVars confVar : metaConfVars) {
metaConfs.put(confVar.varname, confVar);
}
}
public static final String HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME = "hive.llap.daemon.service.principal";
public static final String HIVE_SERVER2_AUTHENTICATION_LDAP_USERMEMBERSHIPKEY_NAME =
"hive.server2.authentication.ldap.userMembershipKey";
/**
* dbVars are the parameters that can be set per database. If these
* parameters are set as a database property, when switching to that
* database, the HiveConf variable will be changed. The change of these
* parameters will effectively change the DFS and MapReduce clusters
* for different databases.
*/
public static final HiveConf.ConfVars[] dbVars = {
HiveConf.ConfVars.HADOOPBIN,
HiveConf.ConfVars.METASTOREWAREHOUSE,
HiveConf.ConfVars.SCRATCHDIR
};
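  // Illustrative sketch only (dumpDbVarsExample is a hypothetical helper, not part of the
  // original class): the dbVars above are ordinary configuration keys, so their effective
  // values can be read straight off a HiveConf instance, e.g. before and after switching
  // databases to see which of them a database property has overridden.
  private static Map<String, String> dumpDbVarsExample(HiveConf conf) {
    Map<String, String> values = new HashMap<String, String>();
    for (ConfVars dbVar : dbVars) {
      values.put(dbVar.varname, conf.get(dbVar.varname));
    }
    return values;
  }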
/**
* Variables used by LLAP daemons.
* TODO: Eventually auto-populate this based on prefixes. The conf variables
* will need to be renamed for this.
*/
private static final Set<String> llapDaemonVarsSet;
private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal) {
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_ENABLED.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_MEMORY_MODE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_MAX_ALLOC.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_ARENA_COUNT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_DIRECT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_USE_LRFU.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_LRFU_LAMBDA.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_USE_FILEID_PATH.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ORC_ENABLE_TIME_COUNTERS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_PRINCIPAL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_KEYTAB_FILE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ZKSM_KERBEROS_PRINCIPAL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ZKSM_KERBEROS_KEYTAB_FILE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ZKSM_ZK_CONNECTION_STRING.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_SECURITY_ACL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_ACL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_SECURITY_ACL_DENY.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_ACL_DENY.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DELEGATION_TOKEN_LIFETIME.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_RPC_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_WEB_AUTO_AUTH.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_RPC_NUM_HANDLERS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WORK_DIRS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_YARN_SHUFFLE_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_RPC_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_XMX_HEADROOM.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_VCPUS_PER_INSTANCE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_NUM_FILE_CLEANER_THREADS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_FILE_CLEANUP_DELAY_SECONDS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SERVICE_REFRESH_INTERVAL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_PORT.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_SSL.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_VALIDATE_ACLS.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_LOGGER.varname);
llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_USE_FQDN.varname);
}
/**
* Get a set containing configuration parameter names used by LLAP Server instances.
* @return an unmodifiable set containing llap ConfVars
*/
public static final Set<String> getLlapDaemonConfVars() {
return llapDaemonVarsSet;
}
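  // Illustrative sketch only (llapDaemonViewExample is a hypothetical helper, not part of
  // the original class): filtering a HiveConf down to just the LLAP daemon variables
  // exposed by getLlapDaemonConfVars(), which is roughly what a daemon-side consumer of
  // this set would do.
  private static Map<String, String> llapDaemonViewExample(HiveConf conf) {
    Map<String, String> view = new HashMap<String, String>();
    for (String name : getLlapDaemonConfVars()) {
      String value = conf.get(name);
      if (value != null) {
        view.put(name, value);
      }
    }
    return view;
  }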
/**
* ConfVars.
*
* These are the default configuration properties for Hive. Each HiveConf
* object is initialized as follows:
*
* 1) Hadoop configuration properties are applied.
* 2) ConfVar properties with non-null values are overlaid.
* 3) hive-site.xml properties are overlaid.
*
* WARNING: think twice before adding any Hadoop configuration properties
* with non-null values to this list as they will override any values defined
* in the underlying Hadoop configuration.
*/
public static enum ConfVars {
// QL execution stuff
SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""),
PLAN("hive.exec.plan", "", ""),
STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
"Directory name that will be created inside table locations in order to support HDFS encryption. " +
"This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " +
"In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."),
SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
"HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
"For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
"with ${hive.scratch.dir.permission}."),
REPLDIR("hive.repl.rootdir","/user/hive/repl/",
"HDFS root dir for all replication dumps."),
REPLCMENABLED("hive.repl.cm.enabled", false,
"Turn on ChangeManager, so delete files will go to cmrootdir."),
REPLCMDIR("hive.repl.cmrootdir","/user/hive/cmroot/",
"Root dir for ChangeManager, used for deleted files."),
REPLCMRETIAN("hive.repl.cm.retain","24h",
new TimeValidator(TimeUnit.HOURS),
"Time to retain removed files in cmrootdir."),
REPLCMINTERVAL("hive.repl.cm.interval","3600s",
new TimeValidator(TimeUnit.SECONDS),
"Inteval for cmroot cleanup thread."),
LOCALSCRATCHDIR("hive.exec.local.scratchdir",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
"Local scratch space for Hive jobs"),
DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir",
"${system:java.io.tmpdir}" + File.separator + "${hive.session.id}_resources",
"Temporary local directory for added resources in the remote file system."),
SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700",
"The permission for the user specific scratch directories that get created."),
SUBMITVIACHILD("hive.exec.submitviachild", false, ""),
SUBMITLOCALTASKVIACHILD("hive.exec.submit.local.task.via.child", true,
"Determines whether local tasks (typically mapjoin hashtable generation phase) runs in \n" +
"separate JVM (true recommended) or not. \n" +
"Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues."),
SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000,
"Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \n" +
"This prevents runaway scripts from filling logs partitions to capacity"),
ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false,
"When enabled, this option allows a user script to exit successfully without consuming \n" +
"all the data from the standard input."),
STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:",
"Streaming jobs that log to standard error with this prefix can log counter or status information."),
STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true,
"Enable consumption of status and counter messages for streaming jobs."),
COMPRESSRESULT("hive.exec.compress.output", false,
"This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed. \n" +
"The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false,
"This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. \n" +
"The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", "", ""),
COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", "", ""),
BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000),
"size per reducer.The default is 256Mb, i.e if the input size is 1G, it will use 4 reducers."),
MAXREDUCERS("hive.exec.reducers.max", 1009,
"max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is\n" +
"negative, Hive will use this one as the max number of reducers when automatically determine number of reducers."),
PREEXECHOOKS("hive.exec.pre.hooks", "",
"Comma-separated list of pre-execution hooks to be invoked for each statement. \n" +
"A pre-execution hook is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
POSTEXECHOOKS("hive.exec.post.hooks", "",
"Comma-separated list of post-execution hooks to be invoked for each statement. \n" +
"A post-execution hook is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
ONFAILUREHOOKS("hive.exec.failure.hooks", "",
"Comma-separated list of on-failure hooks to be invoked for each statement. \n" +
"An on-failure hook is specified as the name of Java class which implements the \n" +
"org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "",
"Comma-separated list of hooks to be invoked for each query which can \n" +
"tranform the query before it's placed in the job.xml file. Must be a Java class which \n" +
"extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."),
CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
"Comma-separated list of statistics publishers to be invoked on counters on each job. \n" +
"A client stats publisher is specified as the name of a Java class which implements the \n" +
"org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface."),
ATSHOOKQUEUECAPACITY("hive.ats.hook.queue.capacity", 64,
"Queue size for the ATS Hook executor. If the number of outstanding submissions \n" +
"to the ATS executor exceed this amount, the Hive ATS Hook will not try to log queries to ATS."),
EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"),
EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8,
"How many jobs at most can be executed in parallel"),
HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", true,
"Whether speculative execution for reducers should be turned on. "),
HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L,
"The interval with which to poll the JobTracker for the counters the running job. \n" +
"The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be."),
DYNAMICPARTITIONING("hive.exec.dynamic.partition", true,
"Whether or not to allow dynamic partitions in DML/DDL."),
DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "strict",
"In strict mode, the user must specify at least one static partition\n" +
"in case the user accidentally overwrites all partitions.\n" +
"In nonstrict mode all partitions are allowed to be dynamic."),
DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000,
"Maximum number of dynamic partitions allowed to be created in total."),
DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100,
"Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."),
MAXCREATEDFILES("hive.exec.max.created.files", 100000L,
"Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."),
DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__",
"The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
"This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
"The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."),
DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__", ""),
// Whether to show a link to the most failed task + debugging tips
SHOW_JOB_FAIL_DEBUG_INFO("hive.exec.show.job.failure.debug.info", true,
"If a job fails, whether to provide a link in the CLI to the task with the\n" +
"most failures, along with debugging hints if applicable."),
JOB_DEBUG_CAPTURE_STACKTRACES("hive.exec.job.debug.capture.stacktraces", true,
"Whether or not stack traces parsed from the task logs of a sampled failed task \n" +
"for each failed job should be stored in the SessionState"),
JOB_DEBUG_TIMEOUT("hive.exec.job.debug.timeout", 30000, ""),
TASKLOG_DEBUG_TIMEOUT("hive.exec.tasklog.debug.timeout", 20000, ""),
OUTPUT_FILE_EXTENSION("hive.output.file.extension", null,
"String used as a file extension for output files. \n" +
"If not set, defaults to the codec extension for text files (e.g. \".gz\"), or no extension otherwise."),
HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
HIVE_IN_TEST_SHORT_LOGS("hive.in.test.short.logs", false,
"internal usage only, used only in test mode. If set true, when requesting the " +
"operation logs the short version (generated by LogDivertAppenderForTest) will be " +
"returned"),
HIVE_IN_TEST_REMOVE_LOGS("hive.in.test.remove.logs", true,
"internal usage only, used only in test mode. If set false, the operation logs, and the " +
"operation log directory will not be removed, so they can be found after the test runs."),
HIVE_IN_TEZ_TEST("hive.in.tez.test", false, "internal use only, true when in testing tez",
true),
LOCALMODEAUTO("hive.exec.mode.local.auto", false,
"Let Hive determine whether to run in local mode automatically"),
LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
"When hive.exec.mode.local.auto is true, input bytes should less than this for local mode."),
LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4,
"When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."),
DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
"Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/index/function"),
HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"),
HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100,
"maximum number of lines for footer user can define for a table file"),
HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES("hive.resultset.use.unique.column.names", true,
"Make column names unique in the result set by qualifying column names with table alias if needed.\n" +
"Table alias will be added to column names for queries of type \"select *\" or \n" +
"if query explicitly uses table alias \"select r1.x..\"."),
// Hadoop Configuration Properties
// Properties with null values are ignored and exist only for the purpose of giving us
// a symbolic name to reference in the Hive source code. Properties with non-null
// values will override any values set in the underlying Hadoop configuration.
HADOOPBIN("hadoop.bin.path", findHadoopBinary(), "", true),
YARNBIN("yarn.bin.path", findYarnBinary(), "", true),
HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem",
"The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"),
MAPREDMAXSPLITSIZE(FileInputFormat.SPLIT_MAXSIZE, 256000000L, "", true),
MAPREDMINSPLITSIZE(FileInputFormat.SPLIT_MINSIZE, 1L, "", true),
MAPREDMINSPLITSIZEPERNODE(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE, 1L, "", true),
MAPREDMINSPLITSIZEPERRACK(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK, 1L, "", true),
// The number of reduce tasks per job. Hadoop sets this value to 1 by default
// By setting this property to -1, Hive will automatically determine the correct
// number of reducers.
HADOOPNUMREDUCERS("mapreduce.job.reduces", -1, "", true),
// Metastore stuff. Be sure to update HiveConf.metaVars when you add something here!
METASTOREDBTYPE("hive.metastore.db.type", "DERBY", new StringSet("DERBY", "ORACLE", "MYSQL", "MSSQL", "POSTGRES"),
"Type of database used by the metastore. Information schema & JDBCStorageHandler depend on it."),
METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse",
"location of default database for the warehouse"),
METASTOREURIS("hive.metastore.uris", "",
"Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
METASTORE_CAPABILITY_CHECK("hive.metastore.client.capability.check", true,
"Whether to check client capabilities for potentially breaking API usage."),
METASTORE_FASTPATH("hive.metastore.fastpath", false,
"Used to avoid all of the proxies and object copies in the metastore. Note, if this is " +
"set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise " +
"undefined and most likely undesired behavior will result"),
METASTORE_FS_HANDLER_THREADS_COUNT("hive.metastore.fshandler.threads", 15,
"Number of threads to be allocated for metastore handler for fs operations."),
METASTORE_HBASE_CATALOG_CACHE_SIZE("hive.metastore.hbase.catalog.cache.size", 50000, "Maximum number of " +
"objects we will place in the hbase metastore catalog cache. The objects will be divided up by " +
"types that we need to cache."),
METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.hbase.aggregate.stats.cache.size", 10000,
"Maximum number of aggregate stats nodes that we will place in the hbase metastore aggregate stats cache."),
METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.hbase.aggregate.stats.max.partitions", 10000,
"Maximum number of partitions that are aggregated per cache node."),
METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY("hive.metastore.hbase.aggregate.stats.false.positive.probability",
(float) 0.01, "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.hbase.aggregate.stats.max.variance", (float) 0.1,
"Maximum tolerable variance in number of partitions between a cached node and our request (default 10%)."),
METASTORE_HBASE_CACHE_TIME_TO_LIVE("hive.metastore.hbase.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
"Number of seconds for a cached node to be active in the cache before they become stale."),
METASTORE_HBASE_CACHE_MAX_WRITER_WAIT("hive.metastore.hbase.cache.max.writer.wait", "5000ms", new TimeValidator(TimeUnit.MILLISECONDS),
"Number of milliseconds a writer will wait to acquire the writelock before giving up."),
METASTORE_HBASE_CACHE_MAX_READER_WAIT("hive.metastore.hbase.cache.max.reader.wait", "1000ms", new TimeValidator(TimeUnit.MILLISECONDS),
"Number of milliseconds a reader will wait to acquire the readlock before giving up."),
METASTORE_HBASE_CACHE_MAX_FULL("hive.metastore.hbase.cache.max.full", (float) 0.9,
"Maximum cache full % after which the cache cleaner thread kicks in."),
METASTORE_HBASE_CACHE_CLEAN_UNTIL("hive.metastore.hbase.cache.clean.until", (float) 0.8,
"The cleaner thread cleans until cache reaches this % full size."),
METASTORE_HBASE_CONNECTION_CLASS("hive.metastore.hbase.connection.class",
"org.apache.hadoop.hive.metastore.hbase.VanillaHBaseConnection",
"Class used to connection to HBase"),
METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES("hive.metastore.hbase.aggr.stats.cache.entries",
10000, "How many in stats objects to cache in memory"),
METASTORE_HBASE_AGGR_STATS_MEMORY_TTL("hive.metastore.hbase.aggr.stats.memory.ttl", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Number of seconds stats objects live in memory after they are read from HBase."),
METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY(
"hive.metastore.hbase.aggr.stats.invalidator.frequency", "5s",
new TimeValidator(TimeUnit.SECONDS),
"How often the stats cache scans its HBase entries and looks for expired entries"),
METASTORE_HBASE_AGGR_STATS_HBASE_TTL("hive.metastore.hbase.aggr.stats.hbase.ttl", "604800s",
new TimeValidator(TimeUnit.SECONDS),
"Number of seconds stats entries live in HBase cache after they are created. They may be" +
" invalided by updates or partition drops before this. Default is one week."),
METASTORE_HBASE_FILE_METADATA_THREADS("hive.metastore.hbase.file.metadata.threads", 1,
"Number of threads to use to read file metadata in background to cache it."),
METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3,
"Number of retries while opening a connection to metastore"),
METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
"Number of retries upon failure of Thrift metastore calls"),
METASTORE_SERVER_PORT("hive.metastore.port", 9083, "Hive metastore listener port"),
METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s",
new TimeValidator(TimeUnit.SECONDS),
"Number of seconds for the client to wait between consecutive connection attempts"),
METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", "600s",
new TimeValidator(TimeUnit.SECONDS),
"MetaStore Client socket timeout in seconds"),
METASTORE_CLIENT_SOCKET_LIFETIME("hive.metastore.client.socket.lifetime", "0s",
new TimeValidator(TimeUnit.SECONDS),
"MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
"reconnects on the next MetaStore operation. A value of 0s means the connection\n" +
"has an infinite lifetime."),
METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine",
"password to use against metastore database"),
METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "",
"Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"),
METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true,
"Set this to true if multiple threads access metastore through JDO concurrently."),
METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL",
"jdbc:derby:;databaseName=metastore_db;create=true",
"JDBC connect string for a JDBC metastore.\n" +
"To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.\n" +
"For example, jdbc:postgresql://myhost/db?ssl=true for postgres database."),
METASTORE_DBACCESS_SSL_PROPS("hive.metastore.dbaccess.ssl.properties", "",
"Comma-separated SSL properties for metastore to access database when JDO connection URL\n" +
"enables SSL access. e.g. javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd."),
HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 10,
"The number of times to retry a HMSHandler call if there were a connection error."),
HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."),
HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
"Whether to force reloading of the HMSHandler configuration (including\n" +
"the connection URL, before the next metastore query that accesses the\n" +
"datastore. Once reloaded, this value is reset to false. Used for\n" +
"testing only."),
METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024L,
"Maximum message size in bytes a HMS will accept."),
METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200,
"Minimum number of worker threads in the Thrift server's pool."),
METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000,
"Maximum number of worker threads in the Thrift server's pool."),
METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true,
"Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
"_INTERMEDIATE_ORIGINAL",
"Intermediate dir suffixes used for archiving. Not important what they\n" +
"are, as long as collisions are avoided"),
METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived",
"_INTERMEDIATE_ARCHIVED", ""),
METASTORE_INT_EXTRACTED("hive.metastore.archive.intermediate.extracted",
"_INTERMEDIATE_EXTRACTED", ""),
METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", "",
"The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."),
METASTORE_KERBEROS_PRINCIPAL("hive.metastore.kerberos.principal",
"hive-metastore/[email protected]",
"The service principal for the metastore Thrift server. \n" +
"The special string _HOST will be replaced automatically with the correct host name."),
METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false,
"If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false,
"If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
METASTORE_USE_THRIFT_COMPACT_PROTOCOL("hive.metastore.thrift.compact.protocol.enabled", false,
"If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" +
"Setting it to true will break compatibility with older clients running TBinaryProtocol."),
METASTORE_TOKEN_SIGNATURE("hive.metastore.token.signature", "",
"The delegation token service name to match when selecting a token from the current user's tokens."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS("hive.cluster.delegation.token.store.class",
"org.apache.hadoop.hive.thrift.MemoryTokenStore",
"The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR(
"hive.cluster.delegation.token.store.zookeeper.connectString", "",
"The ZooKeeper token store connect string. You can re-use the configuration value\n" +
"set in hive.zookeeper.quorum, by leaving this parameter unset."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE(
"hive.cluster.delegation.token.store.zookeeper.znode", "/hivedelegation",
"The root path for token store data. Note that this is used by both HiveServer2 and\n" +
"MetaStore to store delegation Token. One directory gets created for each of them.\n" +
"The final directory names would have the servername appended to it (HIVESERVER2,\n" +
"METASTORE)."),
METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL(
"hive.cluster.delegation.token.store.zookeeper.acl", "",
"ACL for token store entries. Comma separated list of ACL entries. For example:\n" +
"sasl:hive/[email protected]:cdrwa,sasl:hive/[email protected]:cdrwa\n" +
"Defaults to all permissions for the hiveserver2/metastore process user."),
METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
"List of comma separated metastore object types that should be pinned in the cache"),
METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "HikariCP", new StringSet("BONECP", "DBCP",
"HikariCP", "NONE"),
"Specify connection pool library for datanucleus"),
METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS("datanucleus.connectionPool.maxPoolSize", 10,
"Specify the maximum number of connections in the connection pool. Note: The configured size will be used by\n" +
"2 connection pools (TxnHandler and ObjectStore). When configuring the max connection pool size, it is\n" +
"recommended to take into account the number of metastore instances and the number of HiveServer2 instances\n" +
"configured with embedded metastore. To get optimal performance, set config to meet the following condition\n"+
"(2 * pool_size * metastore_instances + 2 * pool_size * HS2_instances_with_embedded_metastore) = \n" +
"(2 * physical_core_count + hard_disk_count)."),
// Workaround for DN bug on Postgres:
// http://www.datanucleus.org/servlet/forum/viewthread_thread,7985_offset
METASTORE_DATANUCLEUS_INIT_COL_INFO("datanucleus.rdbms.initializeColumnInfo", "NONE",
"initializeColumnInfo setting for DataNucleus; set to NONE at least on Postgres."),
METASTORE_VALIDATE_TABLES("datanucleus.schema.validateTables", false,
"validates existing schema against code. turn this on if you want to verify existing schema"),
METASTORE_VALIDATE_COLUMNS("datanucleus.schema.validateColumns", false,
"validates existing schema against code. turn this on if you want to verify existing schema"),
METASTORE_VALIDATE_CONSTRAINTS("datanucleus.schema.validateConstraints", false,
"validates existing schema against code. turn this on if you want to verify existing schema"),
METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms", "metadata store type"),
METASTORE_AUTO_CREATE_ALL("datanucleus.schema.autoCreateAll", false,
"Auto creates necessary schema on a startup if one doesn't exist. Set this to false, after creating it once."
+ "To enable auto create also set hive.metastore.schema.verification=false. Auto creation is not "
+ "recommended for production use cases, run schematool command instead." ),
METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", true,
"Enforce metastore schema version consistency.\n" +
"True: Verify that version information stored in is compatible with one from Hive jars. Also disable automatic\n" +
" schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" +
" proper metastore schema migration. (Default)\n" +
"False: Warn if the version information stored in metastore doesn't match with one from in Hive jars."),
METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION("hive.metastore.schema.verification.record.version", false,
"When true the current MS version is recorded in the VERSION table. If this is disabled and verification is\n" +
" enabled the MS will be unusable."),
METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed",
"Default transaction isolation level for identity generation."),
METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false,
"Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server"),
METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none", ""),
METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1",
"Name of the identifier factory to use when generating table/column names etc. \n" +
"'datanucleus1' is used for backward compatibility with DataNucleus v1"),
METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
"Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300,
"Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" +
"The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" +
"but it may also cause higher memory requirement at the client side."),
METASTORE_BATCH_RETRIEVE_OBJECTS_MAX(
"hive.metastore.batch.retrieve.table.partition.max", 1000,
"Maximum number of objects that metastore internally retrieves in one batch."),
METASTORE_INIT_HOOKS("hive.metastore.init.hooks", "",
"A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. \n" +
"An init hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener."),
METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "",
"List of comma separated listeners for metastore events."),
METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "",
"A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" +
" interface. The metastore event and corresponding listener method will be invoked in separate JDO transactions. " +
"Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction."),
METASTORE_TRANSACTIONAL_EVENT_LISTENERS("hive.metastore.transactional.event.listeners", "",
"A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" +
" interface. Both the metastore event and corresponding listener method will be invoked in the same JDO transaction."),
METASTORE_EVENT_DB_LISTENER_TTL("hive.metastore.event.db.listener.timetolive", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"time after which events will be removed from the database listener queue"),
METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false,
"Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
"for operations like drop-partition (disallow the drop-partition if the user in\n" +
"question doesn't have permissions to delete the corresponding directory\n" +
"on the storage)."),
METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK("hive.metastore.authorization.storage.check.externaltable.drop", true,
"Should StorageBasedAuthorization check permission of the storage before dropping external table.\n" +
"StorageBasedAuthorization already does this check for managed table. For external table however,\n" +
"anyone who has read permission of the directory could drop external table, which is surprising.\n" +
"The flag is set to false by default to maintain backward compatibility."),
METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Frequency at which timer task runs to purge expired events in metastore."),
METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Duration after which events expire from events table"),
METASTORE_EVENT_MESSAGE_FACTORY("hive.metastore.event.message.factory",
"org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory",
"Factory class for making encoding and decoding messages in the events generated."),
METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", true,
"In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
"the client's reported user and group permissions. Note that this property must be set on \n" +
"both the client and server sides. Further note that its best effort. \n" +
"If client sets its to true and server sets it to false, client setting will be ignored."),
METASTORE_PARTITION_NAME_WHITELIST_PATTERN("hive.metastore.partition.name.whitelist.pattern", "",
"Partition names will be checked against this regex pattern and rejected if not matched."),
METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false,
"Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" +
"improves metastore perf for integral columns, especially if there's a large number of partitions.\n" +
"However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" +
"leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" +
"is also irrelevant."),
METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true,
"Whether the Hive metastore should try to use direct SQL queries instead of the\n" +
"DataNucleus for certain read paths. This can improve metastore performance when\n" +
"fetching many partitions or column statistics by orders of magnitude; however, it\n" +
"is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\n" +
"the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't\n" +
"work for all queries on your datastore. If all SQL queries fail (for example, your\n" +
"metastore is backed by MongoDB), you might want to disable this to save the\n" +
"try-and-fall-back cost."),
METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE("hive.metastore.direct.sql.batch.size", 0,
"Batch size for partition and other object retrieval from the underlying DB in direct\n" +
"SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" +
"that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" +
"may impede performance. -1 means no batching, 0 means automatic batching."),
METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true,
"Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" +
"modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" +
"select query has incorrect syntax or something similar inside a transaction, the\n" +
"entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" +
"should disable the usage of direct SQL inside transactions if that happens in your case."),
METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH("hive.direct.sql.max.query.length", 100, "The maximum\n" +
" size of a query string (in KB)."),
METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("hive.direct.sql.max.elements.in.clause", 1000,
"The maximum number of values in a IN clause. Once exceeded, it will be broken into\n" +
" multiple OR separated IN clauses."),
METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("hive.direct.sql.max.elements.values.clause",
1000, "The maximum number of values in a VALUES clause for INSERT statement."),
METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false,
"Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " +
"either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " +
"as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
"pruning is the correct behaviour"),
METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
"hive.metastore.disallow.incompatible.col.type.changes", true,
"If true (default is false), ALTER TABLE operations which change the type of a\n" +
"column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
"RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
"datatypes can be converted from string to any type. The map is also serialized as\n" +
"a string, which can be read as a string as well. However, with any binary\n" +
"serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\n" +
"when subsequently trying to access old partitions.\n" +
"\n" +
"Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\n" +
"not blocked.\n" +
"\n" +
"See HIVE-4409 for more details."),
METASTORE_LIMIT_PARTITION_REQUEST("hive.metastore.limit.partition.request", -1,
"This limits the number of partitions that can be requested from the metastore for a given table.\n" +
"The default value \"-1\" means no limit."),
NEWTABLEDEFAULTPARA("hive.table.parameters.default", "",
"Default property values for newly created tables"),
DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", "",
"Table Properties to copy over when executing a Create Table Like."),
METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
"Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. \n" +
"This class is used to store and retrieval of raw metadata objects such as table, database"),
METASTORE_CACHED_RAW_STORE_IMPL("hive.metastore.cached.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
"Name of the wrapped RawStore class"),
METASTORE_CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY(
"hive.metastore.cached.rawstore.cache.update.frequency", "60", new TimeValidator(
TimeUnit.SECONDS),
"The time after which metastore cache is updated from metastore DB."),
METASTORE_TXN_STORE_IMPL("hive.metastore.txn.store.impl",
"org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler",
"Name of class that implements org.apache.hadoop.hive.metastore.txn.TxnStore. This " +
"class is used to store and retrieve transactions and locks"),
METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver",
"Driver class name for a JDBC metastore"),
METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
"org.datanucleus.api.jdo.JDOPersistenceManagerFactory",
"class implementing the jdo persistence"),
METASTORE_EXPRESSION_PROXY_CLASS("hive.metastore.expression.proxy",
"org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore", ""),
METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true,
"Detaches all objects from session so that they can be used after transaction is committed"),
METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true,
"Reads outside of transactions"),
METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP",
"Username to use against metastore database"),
METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", "",
"List of comma separated listeners for the end of metastore functions."),
METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", "",
"List of comma separated keys occurring in table properties which will get inherited to newly created partitions. \n" +
"* implies all the keys will get inherited."),
METASTORE_FILTER_HOOK("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl",
"Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager"
+ "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
FIRE_EVENTS_FOR_DML("hive.metastore.dml.events", false, "If true, the metastore will be asked" +
" to fire events for DML operations"),
METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS("hive.metastore.client.drop.partitions.using.expressions", true,
"Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, " +
"or drops partitions iteratively"),
METASTORE_AGGREGATE_STATS_CACHE_ENABLED("hive.metastore.aggregate.stats.cache.enabled", true,
"Whether aggregate stats caching is enabled or not."),
METASTORE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.aggregate.stats.cache.size", 10000,
"Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache."),
METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.aggregate.stats.cache.max.partitions", 10000,
"Maximum number of partitions that are aggregated per cache node."),
METASTORE_AGGREGATE_STATS_CACHE_FPP("hive.metastore.aggregate.stats.cache.fpp", (float) 0.01,
"Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.aggregate.stats.cache.max.variance", (float) 0.01,
"Maximum tolerable variance in number of partitions between a cached node and our request (default 1%)."),
METASTORE_AGGREGATE_STATS_CACHE_TTL("hive.metastore.aggregate.stats.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
"Number of seconds for a cached node to be active in the cache before they become stale."),
METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT("hive.metastore.aggregate.stats.cache.max.writer.wait", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Number of milliseconds a writer will wait to acquire the writelock before giving up."),
METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT("hive.metastore.aggregate.stats.cache.max.reader.wait", "1000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Number of milliseconds a reader will wait to acquire the readlock before giving up."),
METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL("hive.metastore.aggregate.stats.cache.max.full", (float) 0.9,
"Maximum cache full % after which the cache cleaner thread kicks in."),
METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL("hive.metastore.aggregate.stats.cache.clean.until", (float) 0.8,
"The cleaner thread cleans until cache reaches this % full size."),
METASTORE_METRICS("hive.metastore.metrics.enabled", false, "Enable metrics on the metastore."),
METASTORE_INIT_METADATA_COUNT_ENABLED("hive.metastore.initial.metadata.count.enabled", true,
"Enable a metadata count at metastore startup for metrics."),
// Metastore SSL settings
HIVE_METASTORE_USE_SSL("hive.metastore.use.SSL", false,
"Set this to true for using SSL encryption in HMS server."),
HIVE_METASTORE_SSL_KEYSTORE_PATH("hive.metastore.keystore.path", "",
"Metastore SSL certificate keystore location."),
HIVE_METASTORE_SSL_KEYSTORE_PASSWORD("hive.metastore.keystore.password", "",
"Metastore SSL certificate keystore password."),
HIVE_METASTORE_SSL_TRUSTSTORE_PATH("hive.metastore.truststore.path", "",
"Metastore SSL certificate truststore location."),
HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD("hive.metastore.truststore.password", "",
"Metastore SSL certificate truststore password."),
// Parameters for exporting metadata on table drop (requires the use of the)
// org.apache.hadoop.hive.ql.parse.MetaDataExportListener preevent listener
METADATA_EXPORT_LOCATION("hive.metadata.export.location", "",
"When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
"it is the location to which the metadata will be exported. The default is an empty string, which results in the \n" +
"metadata being exported to the current user's home directory on HDFS."),
MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true,
"When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
"this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" +
"alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."),
// CLI
CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""),
CLIPRINTCURRENTDB("hive.cli.print.current.db", false,
"Whether to include the current database in the Hive prompt."),
CLIPROMPT("hive.cli.prompt", "hive",
"Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" +
"Variable substitution will only be invoked at the Hive CLI startup."),
CLIPRETTYOUTPUTNUMCOLS("hive.cli.pretty.output.num.cols", -1,
"The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.\n" +
"If the value of this property is -1, then Hive will use the auto-detected terminal width."),
HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
// Things we log in the jobconf
// session identifier
HIVESESSIONID("hive.session.id", "", ""),
// whether session is running in silent mode or not
HIVESESSIONSILENT("hive.session.silent", false, ""),
HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false,
"Whether to log Hive query, query plan, runtime statistics etc."),
HIVEQUERYSTRING("hive.query.string", "",
"Query being executed (might be multiple per a session)"),
HIVEQUERYID("hive.query.id", "",
"ID for query being executed (might be multiple per a session)"),
HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"),
// hive jar
HIVEJAR("hive.jar.path", "",
"The location of hive_cli.jar that is used when submitting jobs in a separate jvm."),
HIVEAUXJARS("hive.aux.jars.path", "",
"The location of the plugin jars that contain implementations of user defined functions and serdes."),
// reloadable jars
HIVERELOADABLEJARS("hive.reloadable.aux.jars.path", "",
"The locations of the plugin jars, which can be a comma-separated folders or jars. Jars can be renewed\n"
+ "by executing reload command. And these jars can be "
+ "used as the auxiliary classes like creating a UDF or SerDe."),
// hive added files and jars
HIVEADDEDFILES("hive.added.files.path", "", "This an internal parameter."),
HIVEADDEDJARS("hive.added.jars.path", "", "This an internal parameter."),
HIVEADDEDARCHIVES("hive.added.archives.path", "", "This an internal parameter."),
HIVE_CURRENT_DATABASE("hive.current.database", "", "Database name used by current session. Internal usage only.", true),
// for hive script operator
HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", "0s",
new TimeValidator(TimeUnit.SECONDS),
"How long to run autoprogressor for the script/UDTF operators.\n" +
"Set to 0 for forever."),
HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false,
"Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \n" +
"to avoid the task getting killed because of inactivity. Hive sends progress information when the script is \n" +
"outputting to stderr. This option removes the need of periodically producing stderr messages, \n" +
"but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker."),
HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID",
"Name of the environment variable that holds the unique script operator ID in the user's \n" +
"transform function (the custom mapper/reducer that the user has specified in the query)"),
HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false,
"Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"),
HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist",
"hive.txn.valid.txns,hive.script.operator.env.blacklist",
"Comma separated list of keys from the configuration file not to convert to environment " +
"variables when envoking the script operator"),
HIVE_STRICT_CHECKS_LARGE_QUERY("hive.strict.checks.large.query", false,
"Enabling strict large query checks disallows the following:\n" +
" Orderby without limit.\n" +
" No partition being picked up for a query against partitioned table.\n" +
"Note that these checks currently do not consider data size, only the query pattern."),
HIVE_STRICT_CHECKS_TYPE_SAFETY("hive.strict.checks.type.safety", true,
"Enabling strict type safety checks disallows the following:\n" +
" Comparing bigints and strings.\n" +
" Comparing bigints and doubles."),
HIVE_STRICT_CHECKS_CARTESIAN("hive.strict.checks.cartesian.product", true,
"Enabling strict Cartesian join checks disallows the following:\n" +
" Cartesian product (cross join)."),
HIVE_STRICT_CHECKS_BUCKETING("hive.strict.checks.bucketing", true,
"Enabling strict bucketing checks disallows the following:\n" +
" Load into bucketed tables."),
@Deprecated
HIVEMAPREDMODE("hive.mapred.mode", null,
"Deprecated; use hive.strict.checks.* settings instead."),
HIVEALIAS("hive.alias", "", ""),
HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"),
HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"),
HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000,
"How many rows in the right-most join operand Hive should buffer before emitting the join result."),
HIVEJOINCACHESIZE("hive.join.cache.size", 25000,
"How many rows in the joining tables (except the streaming table) should be cached in memory."),
// CBO related
HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Calcite framework."),
    HIVE_CBO_CNF_NODES_LIMIT("hive.cbo.cnf.maxnodes", -1, "When converting to conjunctive normal form (CNF), fail if " +
        "the expression exceeds this threshold; the threshold is expressed in terms of the number of nodes (leaves and " +
        "interior nodes). -1 to not set up a threshold."),
HIVE_CBO_RETPATH_HIVEOP("hive.cbo.returnpath.hiveop", false, "Flag to control calcite plan to hive operator conversion"),
    HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on "
        + "CPU, IO and cardinality. Otherwise, the cost model is based on cardinality."),
HIVE_CBO_COST_MODEL_CPU("hive.cbo.costmodel.cpu", "0.000001", "Default cost of a comparison"),
    HIVE_CBO_COST_MODEL_NET("hive.cbo.costmodel.network", "150.0", "Default cost of transferring a byte over the network;"
        + " expressed as a multiple of CPU cost"),
    HIVE_CBO_COST_MODEL_LFS_WRITE("hive.cbo.costmodel.local.fs.write", "4.0", "Default cost of writing a byte to local FS;"
        + " expressed as a multiple of NETWORK cost"),
    HIVE_CBO_COST_MODEL_LFS_READ("hive.cbo.costmodel.local.fs.read", "4.0", "Default cost of reading a byte from local FS;"
        + " expressed as a multiple of NETWORK cost"),
    HIVE_CBO_COST_MODEL_HDFS_WRITE("hive.cbo.costmodel.hdfs.write", "10.0", "Default cost of writing a byte to HDFS;"
        + " expressed as a multiple of Local FS write cost"),
    HIVE_CBO_COST_MODEL_HDFS_READ("hive.cbo.costmodel.hdfs.read", "1.5", "Default cost of reading a byte from HDFS;"
        + " expressed as a multiple of Local FS read cost"),
HIVE_CBO_SHOW_WARNINGS("hive.cbo.show.warnings", true,
"Toggle display of CBO warnings like missing column stats"),
AGGR_JOIN_TRANSPOSE("hive.transpose.aggr.join", false, "push aggregates through join"),
SEMIJOIN_CONVERSION("hive.optimize.semijoin.conversion", true, "convert group by followed by inner equi join into semijoin"),
    HIVE_COLUMN_ALIGNMENT("hive.order.columnalignment", true, "Flag to control whether we want to try to align " +
        "columns in operators such as Aggregate or Join so that we try to reduce the number of shuffling stages"),
// materialized views
HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING("hive.materializedview.rewriting", false,
"Whether to try to rewrite queries using the materialized views enabled for rewriting"),
HIVE_MATERIALIZED_VIEW_FILE_FORMAT("hive.materializedview.fileformat", "ORC",
new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
"Default file format for CREATE MATERIALIZED VIEW statement"),
HIVE_MATERIALIZED_VIEW_SERDE("hive.materializedview.serde",
"org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Default SerDe used for materialized views"),
    // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.rows;
    // it needs to be removed in Hive 0.13. Also, do not change the default (see SMB operator).
HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""),
HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true,
"Whether Hive should use memory-optimized hash table for MapJoin.\n" +
"Only works on Tez and Spark, because memory-optimized hashtable cannot be serialized."),
HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent",
(float) 0.5, "Probing space percentage of the optimized hashtable"),
    HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", true, "Whether to use hybrid " +
        "grace hash join as the join method for mapjoin. Tez only."),
    HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " +
        "hybrid grace hash join, how often (how many rows apart) we check if memory is full. " +
        "This number should be a power of 2."),
    HIVEHYBRIDGRACEHASHJOINMINWBSIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace " +
        "hash join, the minimum write buffer size used by the optimized hashtable. Default is 512 KB."),
    HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For " +
        "hybrid grace hash join, the minimum number of partitions to create."),
HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024,
"Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" +
"store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" +
"joins unnecessary memory will be allocated and then trimmed."),
HIVEHYBRIDGRACEHASHJOINBLOOMFILTER("hive.mapjoin.hybridgrace.bloomfilter", true, "Whether to " +
"use BloomFilter in Hybrid grace hash join to minimize unnecessary spilling."),
HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000,
"How many rows with the same key value should be cached in memory per smb joined table."),
HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000,
"Number of rows after which size of the grouping keys/aggregation classes is performed"),
HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5,
"Portion of total memory to be used by map-side group aggregation hash table"),
HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3,
"Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"),
HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9,
"The max memory to be used by map-side group aggregation hash table.\n" +
"If the memory usage is higher than this number, force to flush data"),
HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.5,
"Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. \n" +
"Set to 1 to make sure hash aggregation is never turned off."),
HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true,
"Whether to optimize multi group by query to generate single M/R job plan. If the multi group by query has \n" +
"common group by keys, it will be optimized to generate single M/R job."),
HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", true,
"If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
"the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\n" +
"is that it limits the number of mappers to the number of files."),
HIVE_GROUPBY_POSITION_ALIAS("hive.groupby.position.alias", false,
"Whether to enable using Column Position Alias in Group By"),
HIVE_ORDERBY_POSITION_ALIAS("hive.orderby.position.alias", true,
"Whether to enable using Column Position Alias in Order By"),
@Deprecated
HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false,
"Whether to enable using Column Position Alias in Group By or Order By (deprecated).\n" +
"Use " + HIVE_ORDERBY_POSITION_ALIAS.varname + " or " + HIVE_GROUPBY_POSITION_ALIAS.varname + " instead"),
HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30,
"Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.\n" +
"For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;\n" +
"4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).\n" +
"This can lead to explosion across map-reduce boundary if the cardinality of T is very high,\n" +
"and map-side aggregation does not do a very good job. \n" +
"\n" +
"This parameter decides if Hive should add an additional map-reduce job. If the grouping set\n" +
"cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
"assumption that the original group by will reduce the data size."),
    HIVE_GROUPBY_LIMIT_EXTRASTEP("hive.groupby.limit.extrastep", true, "This parameter decides if Hive should \n" +
        "create a new MR job for sorting the final output"),
// Max file num and size used to do a single copy (after that, distcp is used)
HIVE_EXEC_COPYFILE_MAXNUMFILES("hive.exec.copyfile.maxnumfiles", 1L,
"Maximum number of files Hive uses to do sequential HDFS copies between directories." +
"Distributed copies (distcp) will be used instead for larger numbers of files so that copies can be done faster."),
HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
"Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories." +
"Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
// for hive udtf operator
HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,
"Whether Hive should automatically send progress information to TaskTracker \n" +
"when using UDTF's to prevent the task getting killed because of inactivity. Users should be cautious \n" +
"because this may prevent TaskTracker from killing tasks with infinite loops."),
HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC", "parquet"),
"Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"),
HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none",
new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC", "parquet"),
"Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" +
"created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" +
"for all tables."),
HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "SequenceFile", new StringSet("TextFile", "SequenceFile", "RCfile", "Llap"),
"Default file format for storing result of the query."),
HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"),
// default serde for rcfile
HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde",
"org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe",
"The default SerDe Hive will use for the RCFile format"),
HIVEDEFAULTSERDE("hive.default.serde",
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
"The default SerDe Hive will use for storage formats that do not specify a SerDe."),
SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema",
"org.apache.hadoop.hive.ql.io.orc.OrcSerde," +
"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," +
"org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," +
"org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe," +
"org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe," +
"org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe," +
"org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe," +
"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe",
"SerDes retrieving schema from metastore. This is an internal parameter."),
HIVEHISTORYFILELOC("hive.querylog.location",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
"Location of Hive run time structured log file"),
HIVE_LOG_INCREMENTAL_PLAN_PROGRESS("hive.querylog.enable.plan.progress", true,
"Whether to log the plan's progress every time a job's progress is checked.\n" +
"These logs are written to the location specified by hive.querylog.location"),
HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", "60000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"The interval to wait between logging the plan's progress.\n" +
"If there is a whole number percentage change in the progress of the mappers or the reducers,\n" +
"the progress is logged regardless of this value.\n" +
"The actual interval will be the ceiling of (this value divided by the value of\n" +
"hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval\n" +
"I.e. if it is not divide evenly by the value of hive.exec.counters.pull.interval it will be\n" +
"logged less frequently than specified.\n" +
"This only has an effect if hive.querylog.enable.plan.progress is set to true."),
HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
"The default SerDe for transmitting input data to and reading output data from the user scripts. "),
HIVESCRIPTRECORDREADER("hive.script.recordreader",
"org.apache.hadoop.hive.ql.exec.TextRecordReader",
"The default record reader for reading data from the user scripts. "),
HIVESCRIPTRECORDWRITER("hive.script.recordwriter",
"org.apache.hadoop.hive.ql.exec.TextRecordWriter",
"The default record writer for writing data to the user scripts. "),
HIVESCRIPTESCAPE("hive.transform.escape.input", false,
"This adds an option to escape special chars (newlines, carriage returns and\n" +
"tabs) when they are passed to the user script. This is useful if the Hive tables\n" +
"can contain data that contains special characters."),
HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000,
"Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \n" +
"The last record before the end of stream can have less than hive.binary.record.max.length bytes"),
HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"),
//small table file size
HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L,
"The threshold for the input file size of the small tables; if the file size is smaller \n" +
"than this threshold, it will try to convert the common join into map join"),
HIVE_SCHEMA_EVOLUTION("hive.exec.schema.evolution", true,
"Use schema evolution to convert self-describing file format's data to the schema desired by the reader."),
HIVE_TRANSACTIONAL_TABLE_SCAN("hive.transactional.table.scan", false,
"internal usage only -- do transaction (ACID) table scan.", true),
HIVE_TRANSACTIONAL_NUM_EVENTS_IN_MEMORY("hive.transactional.events.mem", 10000000,
"Vectorized ACID readers can often load all the delete events from all the delete deltas\n"
+ "into memory to optimize for performance. To prevent out-of-memory errors, this is a rough heuristic\n"
+ "that limits the total number of delete events that can be loaded into memory at once.\n"
+ "Roughly it has been set to 10 million delete events per bucket (~160 MB).\n"),
HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0,
"A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."),
// test mode in hive mode
HIVETESTMODE("hive.test.mode", false,
"Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.",
false),
HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_",
"In test mode, specfies prefixes for the output table", false),
HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32,
"In test mode, specfies sampling frequency for table, which is not bucketed,\n" +
"For example, the following query:\n" +
" INSERT OVERWRITE TABLE dest SELECT col1 from src\n" +
"would be converted to\n" +
" INSERT OVERWRITE TABLE test_dest\n" +
" SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))", false),
HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", "",
"In test mode, specifies comma separated table names which would not apply sampling", false),
HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false),
HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", "", "internal variable for test", false),
HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false),
HIVETESTMODEROLLBACKTXN("hive.test.rollbacktxn", false, "For testing only. Will mark every ACID transaction aborted", false),
HIVETESTMODEFAILCOMPACTION("hive.test.fail.compaction", false, "For testing only. Will cause CompactorMR to fail.", false),
HIVETESTMODEFAILHEARTBEATER("hive.test.fail.heartbeater", false, "For testing only. Will cause Heartbeater to fail.", false),
HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
"Merge small files at the end of a map-only job"),
HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false,
"Merge small files at the end of a map-reduce job"),
HIVEMERGETEZFILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"),
HIVEMERGESPARKFILES("hive.merge.sparkfiles", false, "Merge small files at the end of a Spark DAG Transformation"),
HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000),
"Size of merged files at the end of the job"),
HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000),
"When the average output file size of a job is less than this number, Hive will start an additional \n" +
"map-reduce job to merge the output files into bigger files. This is only done for map-only jobs \n" +
"if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."),
HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""),
HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true,
"When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled\n" +
"while writing a table with ORC file format, enabling this config will do stripe-level\n" +
"fast merge for small ORC files. Note that enabling this config will not honor the\n" +
"padding tolerance config (hive.exec.orc.block.padding.tolerance)."),
HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true,
"If this is set the header for RCFiles will simply be RCF. If this is not\n" +
"set the header will be that borrowed from sequence files, e.g. SEQ- followed\n" +
"by the input and output RCFile formats."),
HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true, ""),
HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE, ""),
HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0, ""),
HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false, ""),
HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304, ""), // 4M
PARQUET_MEMORY_POOL_RATIO("parquet.memory.pool.ratio", 0.5f,
"Maximum fraction of heap that can be used by Parquet file writers in one task.\n" +
"It is for avoiding OutOfMemory error in tasks. Work with Parquet 1.6.0 and above.\n" +
"This config parameter is defined in Parquet, so that it does not start with 'hive.'."),
@Deprecated
HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", true,
"Current Hive implementation of parquet stores timestamps to UTC, this flag allows skipping of the conversion" +
"on reading parquet files from other tools"),
HIVE_PARQUET_INT96_DEFAULT_UTC_WRITE_ZONE("hive.parquet.mr.int96.enable.utc.write.zone", false,
"Enable this variable to use UTC as the default timezone for new Parquet tables."),
HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS("hive.int.timestamp.conversion.in.seconds", false,
"Boolean/tinyint/smallint/int/bigint value is interpreted as milliseconds during the timestamp conversion.\n" +
"Set this flag to true to interpret the value as seconds to be consistent with float/double." ),
HIVE_ORC_BASE_DELTA_RATIO("hive.exec.orc.base.delta.ratio", 8, "The ratio of base writer and\n" +
"delta writer in terms of STRIPE_SIZE and BUFFER_SIZE."),
HIVE_ORC_SPLIT_STRATEGY("hive.exec.orc.split.strategy", "HYBRID", new StringSet("HYBRID", "BI", "ETL"),
"This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation" +
" as opposed to query execution (split generation does not read or cache file footers)." +
" ETL strategy is used when spending little more time in split generation is acceptable" +
" (split generation reads and caches file footers). HYBRID chooses between the above strategies" +
" based on heuristics."),
HIVE_ORC_MS_FOOTER_CACHE_ENABLED("hive.orc.splits.ms.footer.cache.enabled", false,
"Whether to enable using file metadata cache in metastore for ORC file footers."),
HIVE_ORC_MS_FOOTER_CACHE_PPD("hive.orc.splits.ms.footer.cache.ppd.enabled", true,
"Whether to enable file footer cache PPD (hive.orc.splits.ms.footer.cache.enabled\n" +
"must also be set to true for this to work)."),
HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false,
"If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
"data is read remotely (from the client or HS2 machine) and sent to all the tasks."),
HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS("hive.orc.splits.directory.batch.ms", 0,
"How long, in ms, to wait to batch input directories for processing during ORC split\n" +
"generation. 0 means process directories individually. This can increase the number of\n" +
"metastore calls if metastore metadata cache is used."),
HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true,
"Include file ID in splits on file systems that support it."),
HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS("hive.orc.splits.allow.synthetic.fileid", true,
"Allow synthetic file ID in splits on file systems that don't have a native one."),
HIVE_ORC_CACHE_STRIPE_DETAILS_MEMORY_SIZE("hive.orc.cache.stripe.details.mem.size", "256Mb",
new SizeValidator(), "Maximum size of orc splits cached in the client."),
HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
"How many threads orc should use to create splits in parallel."),
HIVE_ORC_CACHE_USE_SOFT_REFERENCES("hive.orc.cache.use.soft.references", false,
"By default, the cache that ORC input format uses to store orc file footer use hard\n" +
"references for the cached object. Setting this to true can help avoid out of memory\n" +
"issues under memory pressure (in some cases) at the cost of slight unpredictability in\n" +
"overall query performance."),
HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL("hive.lazysimple.extended_boolean_literal", false,
"LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',\n" +
"'1', and '0' as extened, legal boolean literal, in addition to 'TRUE' and 'FALSE'.\n" +
"The default is false, which means only 'TRUE' and 'FALSE' are treated as legal\n" +
"boolean literal."),
HIVESKEWJOIN("hive.optimize.skewjoin", false,
"Whether to enable skew join optimization. \n" +
"The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\n" +
"processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\n" +
"job, process those skewed keys. The same key need not be skewed for all the tables, and so,\n" +
"the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\n" +
"map-join."),
HIVEDYNAMICPARTITIONHASHJOIN("hive.optimize.dynamic.partition.hashjoin", false,
"Whether to enable dynamically partitioned hash join optimization. \n" +
"This setting is also dependent on enabling hive.auto.convert.join"),
HIVECONVERTJOIN("hive.auto.convert.join", true,
"Whether Hive enables the optimization about converting common join into mapjoin based on the input file size"),
HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true,
"Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. \n" +
"If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n" +
"specified size, the join is directly converted to a mapjoin (there is no conditional task)."),
HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size",
10000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, \n" +
"the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB"),
HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", false,
"For conditional joins, if input stream from a small alias can be directly applied to join operator without \n" +
"filtering or projection, the alias need not to be pre-staged in distributed cache via mapred local task.\n" +
"Currently, this is not working with vectorization or tez execution engine."),
HIVESKEWJOINKEY("hive.skewjoin.key", 100000,
"Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator,\n" +
"we think the key as a skew join key. "),
HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000,
"Determine the number of map task used in the follow up map join job for a skew join.\n" +
"It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control."),
HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L,
"Determine the number of map task at most used in the follow up map join job for a skew join by specifying \n" +
"the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine grained control."),
HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000,
"Send a heartbeat after this interval - used by mapjoin and filter operators"),
HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L,
"When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least."),
HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10,
"When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."),
HIVELIMITOPTENABLE("hive.limit.optimize.enable", false,
"Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first."),
HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000,
"Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. \n" +
"Insert queries are not restricted by this limit."),
HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(),
"The fraction of available memory to be used for buffering rows in Reducesink operator for limit pushdown optimization."),
@Deprecated
HIVELIMITTABLESCANPARTITION("hive.limit.query.max.table.partition", -1,
"This controls how many partitions can be scanned for each partitioned table.\n" +
"The default value \"-1\" means no limit. (DEPRECATED: Please use " + ConfVars.METASTORE_LIMIT_PARTITION_REQUEST + " in the metastore instead.)"),
HIVECONVERTJOINMAXENTRIESHASHTABLE("hive.auto.convert.join.hashtable.max.entries", 40000000L,
"If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
"However, if it is on, and the predicated number of entries in hashtable for a given join \n" +
"input is larger than this number, the join will not be converted to a mapjoin. \n" +
"The value \"-1\" means no limit."),
HIVEHASHTABLEKEYCOUNTADJUSTMENT("hive.hashtable.key.count.adjustment", 1.0f,
"Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate" +
" of the number of keys is divided by this value. If the value is 0, statistics are not used" +
"and hive.hashtable.initialCapacity is used instead."),
HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " +
"mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0"),
HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""),
HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55,
"This number means how much memory the local task can take to hold the key/value into an in-memory hash table \n" +
"when this map join is followed by a group by. If the local task's memory usage is more than this number, \n" +
"the local task will abort by itself. It means the data of the small table is too large to be held in memory."),
HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90,
"This number means how much memory the local task can take to hold the key/value into an in-memory hash table. \n" +
"If the local task's memory usage is more than this number, the local task will abort by itself. \n" +
"It means the data of the small table is too large to be held in memory."),
HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000,
"The number means after how many rows processed it needs to check the memory usage"),
HIVEDEBUGLOCALTASK("hive.debug.localtask",false, ""),
HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat",
"The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat."),
HIVETEZINPUTFORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat",
"The default input format for tez. Tez groups splits in the AM."),
HIVETEZCONTAINERSIZE("hive.tez.container.size", -1,
"By default Tez will spawn containers of the size of a mapper. This can be used to overwrite."),
HIVETEZCPUVCORES("hive.tez.cpu.vcores", -1,
"By default Tez will ask for however many cpus map-reduce is configured to use per container.\n" +
"This can be used to overwrite."),
HIVETEZJAVAOPTS("hive.tez.java.opts", null,
"By default Tez will use the Java options from map tasks. This can be used to overwrite."),
HIVETEZLOGLEVEL("hive.tez.log.level", "INFO",
"The log level to use for tasks executing as part of the DAG.\n" +
"Used only if hive.tez.java.opts is used to configure Java options."),
HIVETEZHS2USERACCESS("hive.tez.hs2.user.access", true,
"Whether to grant access to the hs2/hive user for queries"),
HIVEQUERYNAME ("hive.query.name", null,
"This named is used by Tez to set the dag name. This name in turn will appear on \n" +
"the Tez UI representing the work that was done."),
HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true,
"Don't create a reducer for enforcing \n" +
"bucketing/sorting for queries of the form: \n" +
"insert overwrite table T2 select * from T1;\n" +
"where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."),
HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""),
HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false,
"If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?"),
HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false,
"If the user asked for bucketed map-side join, and it cannot be performed, \n" +
"should the query fail or not ? For example, if the buckets in the tables being joined are\n" +
"not a multiple of each other, bucketed map-side join cannot be performed, and the\n" +
"query will fail if hive.enforce.bucketmapjoin is set to true."),
HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", false,
"Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join."),
HIVE_AUTO_SORTMERGE_JOIN_REDUCE("hive.auto.convert.sortmerge.join.reduce.side", true,
"Whether hive.auto.convert.sortmerge.join (if enabled) should be applied to reduce side."),
HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR(
"hive.auto.convert.sortmerge.join.bigtable.selection.policy",
"org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ",
"The policy to choose the big table for automatic conversion to sort-merge join. \n" +
"By default, the table with the largest partitions is assigned the big table. All policies are:\n" +
". based on position of the table - the leftmost table is selected\n" +
"org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.\n" +
". based on total size (all the partitions selected in the query) of the table \n" +
"org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ.\n" +
". based on average size (all the partitions selected in the query) of the table \n" +
"org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.\n" +
"New policies can be added in future."),
HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN(
"hive.auto.convert.sortmerge.join.to.mapjoin", false,
"If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, \n" +
"this parameter decides whether each table should be tried as a big table, and effectively a map-join should be\n" +
"tried. That would create a conditional task with n+1 children for a n-way join (1 child for each table as the\n" +
"big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a\n" +
"sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted\n" +
"and bucketed table with few files (say 10 files) are being joined with a very small sorter and bucketed table\n" +
"with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster\n" +
"if the complete small table can fit in memory, and a map-join can be performed."),
HIVESCRIPTOPERATORTRUST("hive.exec.script.trust", false, ""),
HIVEROWOFFSET("hive.exec.rowoffset", false,
"Whether to provide the row offset virtual column"),
// Optimizer
HIVEOPTINDEXFILTER("hive.optimize.index.filter", false,
"Whether to enable automatic use of indexes"),
HIVEINDEXAUTOUPDATE("hive.optimize.index.autoupdate", false,
"Whether to update stale indexes automatically"),
HIVEOPTPPD("hive.optimize.ppd", true,
"Whether to enable predicate pushdown"),
HIVEOPTPPD_WINDOWING("hive.optimize.ppd.windowing", true,
"Whether to enable predicate pushdown through windowing"),
HIVEPPDRECOGNIZETRANSITIVITY("hive.ppd.recognizetransivity", true,
"Whether to transitively replicate predicate filters over equijoin conditions."),
HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true,
"During query optimization, filters may be pushed down in the operator tree. \n" +
"If this config is true only pushed down filters remain in the operator tree, \n" +
"and the original filter is removed. If this config is false, the original filter \n" +
"is also left in the operator tree at the original place."),
HIVEPOINTLOOKUPOPTIMIZER("hive.optimize.point.lookup", true,
"Whether to transform OR clauses in Filter operators into IN clauses"),
HIVEPOINTLOOKUPOPTIMIZERMIN("hive.optimize.point.lookup.min", 31,
"Minimum number of OR clauses needed to transform into IN clauses"),
HIVEPARTITIONCOLUMNSEPARATOR("hive.optimize.partition.columns.separate", true,
"Extract partition columns from IN clauses"),
// Constant propagation optimizer
HIVEOPTCONSTANTPROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"),
HIVEIDENTITYPROJECTREMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"),
HIVEMETADATAONLYQUERIES("hive.optimize.metadataonly", false,
"Whether to eliminate scans of the tables from which no columns are selected. Note\n" +
"that, when selecting from empty tables with data files, this can produce incorrect\n" +
"results, so it's disabled by default. It works correctly for normal tables."),
    HIVENULLSCANOPTIMIZE("hive.optimize.null.scan", true, "Don't scan relations which are guaranteed to not generate any rows"),
HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true,
"Whether to push predicates down to storage handlers"),
HIVEOPTGROUPBY("hive.optimize.groupby", true,
"Whether to enable the bucketed group by from bucketed partitions/tables."),
HIVEOPTBUCKETMAPJOIN("hive.optimize.bucketmapjoin", false,
"Whether to try bucket mapjoin"),
HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false,
"Whether to try sorted bucket merge map join"),
HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true,
"Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. \n" +
"This should always be set to true. Since it is a new feature, it has been made configurable."),
HIVEOPTREDUCEDEDUPLICATIONMINREDUCER("hive.optimize.reducededuplication.min.reducer", 4,
"Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. \n" +
"That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.\n" +
"The optimization will be automatically disabled if number of reducers would be less than specified value."),
HIVEOPTSORTDYNAMICPARTITION("hive.optimize.sort.dynamic.partition", false,
"When enabled dynamic partitioning column will be globally sorted.\n" +
"This way we can keep only one record writer open for each partition value\n" +
"in the reducer thereby reducing the memory pressure on reducers."),
HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false, "Uses sampling on order-by clause for parallel execution."),
HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."),
HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(),
"Probability with which a row will be chosen."),
HIVEOPTIMIZEDISTINCTREWRITE("hive.optimize.distinct.rewrite", true, "When applicable this "
+ "optimization rewrites distinct aggregates from a single stage to multi-stage "
+ "aggregation. This may not be optimal in all cases. Ideally, whether to trigger it or "
+ "not should be cost based decision. Until Hive formalizes cost model for this, this is config driven."),
// whether to optimize union followed by select followed by filesink
// It creates sub-directories in the final output, so should not be turned on in systems
// where MAPREDUCE-1501 is not present
HIVE_OPTIMIZE_UNION_REMOVE("hive.optimize.union.remove", false,
"Whether to remove the union and push the operators between union and the filesink above union. \n" +
"This avoids an extra scan of the output by union. This is independently useful for union\n" +
"queries, and specially useful when hive.optimize.skewjoin.compiletime is set to true, since an\n" +
"extra union is inserted.\n" +
"\n" +
"The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true.\n" +
"If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the idea was the\n" +
"number of reducers are few, so the number of files anyway are small. However, with this optimization,\n" +
"we are increasing the number of files possibly by a big margin. So, we merge aggressively."),
    HIVEOPTCORRELATION("hive.optimize.correlation", false, "Exploit intra-query correlations."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE("hive.optimize.limittranspose", false,
"Whether to push a limit through left/right outer join or union. If the value is true and the size of the outer\n" +
"input is reduced enough (as specified in hive.optimize.limittranspose.reduction), the limit is pushed\n" +
"to the outer input or union; to remain semantically correct, the limit is kept on top of the join or the union too."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE("hive.optimize.limittranspose.reductionpercentage", 1.0f,
"When hive.optimize.limittranspose is true, this variable specifies the minimal reduction of the\n" +
"size of the outer input of the join or input of the union that we should get in order to apply the rule."),
HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES("hive.optimize.limittranspose.reductiontuples", (long) 0,
"When hive.optimize.limittranspose is true, this variable specifies the minimal reduction in the\n" +
"number of tuples of the outer input of the join or the input of the union that you should get in order to apply the rule."),
HIVE_OPTIMIZE_REDUCE_WITH_STATS("hive.optimize.filter.stats.reduction", false, "Whether to simplify comparison\n" +
"expressions in filter operators using column stats"),
HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME("hive.optimize.skewjoin.compiletime", false,
"Whether to create a separate plan for skewed keys for the tables in the join.\n" +
"This is based on the skewed keys stored in the metadata. At compile time, the plan is broken\n" +
"into different joins: one for the skewed keys, and the other for the remaining keys. And then,\n" +
"a union is performed for the 2 joins generated above. So unless the same skewed key is present\n" +
"in both the joined tables, the join for the skewed key will be performed as a map-side join.\n" +
"\n" +
"The main difference between this parameter and hive.optimize.skewjoin is that this parameter\n" +
"uses the skew information stored in the metastore to optimize the plan at compile time itself.\n" +
"If there is no skew information in the metadata, this parameter will not have any affect.\n" +
"Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true.\n" +
"Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing\n" +
"so for backward compatibility.\n" +
"\n" +
"If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime\n" +
"would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op."),
HIVE_SHARED_SCAN_OPTIMIZATION("hive.optimize.shared.scan", true,
"Whether to enable shared scan optimizer. The optimizer finds scan operator over the same table\n" +
"in the query plan and merges them if they meet some preconditions."),
// CTE
HIVE_CTE_MATERIALIZE_THRESHOLD("hive.optimize.cte.materialize.threshold", -1,
"If the number of references to a CTE clause exceeds this threshold, Hive will materialize it\n" +
"before executing the main query block. -1 will disable this feature."),
// Indexes
HIVEOPTINDEXFILTER_COMPACT_MINSIZE("hive.optimize.index.filter.compact.minsize", (long) 5 * 1024 * 1024 * 1024,
"Minimum size (in bytes) of the inputs on which a compact index is automatically used."), // 5G
HIVEOPTINDEXFILTER_COMPACT_MAXSIZE("hive.optimize.index.filter.compact.maxsize", (long) -1,
"Maximum size (in bytes) of the inputs on which a compact index is automatically used. A negative number is equivalent to infinity."), // infinity
HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES("hive.index.compact.query.max.entries", (long) 10000000,
"The maximum number of index entries to read during a query that uses the compact index. Negative value is equivalent to infinity."), // 10M
HIVE_INDEX_COMPACT_QUERY_MAX_SIZE("hive.index.compact.query.max.size", (long) 10 * 1024 * 1024 * 1024,
"The maximum number of bytes that a query using the compact index can read. Negative value is equivalent to infinity."), // 10G
HIVE_INDEX_COMPACT_BINARY_SEARCH("hive.index.compact.binary.search", true,
"Whether or not to use a binary search to find the entries in an index table that match the filter, where possible"),
// Statistics
HIVESTATSAUTOGATHER("hive.stats.autogather", true,
"A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command."),
HIVESTATSCOLAUTOGATHER("hive.stats.column.autogather", false,
"A flag to gather column statistics automatically."),
HIVESTATSDBCLASS("hive.stats.dbclass", "fs", new PatternSet("custom", "fs"),
"The storage that stores temporary Hive statistics. In filesystem based statistics collection ('fs'), \n" +
"each task writes statistics it has collected in a file on the filesystem, which will be aggregated \n" +
"after the job has finished. Supported values are fs (filesystem) and custom as defined in StatsSetupConst.java."), // StatsSetupConst.StatDB
HIVE_STATS_DEFAULT_PUBLISHER("hive.stats.default.publisher", "",
"The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator", "",
"The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
HIVE_STATS_ATOMIC("hive.stats.atomic", false,
"whether to update metastore stats only if all stats are available"),
CLIENT_STATS_COUNTERS("hive.client.stats.counters", "",
"Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). \n" +
"Non-display names should be used"),
HIVE_STATS_RELIABLE("hive.stats.reliable", false,
"Whether queries will fail because stats cannot be collected completely accurately. \n" +
"If this is set to true, reading/writing from/into a partition may fail because the stats\n" +
"could not be computed accurately."),
HIVE_STATS_COLLECT_PART_LEVEL_STATS("hive.analyze.stmt.collect.partlevel.stats", true,
"analyze table T compute statistics for columns. Queries like these should compute partition"
+ "level stats for partitioned table even when no part spec is specified."),
HIVE_STATS_GATHER_NUM_THREADS("hive.stats.gather.num.threads", 10,
"Number of threads used by partialscan/noscan analyze command for partitioned tables.\n" +
"This is applicable only for file formats that implement StatsProvidingRecordReader (like ORC)."),
// Collect table access keys information for operators that can benefit from bucketing
HIVE_STATS_COLLECT_TABLEKEYS("hive.stats.collect.tablekeys", false,
"Whether join and group by keys on tables are derived and maintained in the QueryPlan.\n" +
"This is useful to identify how tables are accessed and to determine if they should be bucketed."),
// Collect column access information
HIVE_STATS_COLLECT_SCANCOLS("hive.stats.collect.scancols", false,
"Whether column accesses are tracked in the QueryPlan.\n" +
"This is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed."),
// standard error allowed for ndv estimates. A lower value indicates higher accuracy and a
// higher compute cost.
HIVE_STATS_NDV_ERROR("hive.stats.ndv.error", (float)20.0,
"Standard error expressed in percentage. Provides a tradeoff between accuracy and compute cost. \n" +
"A lower value for error indicates higher accuracy and a higher compute cost."),
HIVE_METASTORE_STATS_NDV_TUNER("hive.metastore.stats.ndv.tuner", (float)0.0,
"Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \n" +
"The lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\n" +
"Its value should be between 0.0 (i.e., choose lower bound) and 1.0 (i.e., choose higher bound)"),
HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION("hive.metastore.stats.ndv.densityfunction", false,
"Whether to use density function to estimate the NDV for the whole table based on the NDV of partitions"),
HIVE_STATS_KEY_PREFIX("hive.stats.key.prefix", "", "", true), // internal usage only
// if length of variable length data type cannot be determined this length will be used.
HIVE_STATS_MAX_VARIABLE_LENGTH("hive.stats.max.variable.length", 100,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics, for variable length columns (like string, bytes etc.), this value will be\n" +
"used. For fixed length columns their corresponding Java equivalent sizes are used\n" +
"(float - 4 bytes, double - 8 bytes etc.)."),
// if number of elements in list cannot be determined, this value will be used
HIVE_STATS_LIST_NUM_ENTRIES("hive.stats.list.num.entries", 10,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics and for variable length complex columns like list, the average number of\n" +
"entries/values can be specified using this config."),
// if number of elements in map cannot be determined, this value will be used
HIVE_STATS_MAP_NUM_ENTRIES("hive.stats.map.num.entries", 10,
"To estimate the size of data flowing through operators in Hive/Tez(for reducer estimation etc.),\n" +
"average row size is multiplied with the total number of rows coming out of each operator.\n" +
"Average row size is computed from average column size of all columns in the row. In the absence\n" +
"of column statistics and for variable length complex columns like map, the average number of\n" +
"entries/values can be specified using this config."),
// statistics annotation fetches stats for each partition, which can be expensive. turning
// this off will result in basic sizes being fetched from namenode instead
HIVE_STATS_FETCH_PARTITION_STATS("hive.stats.fetch.partition.stats", true,
"Annotation of operator tree with statistics information requires partition level basic\n" +
"statistics like number of rows, data size and file size. Partition statistics are fetched from\n" +
"metastore. Fetching partition statistics for each needed partition can be expensive when the\n" +
"number of partitions is high. This flag can be used to disable fetching of partition statistics\n" +
"from metastore. When this flag is disabled, Hive will make calls to filesystem to get file sizes\n" +
"and will estimate the number of rows from row schema."),
// statistics annotation fetches column statistics for all required columns which can
// be very expensive sometimes
HIVE_STATS_FETCH_COLUMN_STATS("hive.stats.fetch.column.stats", false,
"Annotation of operator tree with statistics information requires column statistics.\n" +
"Column statistics are fetched from metastore. Fetching column statistics for each needed column\n" +
"can be expensive when the number of columns is high. This flag can be used to disable fetching\n" +
"of column statistics from metastore."),
// in the absence of column statistics, the estimated number of rows/data size that will
// be emitted from join operator will depend on this factor
HIVE_STATS_JOIN_FACTOR("hive.stats.join.factor", (float) 1.1,
"Hive/Tez optimizer estimates the data size flowing through each of the operators. JOIN operator\n" +
"uses column statistics to estimate the number of rows flowing out of it and hence the data size.\n" +
"In the absence of column statistics, this factor determines the amount of rows that flows out\n" +
"of JOIN operator."),
HIVE_STATS_CORRELATED_MULTI_KEY_JOINS("hive.stats.correlated.multi.key.joins", false,
"When estimating output rows for a join involving multiple columns, the default behavior assumes" +
"the columns are independent. Setting this flag to true will cause the estimator to assume" +
"the columns are correlated."),
// in the absence of uncompressed/raw data size, total file size will be used for statistics
// annotation. But the file may be compressed, encoded and serialized which may be lesser in size
// than the actual uncompressed/raw data size. This factor will be multiplied to file size to estimate
// the raw data size.
HIVE_STATS_DESERIALIZATION_FACTOR("hive.stats.deserialization.factor", (float) 1.0,
"Hive/Tez optimizer estimates the data size flowing through each of the operators. In the absence\n" +
"of basic statistics like number of rows and data size, file size is used to estimate the number\n" +
"of rows and data size. Since files in tables/partitions are serialized (and optionally\n" +
"compressed) the estimates of number of rows and data size cannot be reliably determined.\n" +
"This factor is multiplied with the file size to account for serialization and compression."),
HIVE_STATS_IN_CLAUSE_FACTOR("hive.stats.filter.in.factor", (float) 1.0,
"Currently column distribution is assumed to be uniform. This can lead to overestimation/underestimation\n" +
"in the number of rows filtered by a certain operator, which in turn might lead to overprovision or\n" +
"underprovision of resources. This factor is applied to the cardinality estimation of IN clauses in\n" +
"filter operators."),
// Concurrency
HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", false,
"Whether Hive supports concurrency control or not. \n" +
"A ZooKeeper instance must be up and running when using zookeeper Hive lock manager "),
HIVE_LOCK_MANAGER("hive.lock.manager", "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager", ""),
HIVE_LOCK_NUMRETRIES("hive.lock.numretries", 100,
"The number of times you want to try to get all the locks"),
HIVE_UNLOCK_NUMRETRIES("hive.unlock.numretries", 10,
"The number of times you want to retry to do one unlock"),
HIVE_LOCK_SLEEP_BETWEEN_RETRIES("hive.lock.sleep.between.retries", "60s",
new TimeValidator(TimeUnit.SECONDS, 0L, false, Long.MAX_VALUE, false),
"The maximum sleep time between various retries"),
HIVE_LOCK_MAPRED_ONLY("hive.lock.mapred.only.operation", false,
"This param is to control whether or not only do lock on queries\n" +
"that need to execute at least one mapred job."),
HIVE_LOCK_QUERY_STRING_MAX_LENGTH("hive.lock.query.string.max.length", 1000000,
"The maximum length of the query string to store in the lock.\n" +
"The default value is 1000000, since the data limit of a znode is 1MB"),
// Zookeeper related configs
HIVE_ZOOKEEPER_QUORUM("hive.zookeeper.quorum", "",
"List of ZooKeeper servers to talk to. This is needed for: \n" +
"1. Read/write locks - when hive.lock.manager is set to \n" +
"org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager, \n" +
"2. When HiveServer2 supports service discovery via Zookeeper.\n" +
"3. For delegation token storage if zookeeper store is used, if\n" +
"hive.cluster.delegation.token.store.zookeeper.connectString is not set\n" +
"4. LLAP daemon registry service"),
HIVE_ZOOKEEPER_CLIENT_PORT("hive.zookeeper.client.port", "2181",
"The port of ZooKeeper servers to talk to.\n" +
"If the list of Zookeeper servers specified in hive.zookeeper.quorum\n" +
"does not contain port numbers, this value is used."),
HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", "1200000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"ZooKeeper client's session timeout (in milliseconds). The client is disconnected, and as a result, all locks released, \n" +
"if a heartbeat is not sent in the timeout."),
HIVE_ZOOKEEPER_NAMESPACE("hive.zookeeper.namespace", "hive_zookeeper_namespace",
"The parent node under which all ZooKeeper nodes are created."),
HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES("hive.zookeeper.clean.extra.nodes", false,
"Clean extra nodes at the end of the session."),
HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES("hive.zookeeper.connection.max.retries", 3,
"Max number of times to retry when connecting to the ZooKeeper server."),
HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME("hive.zookeeper.connection.basesleeptime", "1000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Initial amount of time (in milliseconds) to wait between retries\n" +
"when connecting to the ZooKeeper server when using ExponentialBackoffRetry policy."),
// Transactions
HIVE_TXN_MANAGER("hive.txn.manager",
"org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
"Set to org.apache.hadoop.hive.ql.lockmgr.DbTxnManager as part of turning on Hive\n" +
"transactions, which also requires appropriate settings for hive.compactor.initiator.on,\n" +
"hive.compactor.worker.threads, hive.support.concurrency (true),\n" +
"and hive.exec.dynamic.partition.mode (nonstrict).\n" +
"The default DummyTxnManager replicates pre-Hive-0.13 behavior and provides\n" +
"no transactions."),
HIVE_TXN_STRICT_LOCKING_MODE("hive.txn.strict.locking.mode", true, "In strict mode non-ACID\n" +
"resources use standard R/W lock semantics, e.g. INSERT will acquire exclusive lock.\n" +
"In nonstrict mode, for non-ACID resources, INSERT will only acquire shared lock, which\n" +
"allows two concurrent writes to the same partition but still lets lock manager prevent\n" +
"DROP TABLE etc. when the table is being written to"),
HIVE_TXN_TIMEOUT("hive.txn.timeout", "300s", new TimeValidator(TimeUnit.SECONDS),
"time after which transactions are declared aborted if the client has not sent a heartbeat."),
HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE("hive.txn.heartbeat.threadpool.size", 5, "The number of " +
"threads to use for heartbeating. For Hive CLI, 1 is enough. For HiveServer2, we need a few"),
TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT("hive.txn.manager.dump.lock.state.on.acquire.timeout", false,
"Set this to true so that when attempt to acquire a lock on resource times out, the current state" +
" of the lock manager is dumped to log file. This is for debugging. See also " +
"hive.lock.numretries and hive.lock.sleep.between.retries."),
HIVE_TXN_OPERATIONAL_PROPERTIES("hive.txn.operational.properties", 0,
"Sets the operational properties that control the appropriate behavior for various\n"
+ "versions of the Hive ACID subsystem. Setting it to zero will turn on the legacy mode\n"
+ "for ACID, while setting it to one will enable a split-update feature found in the newer\n"
+ "version of Hive ACID subsystem. Mostly it is intended to be used as an internal property\n"
+ "for future versions of ACID. (See HIVE-14035 for details.)"),
HIVE_MAX_OPEN_TXNS("hive.max.open.txns", 100000, "Maximum number of open transactions. If \n" +
"current open transactions reach this limit, future open transaction requests will be \n" +
"rejected, until this number goes below the limit."),
HIVE_COUNT_OPEN_TXNS_INTERVAL("hive.count.open.txns.interval", "1s",
new TimeValidator(TimeUnit.SECONDS), "Time in seconds between checks to count open transactions."),
HIVE_TXN_MAX_OPEN_BATCH("hive.txn.max.open.batch", 1000,
"Maximum number of transactions that can be fetched in one call to open_txns().\n" +
"This controls how many transactions streaming agents such as Flume or Storm open\n" +
"simultaneously. The streaming agent then writes that number of entries into a single\n" +
"file (per Flume agent or Storm bolt). Thus increasing this value decreases the number\n" +
"of delta files created by streaming agents. But it also increases the number of open\n" +
"transactions that Hive has to track at any given time, which may negatively affect\n" +
"read performance."),
HIVE_TXN_RETRYABLE_SQLEX_REGEX("hive.txn.retryable.sqlex.regex", "", "Comma separated list\n" +
"of regular expression patterns for SQL state, error code, and error message of\n" +
"retryable SQLExceptions, that's suitable for the metastore DB.\n" +
"For example: Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*\n" +
"The string that the regex will be matched against is of the following form, where ex is a SQLException:\n" +
"ex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", ErrorCode=\" + ex.getErrorCode() + \")\""),
HIVE_COMPACTOR_INITIATOR_ON("hive.compactor.initiator.on", false,
"Whether to run the initiator and cleaner threads on this metastore instance or not.\n" +
"Set this to true on one instance of the Thrift metastore service as part of turning\n" +
"on Hive transactions. For a complete list of parameters required for turning on\n" +
"transactions, see hive.txn.manager."),
HIVE_COMPACTOR_WORKER_THREADS("hive.compactor.worker.threads", 0,
"How many compactor worker threads to run on this metastore instance. Set this to a\n" +
"positive number on one or more instances of the Thrift metastore service as part of\n" +
"turning on Hive transactions. For a complete list of parameters required for turning\n" +
"on transactions, see hive.txn.manager.\n" +
"Worker threads spawn MapReduce jobs to do compactions. They do not do the compactions\n" +
"themselves. Increasing the number of worker threads will decrease the time it takes\n" +
"tables or partitions to be compacted once they are determined to need compaction.\n" +
"It will also increase the background load on the Hadoop cluster as more MapReduce jobs\n" +
"will be running in the background."),
HIVE_COMPACTOR_WORKER_TIMEOUT("hive.compactor.worker.timeout", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"Time in seconds after which a compaction job will be declared failed and the\n" +
"compaction re-queued."),
HIVE_COMPACTOR_CHECK_INTERVAL("hive.compactor.check.interval", "300s",
new TimeValidator(TimeUnit.SECONDS),
"Time in seconds between checks to see if any tables or partitions need to be\n" +
"compacted. This should be kept high because each check for compaction requires\n" +
"many calls against the NameNode.\n" +
"Decreasing this value will reduce the time it takes for compaction to be started\n" +
"for a table or partition that requires compaction. However, checking if compaction\n" +
"is needed requires several calls to the NameNode for each table or partition that\n" +
"has had a transaction done on it since the last major compaction. So decreasing this\n" +
"value will increase the load on the NameNode."),
HIVE_COMPACTOR_DELTA_NUM_THRESHOLD("hive.compactor.delta.num.threshold", 10,
"Number of delta directories in a table or partition that will trigger a minor\n" +
"compaction."),
HIVE_COMPACTOR_DELTA_PCT_THRESHOLD("hive.compactor.delta.pct.threshold", 0.1f,
"Percentage (fractional) size of the delta files relative to the base that will trigger\n" +
"a major compaction. (1.0 = 100%, so the default 0.1 = 10%.)"),
COMPACTOR_MAX_NUM_DELTA("hive.compactor.max.num.delta", 500, "Maximum number of delta files that " +
"the compactor will attempt to handle in a single job."),
HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD("hive.compactor.abortedtxn.threshold", 1000,
"Number of aborted transactions involving a given table or partition that will trigger\n" +
"a major compaction."),
COMPACTOR_INITIATOR_FAILED_THRESHOLD("hive.compactor.initiator.failed.compacts.threshold", 2,
new RangeValidator(1, 20), "Number of consecutive compaction failures (per table/partition) " +
"after which automatic compactions will not be scheduled any more. Note that this must be less " +
"than hive.compactor.history.retention.failed."),
HIVE_COMPACTOR_CLEANER_RUN_INTERVAL("hive.compactor.cleaner.run.interval", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS), "Time between runs of the cleaner thread"),
COMPACTOR_JOB_QUEUE("hive.compactor.job.queue", "", "Used to specify name of Hadoop queue to which\n" +
"Compaction jobs will be submitted. Set to empty string to let Hadoop choose the queue."),
COMPACTOR_HISTORY_RETENTION_SUCCEEDED("hive.compactor.history.retention.succeeded", 3,
new RangeValidator(0, 100), "Determines how many successful compaction records will be " +
"retained in compaction history for a given table/partition."),
COMPACTOR_HISTORY_RETENTION_FAILED("hive.compactor.history.retention.failed", 3,
new RangeValidator(0, 100), "Determines how many failed compaction records will be " +
"retained in compaction history for a given table/partition."),
COMPACTOR_HISTORY_RETENTION_ATTEMPTED("hive.compactor.history.retention.attempted", 2,
new RangeValidator(0, 100), "Determines how many attempted compaction records will be " +
"retained in compaction history for a given table/partition."),
COMPACTOR_HISTORY_REAPER_INTERVAL("hive.compactor.history.reaper.interval", "2m",
new TimeValidator(TimeUnit.MILLISECONDS), "Determines how often compaction history reaper runs"),
HIVE_TIMEDOUT_TXN_REAPER_START("hive.timedout.txn.reaper.start", "100s",
new TimeValidator(TimeUnit.MILLISECONDS), "Time delay of 1st reaper run after metastore start"),
HIVE_TIMEDOUT_TXN_REAPER_INTERVAL("hive.timedout.txn.reaper.interval", "180s",
new TimeValidator(TimeUnit.MILLISECONDS), "Time interval describing how often the reaper runs"),
WRITE_SET_REAPER_INTERVAL("hive.writeset.reaper.interval", "60s",
new TimeValidator(TimeUnit.MILLISECONDS), "Frequency of WriteSet reaper runs"),
MERGE_CARDINALITY_VIOLATION_CHECK("hive.merge.cardinality.check", true,
"Set to true to ensure that each SQL Merge statement ensures that for each row in the target\n" +
"table there is at most 1 matching row in the source table per SQL Specification."),
// For Druid storage handler
HIVE_DRUID_INDEXING_GRANULARITY("hive.druid.indexer.segments.granularity", "DAY",
new PatternSet("YEAR", "MONTH", "WEEK", "DAY", "HOUR", "MINUTE", "SECOND"),
"Granularity for the segments created by the Druid storage handler"
),
HIVE_DRUID_MAX_PARTITION_SIZE("hive.druid.indexer.partition.size.max", 5000000,
"Maximum number of records per segment partition"
),
HIVE_DRUID_MAX_ROW_IN_MEMORY("hive.druid.indexer.memory.rownum.max", 75000,
"Maximum number of records in memory while storing data in Druid"
),
HIVE_DRUID_BROKER_DEFAULT_ADDRESS("hive.druid.broker.address.default", "localhost:8082",
"Address of the Druid broker. If we are querying Druid from Hive, this address needs to be\n"
+
"declared"
),
HIVE_DRUID_COORDINATOR_DEFAULT_ADDRESS("hive.druid.coordinator.address.default", "localhost:8081",
"Address of the Druid coordinator. It is used to check the load status of newly created segments"
),
HIVE_DRUID_SELECT_DISTRIBUTE("hive.druid.select.distribute", true,
"If it is set to true, we distribute the execution of Druid Select queries. Concretely, we retrieve\n" +
"the result for Select queries directly from the Druid nodes containing the segments data.\n" +
"In particular, first we contact the Druid broker node to obtain the nodes containing the segments\n" +
"for the given query, and then we contact those nodes to retrieve the results for the query.\n" +
"If it is set to false, we do not execute the Select queries in a distributed fashion. Instead, results\n" +
"for those queries are returned by the Druid broker node."),
HIVE_DRUID_SELECT_THRESHOLD("hive.druid.select.threshold", 10000,
"Takes only effect when hive.druid.select.distribute is set to false. \n" +
"When we can split a Select query, this is the maximum number of rows that we try to retrieve\n" +
"per query. In order to do that, we obtain the estimated size for the complete result. If the\n" +
"number of records of the query results is larger than this threshold, we split the query in\n" +
"total number of rows/threshold parts across the time dimension. Note that we assume the\n" +
"records to be split uniformly across the time dimension."),
HIVE_DRUID_NUM_HTTP_CONNECTION("hive.druid.http.numConnection", 20, "Number of connections used by\n" +
"the HTTP client."),
HIVE_DRUID_HTTP_READ_TIMEOUT("hive.druid.http.read.timeout", "PT1M", "Read timeout period for the HTTP\n" +
"client in ISO8601 format (for example P2W, P3M, PT1H30M, PT0.750S), default is period of 1 minute."),
HIVE_DRUID_SLEEP_TIME("hive.druid.sleep.time", "PT10S",
"Sleep time between retries in ISO8601 format (for example P2W, P3M, PT1H30M, PT0.750S), default is period of 10 seconds."
),
HIVE_DRUID_BASE_PERSIST_DIRECTORY("hive.druid.basePersistDirectory", "",
"Local temporary directory used to persist intermediate indexing state, will default to JVM system property java.io.tmpdir."
),
    DRUID_SEGMENT_DIRECTORY("hive.druid.storage.storageDirectory", "/druid/segments",
        "Druid deep storage location."),
DRUID_METADATA_BASE("hive.druid.metadata.base", "druid", "Default prefix for metadata tables"),
DRUID_METADATA_DB_TYPE("hive.druid.metadata.db.type", "mysql",
new PatternSet("mysql", "postgresql"), "Type of the metadata database."
),
DRUID_METADATA_DB_USERNAME("hive.druid.metadata.username", "",
"Username to connect to Type of the metadata DB."
),
DRUID_METADATA_DB_PASSWORD("hive.druid.metadata.password", "",
"Password to connect to Type of the metadata DB."
),
DRUID_METADATA_DB_URI("hive.druid.metadata.uri", "",
"URI to connect to the database (for example jdbc:mysql://hostname:port/DBName)."
),
DRUID_WORKING_DIR("hive.druid.working.directory", "/tmp/workingDirectory",
"Default hdfs working directory used to store some intermediate metadata"
),
HIVE_DRUID_MAX_TRIES("hive.druid.maxTries", 5, "Maximum number of retries before giving up"),
HIVE_DRUID_PASSIVE_WAIT_TIME("hive.druid.passiveWaitTimeMs", 30000,
"Wait time in ms default to 30 seconds."
),
    HIVE_DRUID_BITMAP_FACTORY_TYPE("hive.druid.bitmap.type", "roaring", new PatternSet("roaring", "concise"), "Coding algorithm used to encode the bitmaps"),
// For HBase storage handler
HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true,
"Whether writes to HBase should be forced to the write-ahead log. \n" +
"Disabling this improves HBase write performance at the risk of lost writes in case of a crash."),
HIVE_HBASE_GENERATE_HFILES("hive.hbase.generatehfiles", false,
"True when HBaseStorageHandler should generate hfiles instead of operate against the online table."),
HIVE_HBASE_SNAPSHOT_NAME("hive.hbase.snapshot.name", null, "The HBase table snapshot name to use."),
HIVE_HBASE_SNAPSHOT_RESTORE_DIR("hive.hbase.snapshot.restoredir", "/tmp", "The directory in which to " +
"restore the HBase table snapshot."),
// For har files
HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"),
HIVEOPTGBYUSINGINDEX("hive.optimize.index.groupby", false,
"Whether to enable optimization of group-by queries using Aggregate indexes."),
HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"),
"Some select queries can be converted to single FETCH task minimizing latency.\n" +
"Currently the query should be single sourced not having any subquery and should not have\n" +
"any aggregations or distincts (which incurs RS), lateral views and joins.\n" +
"0. none : disable hive.fetch.task.conversion\n" +
"1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only\n" +
"2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)"
),
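    // Illustrative examples of the three modes (hypothetical table 'src' partitioned by 'ds'):
    //   none    : every query is planned as a regular job.
    //   minimal : SELECT * FROM src WHERE ds = '2017-01-01' LIMIT 10;            -- star, partition filter, limit
    //   more    : SELECT key, value FROM src WHERE value LIKE '%x%' LIMIT 10;    -- projection + filter + limit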
HIVEFETCHTASKCONVERSIONTHRESHOLD("hive.fetch.task.conversion.threshold", 1073741824L,
"Input threshold for applying hive.fetch.task.conversion. If target table is native, input length\n" +
"is calculated by summation of file lengths. If it's not native, storage handler for the table\n" +
"can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface."),
HIVEFETCHTASKAGGR("hive.fetch.task.aggr", false,
"Aggregation queries with no group-by clause (for example, select count(*) from src) execute\n" +
"final aggregations in single reduce task. If this is set true, Hive delegates final aggregation\n" +
"stage to fetch task, possibly decreasing the query time."),
HIVEOPTIMIZEMETADATAQUERIES("hive.compute.query.using.stats", true,
"When set to true Hive will answer a few queries like count(1) purely using stats\n" +
"stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.\n" +
"For more advanced stats collection need to run analyze table queries."),
// Serde for FetchTask
HIVEFETCHOUTPUTSERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe",
"The SerDe used by FetchTask to serialize the fetch output."),
HIVEEXPREVALUATIONCACHE("hive.cache.expr.evaluation", true,
"If true, the evaluation result of a deterministic expression referenced twice or more\n" +
"will be cached.\n" +
"For example, in a filter condition like '.. where key + 10 = 100 or key + 10 = 0'\n" +
"the expression 'key + 10' will be evaluated/cached once and reused for the following\n" +
"expression ('key + 10 = 0'). Currently, this is applied only to expressions in select\n" +
"or filter operators."),
// Hive Variables
HIVEVARIABLESUBSTITUTE("hive.variable.substitute", true,
"This enables substitution using syntax like ${var} ${system:var} and ${env:var}."),
HIVEVARIABLESUBSTITUTEDEPTH("hive.variable.substitute.depth", 40,
"The maximum replacements the substitution engine will do."),
HIVECONFVALIDATION("hive.conf.validation", true,
"Enables type checking for registered Hive configurations"),
SEMANTIC_ANALYZER_HOOK("hive.semantic.analyzer.hook", "", ""),
HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE(
"hive.test.authz.sstd.hs2.mode", false, "test hs2 mode from .q tests", true),
HIVE_AUTHORIZATION_ENABLED("hive.security.authorization.enabled", false,
"enable or disable the Hive client authorization"),
HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager",
"org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory",
"The Hive client authorization manager class name. The user defined authorization class should implement \n" +
"interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider."),
HIVE_AUTHENTICATOR_MANAGER("hive.security.authenticator.manager",
"org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator",
"hive client authenticator manager class name. The user defined authenticator should implement \n" +
"interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
HIVE_METASTORE_AUTHORIZATION_MANAGER("hive.security.metastore.authorization.manager",
"org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider",
"Names of authorization manager classes (comma separated) to be used in the metastore\n" +
"for authorization. The user defined authorization class should implement interface\n" +
"org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.\n" +
"All authorization manager classes have to successfully authorize the metastore API\n" +
"call for the command execution to be allowed."),
HIVE_METASTORE_AUTHORIZATION_AUTH_READS("hive.security.metastore.authorization.auth.reads", true,
"If this is true, metastore authorizer authorizes read actions on database, table"),
HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager",
"org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
"authenticator manager class name to be used in the metastore for authentication. \n" +
"The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
HIVE_AUTHORIZATION_TABLE_USER_GRANTS("hive.security.authorization.createtable.user.grants", "",
"the privileges automatically granted to some users whenever a table gets created.\n" +
"An example like \"userX,userY:select;userZ:create\" will grant select privilege to userX and userY,\n" +
"and grant create privilege to userZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS("hive.security.authorization.createtable.group.grants",
"",
"the privileges automatically granted to some groups whenever a table gets created.\n" +
"An example like \"groupX,groupY:select;groupZ:create\" will grant select privilege to groupX and groupY,\n" +
"and grant create privilege to groupZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS("hive.security.authorization.createtable.role.grants", "",
"the privileges automatically granted to some roles whenever a table gets created.\n" +
"An example like \"roleX,roleY:select;roleZ:create\" will grant select privilege to roleX and roleY,\n" +
"and grant create privilege to roleZ whenever a new table created."),
HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS("hive.security.authorization.createtable.owner.grants",
"",
"The privileges automatically granted to the owner whenever a table gets created.\n" +
"An example like \"select,drop\" will grant select and drop privilege to the owner\n" +
"of the table. Note that the default gives the creator of a table no access to the\n" +
"table (but see HIVE-8067)."),
HIVE_AUTHORIZATION_TASK_FACTORY("hive.security.authorization.task.factory",
"org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl",
"Authorization DDL task factory implementation"),
// if this is not set default value is set during config initialization
// Default value can't be set in this constructor as it would refer names in other ConfVars
// whose constructor would not have been called
HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST(
"hive.security.authorization.sqlstd.confwhitelist", "",
"List of comma separated Java regexes. Configurations parameters that match these\n" +
"regexes can be modified by user when SQL standard authorization is enabled.\n" +
"To get the default value, use the 'set <param>' command.\n" +
"Note that the hive.conf.restricted.list checks are still enforced after the white list\n" +
"check"),
HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND(
"hive.security.authorization.sqlstd.confwhitelist.append", "",
"List of comma separated Java regexes, to be appended to list set in\n" +
"hive.security.authorization.sqlstd.confwhitelist. Using this list instead\n" +
"of updating the original list means that you can append to the defaults\n" +
"set by SQL standard authorization instead of replacing it entirely."),
HIVE_CLI_PRINT_HEADER("hive.cli.print.header", false, "Whether to print the names of the columns in query output."),
HIVE_CLI_TEZ_SESSION_ASYNC("hive.cli.tez.session.async", true, "Whether to start Tez\n" +
"session in background when running CLI with Tez, allowing CLI to be available earlier."),
HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false,
"Whether to throw an exception if dynamic partition insert generates empty results."),
HIVE_INDEX_COMPACT_FILE("hive.index.compact.file", "", "internal variable"),
HIVE_INDEX_BLOCKFILTER_FILE("hive.index.blockfilter.file", "", "internal variable"),
HIVE_INDEX_IGNORE_HDFS_LOC("hive.index.compact.file.ignore.hdfs", false,
"When true the HDFS location stored in the index file will be ignored at runtime.\n" +
"If the data got moved or the name of the cluster got changed, the index data should still be usable."),
HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile,file,s3,s3a",
"A comma separated list of acceptable URI schemes for import and export."),
// temporary variable for testing. This is added just to turn off this feature in case of a bug in
// deployment. It has not been documented in hive-default.xml intentionally, this should be removed
// once the feature is stable
    HIVE_EXIM_RESTRICT_IMPORTS_INTO_REPLICATED_TABLES("hive.exim.strict.repl.tables", true,
"Parameter that determines if 'regular' (non-replication) export dumps can be\n" +
"imported on to tables that are the target of replication. If this parameter is\n" +
"set, regular imports will check if the destination table(if it exists) has a " +
"'repl.last.id' set on it. If so, it will fail."),
HIVE_REPL_TASK_FACTORY("hive.repl.task.factory",
"org.apache.hive.hcatalog.api.repl.exim.EximReplicationTaskFactory",
"Parameter that can be used to override which ReplicationTaskFactory will be\n" +
"used to instantiate ReplicationTask events. Override for third party repl plugins"),
HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false, ""),
HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false,
"should rework the mapred work or not.\n" +
"This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time."),
HIVE_CONCATENATE_CHECK_INDEX ("hive.exec.concatenate.check.index", true,
"If this is set to true, Hive will throw error when doing\n" +
"'alter table tbl_name [partSpec] concatenate' on a table/partition\n" +
"that has indexes on it. The reason the user want to set this to true\n" +
"is because it can help user to avoid handling all index drop, recreation,\n" +
"rebuild work. This is very helpful for tables with thousands of partitions."),
HIVE_IO_EXCEPTION_HANDLERS("hive.io.exception.handlers", "",
"A list of io exception handler class names. This is used\n" +
"to construct a list exception handlers to handle exceptions thrown\n" +
"by record readers"),
// logging configuration
HIVE_LOG4J_FILE("hive.log4j.file", "",
"Hive log4j configuration file.\n" +
"If the property is not set, then logging will be initialized using hive-log4j2.properties found on the classpath.\n" +
"If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
"which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
HIVE_EXEC_LOG4J_FILE("hive.exec.log4j.file", "",
"Hive log4j configuration file for execution mode(sub command).\n" +
"If the property is not set, then logging will be initialized using hive-exec-log4j2.properties found on the classpath.\n" +
"If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
"which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
HIVE_ASYNC_LOG_ENABLED("hive.async.log.enabled", true,
"Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give\n" +
" significant performance improvement as logging will be handled in separate thread\n" +
" that uses LMAX disruptor queue for buffering log messages.\n" +
" Refer https://logging.apache.org/log4j/2.x/manual/async.html for benefits and\n" +
" drawbacks."),
HIVE_LOG_EXPLAIN_OUTPUT("hive.log.explain.output", false,
"Whether to log explain output for every query.\n" +
"When enabled, will log EXPLAIN EXTENDED output for the query at INFO log4j log level."),
HIVE_EXPLAIN_USER("hive.explain.user", true,
"Whether to show explain result at user level.\n" +
"When enabled, will log EXPLAIN output for the query at user level. Tez only."),
HIVE_SPARK_EXPLAIN_USER("hive.spark.explain.user", false,
"Whether to show explain result at user level.\n" +
"When enabled, will log EXPLAIN output for the query at user level. Spark only."),
// prefix used to auto generated column aliases (this should be started with '_')
HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL("hive.autogen.columnalias.prefix.label", "_c",
"String used as a prefix when auto generating column alias.\n" +
"By default the prefix label will be appended with a column position number to form the column alias. \n" +
"Auto generation would happen if an aggregate function is used in a select clause without an explicit alias."),
HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME(
"hive.autogen.columnalias.prefix.includefuncname", false,
"Whether to include function name in the column alias auto generated by Hive."),
HIVE_METRICS_CLASS("hive.service.metrics.class",
"org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics",
new StringSet(
"org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics",
"org.apache.hadoop.hive.common.metrics.LegacyMetrics"),
"Hive metrics subsystem implementation class."),
HIVE_CODAHALE_METRICS_REPORTER_CLASSES("hive.service.metrics.codahale.reporter.classes",
"org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
"org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter",
"Comma separated list of reporter implementation classes for metric class "
+ "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics. Overrides "
+ "HIVE_METRICS_REPORTER conf if present"),
@Deprecated
HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "",
"Reporter implementations for metric class "
+ "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;" +
"Deprecated, use HIVE_CODAHALE_METRICS_REPORTER_CLASSES instead. This configuraiton will be"
+ " overridden by HIVE_CODAHALE_METRICS_REPORTER_CLASSES if present. " +
"Comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2"),
HIVE_METRICS_JSON_FILE_LOCATION("hive.service.metrics.file.location", "/tmp/report.json",
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, the location of local JSON metrics file. " +
"This file will get overwritten at every interval."),
HIVE_METRICS_JSON_FILE_INTERVAL("hive.service.metrics.file.frequency", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
"the frequency of updating JSON metrics file."),
HIVE_METRICS_HADOOP2_INTERVAL("hive.service.metrics.hadoop2.frequency", "30s",
new TimeValidator(TimeUnit.SECONDS),
"For metric class org.apache.hadoop.hive.common.metrics.metrics2.Metrics2Reporter, " +
"the frequency of updating the HADOOP2 metrics system."),
HIVE_METRICS_HADOOP2_COMPONENT_NAME("hive.service.metrics.hadoop2.component",
"hive",
"Component name to provide to Hadoop2 Metrics system. Ideally 'hivemetastore' for the MetaStore " +
" and and 'hiveserver2' for HiveServer2."
),
HIVE_PERF_LOGGER("hive.exec.perf.logger", "org.apache.hadoop.hive.ql.log.PerfLogger",
"The class responsible for logging client side performance metrics. \n" +
"Must be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger"),
HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false,
"To cleanup the Hive scratchdir when starting the Hive Server"),
HIVE_SCRATCH_DIR_LOCK("hive.scratchdir.lock", false,
"To hold a lock file in scratchdir to prevent to be removed by cleardanglingscratchdir"),
HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false,
"Where to insert into multilevel directories like\n" +
"\"insert directory '/HIVEFT25686/chinna/' from table\""),
HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true,
"whether insert into external tables is allowed"),
HIVE_TEMPORARY_TABLE_STORAGE(
"hive.exec.temporary.table.storage", "default", new StringSet("memory",
"ssd", "default"), "Define the storage policy for temporary tables." +
"Choices between memory, ssd and default"),
HIVE_QUERY_LIFETIME_HOOKS("hive.query.lifetime.hooks", "",
"A comma separated list of hooks which implement QueryLifeTimeHook. These will be triggered" +
" before/after query compilation and before/after query execution, in the order specified." +
"Implementations of QueryLifeTimeHookWithParseHooks can also be specified in this list. If they are" +
"specified then they will be invoked in the same places as QueryLifeTimeHooks and will be invoked during pre " +
"and post query parsing"),
HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", "",
"A comma separated list of hooks which implement HiveDriverRunHook. Will be run at the beginning " +
"and end of Driver.run, these will be run in the order specified."),
HIVE_DDL_OUTPUT_FORMAT("hive.ddl.output.format", null,
"The data format to use for DDL output. One of \"text\" (for human\n" +
"readable text) or \"json\" (for a json object)."),
HIVE_ENTITY_SEPARATOR("hive.entity.separator", "@",
"Separator used to construct names of tables and partitions. For example, dbname@tablename@partitionname"),
HIVE_CAPTURE_TRANSFORM_ENTITY("hive.entity.capture.transform", false,
"Compiler to capture transform URI referred in the query"),
HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY("hive.display.partition.cols.separately", true,
"In older Hive version (0.10 and earlier) no distinction was made between\n" +
"partition columns or non-partition columns while displaying columns in describe\n" +
"table. From 0.12 onwards, they are displayed separately. This flag will let you\n" +
"get old behavior, if desired. See, test-case in patch for HIVE-6689."),
HIVE_SSL_PROTOCOL_BLACKLIST("hive.ssl.protocol.blacklist", "SSLv2,SSLv3",
"SSL Versions to disable for all Hive Servers"),
// HiveServer2 specific configs
HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR("hive.server2.clear.dangling.scratchdir", false,
"Clear dangling scratch dir periodically in HS2"),
HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL("hive.server2.clear.dangling.scratchdir.interval",
"1800s", new TimeValidator(TimeUnit.SECONDS),
"Interval to clear dangling scratch dir periodically in HS2"),
HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS("hive.server2.sleep.interval.between.start.attempts",
"60s", new TimeValidator(TimeUnit.MILLISECONDS, 0l, true, Long.MAX_VALUE, true),
"Amount of time to sleep between HiveServer2 start attempts. Primarily meant for tests"),
HIVE_SERVER2_MAX_START_ATTEMPTS("hive.server2.max.start.attempts", 30L, new RangeValidator(0L, null),
"Number of times HiveServer2 will attempt to start before exiting. The sleep interval between retries" +
" is determined by " + ConfVars.HIVE_SERVER2_SLEEP_INTERVAL_BETWEEN_START_ATTEMPTS.varname +
"\n The default of 30 will keep trying for 30 minutes."),
HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY("hive.server2.support.dynamic.service.discovery", false,
"Whether HiveServer2 supports dynamic service discovery for its clients. " +
"To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself, " +
"when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: " +
"hive.zookeeper.quorum in their connection string."),
HIVE_SERVER2_ZOOKEEPER_NAMESPACE("hive.server2.zookeeper.namespace", "hiveserver2",
"The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery."),
HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS("hive.server2.zookeeper.publish.configs", true,
"Whether we should publish HiveServer2's configs to ZooKeeper."),
// HiveServer2 global init file location
HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION("hive.server2.global.init.file.location", "${env:HIVE_CONF_DIR}",
"Either the location of a HS2 global init file or a directory containing a .hiverc file. If the \n" +
"property is set, the value must be a valid path to an init file or directory where the init file is located."),
HIVE_SERVER2_TRANSPORT_MODE("hive.server2.transport.mode", "binary", new StringSet("binary", "http"),
"Transport mode of HiveServer2."),
HIVE_SERVER2_THRIFT_BIND_HOST("hive.server2.thrift.bind.host", "",
"Bind host on which to run the HiveServer2 Thrift service."),
HIVE_SERVER2_PARALLEL_COMPILATION("hive.driver.parallel.compilation", false, "Whether to\n" +
"enable parallel compilation of the queries between sessions and within the same session on HiveServer2. The default is false."),
HIVE_SERVER2_COMPILE_LOCK_TIMEOUT("hive.server2.compile.lock.timeout", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Number of seconds a request will wait to acquire the compile lock before giving up. " +
"Setting it to 0s disables the timeout."),
HIVE_SERVER2_PARALLEL_OPS_IN_SESSION("hive.server2.parallel.ops.in.session", true,
"Whether to allow several parallel operations (such as SQL statements) in one session."),
// HiveServer2 WebUI
HIVE_SERVER2_WEBUI_BIND_HOST("hive.server2.webui.host", "0.0.0.0", "The host address the HiveServer2 WebUI will listen on"),
    HIVE_SERVER2_WEBUI_PORT("hive.server2.webui.port", 10002, "The port the HiveServer2 WebUI will listen on. This can be "
        + "set to 0 or a negative integer to disable the web UI"),
HIVE_SERVER2_WEBUI_MAX_THREADS("hive.server2.webui.max.threads", 50, "The max HiveServer2 WebUI threads"),
HIVE_SERVER2_WEBUI_USE_SSL("hive.server2.webui.use.ssl", false,
"Set this to true for using SSL encryption for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PATH("hive.server2.webui.keystore.path", "",
"SSL certificate keystore location for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD("hive.server2.webui.keystore.password", "",
"SSL certificate keystore password for HiveServer2 WebUI."),
HIVE_SERVER2_WEBUI_USE_SPNEGO("hive.server2.webui.use.spnego", false,
"If true, the HiveServer2 WebUI will be secured with SPNEGO. Clients must authenticate with Kerberos."),
HIVE_SERVER2_WEBUI_SPNEGO_KEYTAB("hive.server2.webui.spnego.keytab", "",
"The path to the Kerberos Keytab file containing the HiveServer2 WebUI SPNEGO service principal."),
HIVE_SERVER2_WEBUI_SPNEGO_PRINCIPAL("hive.server2.webui.spnego.principal",
"HTTP/[email protected]", "The HiveServer2 WebUI SPNEGO service principal.\n" +
"The special string _HOST will be replaced automatically with \n" +
"the value of hive.server2.webui.host or the correct host name."),
HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES("hive.server2.webui.max.historic.queries", 25,
"The maximum number of past queries to show in HiverSever2 WebUI."),
// Tez session settings
HIVE_SERVER2_TEZ_DEFAULT_QUEUES("hive.server2.tez.default.queues", "",
"A list of comma separated values corresponding to YARN queues of the same name.\n" +
"When HiveServer2 is launched in Tez mode, this configuration needs to be set\n" +
"for multiple Tez sessions to run in parallel on the cluster."),
HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE("hive.server2.tez.sessions.per.default.queue", 1,
"A positive integer that determines the number of Tez sessions that should be\n" +
"launched on each of the queues specified by \"hive.server2.tez.default.queues\".\n" +
"Determines the parallelism on each queue."),
HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS("hive.server2.tez.initialize.default.sessions",
false,
"This flag is used in HiveServer2 to enable a user to use HiveServer2 without\n" +
"turning on Tez for HiveServer2. The user could potentially want to run queries\n" +
"over Tez without the pool of sessions."),
HIVE_SERVER2_TEZ_SESSION_LIFETIME("hive.server2.tez.session.lifetime", "162h",
new TimeValidator(TimeUnit.HOURS),
"The lifetime of the Tez sessions launched by HS2 when default sessions are enabled.\n" +
"Set to 0 to disable session expiration."),
HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER("hive.server2.tez.session.lifetime.jitter", "3h",
new TimeValidator(TimeUnit.HOURS),
"The jitter for Tez session lifetime; prevents all the sessions from restarting at once."),
HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS("hive.server2.tez.sessions.init.threads", 16,
"If hive.server2.tez.initialize.default.sessions is enabled, the maximum number of\n" +
"threads to use to initialize the default sessions."),
HIVE_SERVER2_TEZ_SESSION_RESTRICTED_CONFIGS("hive.server2.tez.sessions.restricted.configs", "",
"The configuration settings that cannot be set when submitting jobs to HiveServer2. If\n" +
"any of these are set to values different from those in the server configuration, an\n" +
"exception will be thrown."),
HIVE_SERVER2_TEZ_SESSION_CUSTOM_QUEUE_ALLOWED("hive.server2.tez.sessions.custom.queue.allowed",
"true", new StringSet("true", "false", "ignore"),
"Whether Tez session pool should allow submitting queries to custom queues. The options\n" +
"are true, false (error out), ignore (accept the query but ignore the queue setting)."),
// Operation log configuration
HIVE_SERVER2_LOGGING_OPERATION_ENABLED("hive.server2.logging.operation.enabled", true,
"When true, HS2 will save operation logs and make them available for clients"),
HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION("hive.server2.logging.operation.log.location",
"${system:java.io.tmpdir}" + File.separator + "${system:user.name}" + File.separator +
"operation_logs",
"Top level directory where operation logs are stored if logging functionality is enabled"),
HIVE_SERVER2_LOGGING_OPERATION_LEVEL("hive.server2.logging.operation.level", "EXECUTION",
new StringSet("NONE", "EXECUTION", "PERFORMANCE", "VERBOSE"),
"HS2 operation logging mode available to clients to be set at session level.\n" +
"For this to work, hive.server2.logging.operation.enabled should be set to true.\n" +
" NONE: Ignore any logging\n" +
" EXECUTION: Log completion of tasks\n" +
" PERFORMANCE: Execution + Performance logs \n" +
" VERBOSE: All logs" ),
// Enable metric collection for HiveServer2
HIVE_SERVER2_METRICS_ENABLED("hive.server2.metrics.enabled", false, "Enable metrics on the HiveServer2."),
// http (over thrift) transport settings
HIVE_SERVER2_THRIFT_HTTP_PORT("hive.server2.thrift.http.port", 10001,
"Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'."),
HIVE_SERVER2_THRIFT_HTTP_PATH("hive.server2.thrift.http.path", "cliservice",
"Path component of URL endpoint when in HTTP mode."),
HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE("hive.server2.thrift.max.message.size", 100*1024*1024,
"Maximum message size in bytes a HS2 server will accept."),
HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME("hive.server2.thrift.http.max.idle.time", "1800s",
new TimeValidator(TimeUnit.MILLISECONDS),
"Maximum idle time for a connection on the server when in HTTP mode."),
HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME("hive.server2.thrift.http.worker.keepalive.time", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Keepalive time for an idle http worker thread. When the number of workers exceeds min workers, " +
"excessive threads are killed after this time interval."),
HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE("hive.server2.thrift.http.request.header.size", 6*1024,
"Request header size in bytes, when using HTTP transport mode. Jetty defaults used."),
HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE("hive.server2.thrift.http.response.header.size", 6*1024,
"Response header size in bytes, when using HTTP transport mode. Jetty defaults used."),
// Cookie based authentication when using HTTP Transport
HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED("hive.server2.thrift.http.cookie.auth.enabled", true,
"When true, HiveServer2 in HTTP transport mode, will use cookie based authentication mechanism."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE("hive.server2.thrift.http.cookie.max.age", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"Maximum age in seconds for server side cookie used by HS2 in HTTP mode."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN("hive.server2.thrift.http.cookie.domain", null,
"Domain for the HS2 generated cookies"),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH("hive.server2.thrift.http.cookie.path", null,
"Path for the HS2 generated cookies"),
@Deprecated
HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE("hive.server2.thrift.http.cookie.is.secure", true,
"Deprecated: Secure attribute of the HS2 generated cookie (this is automatically enabled for SSL enabled HiveServer2)."),
HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY("hive.server2.thrift.http.cookie.is.httponly", true,
"HttpOnly attribute of the HS2 generated cookie."),
// binary transport settings
HIVE_SERVER2_THRIFT_PORT("hive.server2.thrift.port", 10000,
"Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'binary'."),
HIVE_SERVER2_THRIFT_SASL_QOP("hive.server2.thrift.sasl.qop", "auth",
new StringSet("auth", "auth-int", "auth-conf"),
"Sasl QOP value; set it to one of following values to enable higher levels of\n" +
"protection for HiveServer2 communication with clients.\n" +
"Setting hadoop.rpc.protection to a higher level than HiveServer2 does not\n" +
"make sense in most situations. HiveServer2 ignores hadoop.rpc.protection in favor\n" +
"of hive.server2.thrift.sasl.qop.\n" +
" \"auth\" - authentication only (default)\n" +
" \"auth-int\" - authentication plus integrity protection\n" +
" \"auth-conf\" - authentication plus integrity and confidentiality protection\n" +
"This is applicable only if HiveServer2 is configured to use Kerberos authentication."),
HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS("hive.server2.thrift.min.worker.threads", 5,
"Minimum number of Thrift worker threads"),
HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS("hive.server2.thrift.max.worker.threads", 500,
"Maximum number of Thrift worker threads"),
HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH(
"hive.server2.thrift.exponential.backoff.slot.length", "100ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Binary exponential backoff slot time for Thrift clients during login to HiveServer2,\n" +
"for retries until hitting Thrift client timeout"),
HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT("hive.server2.thrift.login.timeout", "20s",
new TimeValidator(TimeUnit.SECONDS), "Timeout for Thrift clients during login to HiveServer2"),
HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME("hive.server2.thrift.worker.keepalive.time", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Keepalive time (in seconds) for an idle worker thread. When the number of workers exceeds min workers, " +
"excessive threads are killed after this time interval."),
// Configuration for async thread pool in SessionManager
HIVE_SERVER2_ASYNC_EXEC_THREADS("hive.server2.async.exec.threads", 100,
"Number of threads in the async thread pool for HiveServer2"),
HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT("hive.server2.async.exec.shutdown.timeout", "10s",
new TimeValidator(TimeUnit.SECONDS),
"How long HiveServer2 shutdown will wait for async threads to terminate."),
HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE("hive.server2.async.exec.wait.queue.size", 100,
"Size of the wait queue for async thread pool in HiveServer2.\n" +
"After hitting this limit, the async thread pool will reject new requests."),
HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME("hive.server2.async.exec.keepalive.time", "10s",
new TimeValidator(TimeUnit.SECONDS),
"Time that an idle HiveServer2 async thread (from the thread pool) will wait for a new task\n" +
"to arrive before terminating"),
HIVE_SERVER2_ASYNC_EXEC_ASYNC_COMPILE("hive.server2.async.exec.async.compile", false,
"Whether to enable compiling async query asynchronously. If enabled, it is unknown if the query will have any resultset before compilation completed."),
HIVE_SERVER2_LONG_POLLING_TIMEOUT("hive.server2.long.polling.timeout", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time that HiveServer2 will wait before responding to asynchronous calls that use long polling"),
HIVE_SESSION_IMPL_CLASSNAME("hive.session.impl.classname", null, "Classname for custom implementation of hive session"),
HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME("hive.session.impl.withugi.classname", null, "Classname for custom implementation of hive session with UGI"),
// HiveServer2 auth configuration
HIVE_SERVER2_AUTHENTICATION("hive.server2.authentication", "NONE",
new StringSet("NOSASL", "NONE", "LDAP", "KERBEROS", "PAM", "CUSTOM"),
"Client authentication types.\n" +
" NONE: no authentication check\n" +
" LDAP: LDAP/AD based authentication\n" +
" KERBEROS: Kerberos/GSSAPI authentication\n" +
" CUSTOM: Custom authentication provider\n" +
" (Use with property hive.server2.custom.authentication.class)\n" +
" PAM: Pluggable authentication module\n" +
" NOSASL: Raw transport"),
HIVE_SERVER2_ALLOW_USER_SUBSTITUTION("hive.server2.allow.user.substitution", true,
"Allow alternate user to be specified as part of HiveServer2 open connection request."),
HIVE_SERVER2_KERBEROS_KEYTAB("hive.server2.authentication.kerberos.keytab", "",
"Kerberos keytab file for server principal"),
HIVE_SERVER2_KERBEROS_PRINCIPAL("hive.server2.authentication.kerberos.principal", "",
"Kerberos server principal"),
HIVE_SERVER2_SPNEGO_KEYTAB("hive.server2.authentication.spnego.keytab", "",
"keytab file for SPNego principal, optional,\n" +
"typical value would look like /etc/security/keytabs/spnego.service.keytab,\n" +
"This keytab would be used by HiveServer2 when Kerberos security is enabled and \n" +
"HTTP transport mode is used.\n" +
"This needs to be set only if SPNEGO is to be used in authentication.\n" +
"SPNego authentication would be honored only if valid\n" +
" hive.server2.authentication.spnego.principal\n" +
"and\n" +
" hive.server2.authentication.spnego.keytab\n" +
"are specified."),
HIVE_SERVER2_SPNEGO_PRINCIPAL("hive.server2.authentication.spnego.principal", "",
"SPNego service principal, optional,\n" +
"typical value would look like HTTP/[email protected]\n" +
"SPNego service principal would be used by HiveServer2 when Kerberos security is enabled\n" +
"and HTTP transport mode is used.\n" +
"This needs to be set only if SPNEGO is to be used in authentication."),
HIVE_SERVER2_PLAIN_LDAP_URL("hive.server2.authentication.ldap.url", null,
"LDAP connection URL(s),\n" +
"this value could contain URLs to mutiple LDAP servers instances for HA,\n" +
"each LDAP URL is separated by a SPACE character. URLs are used in the \n" +
" order specified until a connection is successful."),
HIVE_SERVER2_PLAIN_LDAP_BASEDN("hive.server2.authentication.ldap.baseDN", null, "LDAP base DN"),
HIVE_SERVER2_PLAIN_LDAP_DOMAIN("hive.server2.authentication.ldap.Domain", null, ""),
HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN("hive.server2.authentication.ldap.groupDNPattern", null,
"COLON-separated list of patterns to use to find DNs for group entities in this directory.\n" +
"Use %s where the actual group name is to be substituted for.\n" +
"For example: CN=%s,CN=Groups,DC=subdomain,DC=domain,DC=com."),
HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER("hive.server2.authentication.ldap.groupFilter", null,
"COMMA-separated list of LDAP Group names (short name not full DNs).\n" +
"For example: HiveAdmins,HadoopAdmins,Administrators"),
HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN("hive.server2.authentication.ldap.userDNPattern", null,
"COLON-separated list of patterns to use to find DNs for users in this directory.\n" +
"Use %s where the actual group name is to be substituted for.\n" +
"For example: CN=%s,CN=Users,DC=subdomain,DC=domain,DC=com."),
HIVE_SERVER2_PLAIN_LDAP_USERFILTER("hive.server2.authentication.ldap.userFilter", null,
"COMMA-separated list of LDAP usernames (just short names, not full DNs).\n" +
"For example: hiveuser,impalauser,hiveadmin,hadoopadmin"),
HIVE_SERVER2_PLAIN_LDAP_GUIDKEY("hive.server2.authentication.ldap.guidKey", "uid",
"LDAP attribute name whose values are unique in this LDAP server.\n" +
"For example: uid or CN."),
HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY("hive.server2.authentication.ldap.groupMembershipKey", "member",
"LDAP attribute name on the group object that contains the list of distinguished names\n" +
"for the user, group, and contact objects that are members of the group.\n" +
"For example: member, uniqueMember or memberUid"),
HIVE_SERVER2_PLAIN_LDAP_USERMEMBERSHIP_KEY(HIVE_SERVER2_AUTHENTICATION_LDAP_USERMEMBERSHIPKEY_NAME, null,
"LDAP attribute name on the user object that contains groups of which the user is\n" +
"a direct member, except for the primary group, which is represented by the\n" +
"primaryGroupId.\n" +
"For example: memberOf"),
HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY("hive.server2.authentication.ldap.groupClassKey", "groupOfNames",
"LDAP attribute name on the group entry that is to be used in LDAP group searches.\n" +
"For example: group, groupOfNames or groupOfUniqueNames."),
HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY("hive.server2.authentication.ldap.customLDAPQuery", null,
"A full LDAP query that LDAP Atn provider uses to execute against LDAP Server.\n" +
"If this query returns a null resultset, the LDAP Provider fails the Authentication\n" +
"request, succeeds if the user is part of the resultset." +
"For example: (&(objectClass=group)(objectClass=top)(instanceType=4)(cn=Domain*)) \n" +
"(&(objectClass=person)(|(sAMAccountName=admin)(|(memberOf=CN=Domain Admins,CN=Users,DC=domain,DC=com)" +
"(memberOf=CN=Administrators,CN=Builtin,DC=domain,DC=com))))"),
HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS("hive.server2.custom.authentication.class", null,
"Custom authentication class. Used when property\n" +
"'hive.server2.authentication' is set to 'CUSTOM'. Provided class\n" +
"must be a proper implementation of the interface\n" +
"org.apache.hive.service.auth.PasswdAuthenticationProvider. HiveServer2\n" +
"will call its Authenticate(user, passed) method to authenticate requests.\n" +
"The implementation may optionally implement Hadoop's\n" +
"org.apache.hadoop.conf.Configurable class to grab Hive's Configuration object."),
HIVE_SERVER2_PAM_SERVICES("hive.server2.authentication.pam.services", null,
"List of the underlying pam services that should be used when auth type is PAM\n" +
"A file with the same name must exist in /etc/pam.d"),
HIVE_SERVER2_ENABLE_DOAS("hive.server2.enable.doAs", true,
"Setting this property to true will have HiveServer2 execute\n" +
"Hive operations as the user making the calls to it."),
HIVE_SERVER2_TABLE_TYPE_MAPPING("hive.server2.table.type.mapping", "CLASSIC", new StringSet("CLASSIC", "HIVE"),
"This setting reflects how HiveServer2 will report the table types for JDBC and other\n" +
"client implementations that retrieve the available tables and supported table types\n" +
" HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW\n" +
" CLASSIC : More generic types like TABLE and VIEW"),
HIVE_SERVER2_SESSION_HOOK("hive.server2.session.hook", "", ""),
// SSL settings
HIVE_SERVER2_USE_SSL("hive.server2.use.SSL", false,
"Set this to true for using SSL encryption in HiveServer2."),
HIVE_SERVER2_SSL_KEYSTORE_PATH("hive.server2.keystore.path", "",
"SSL certificate keystore location."),
HIVE_SERVER2_SSL_KEYSTORE_PASSWORD("hive.server2.keystore.password", "",
"SSL certificate keystore password."),
HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE("hive.server2.map.fair.scheduler.queue", true,
"If the YARN fair scheduler is configured and HiveServer2 is running in non-impersonation mode,\n" +
"this setting determines the user for fair scheduler queue mapping.\n" +
"If set to true (default), the logged-in user determines the fair scheduler queue\n" +
"for submitted jobs, so that map reduce resource usage can be tracked by user.\n" +
"If set to false, all Hive jobs go to the 'hive' user's queue."),
HIVE_SERVER2_BUILTIN_UDF_WHITELIST("hive.server2.builtin.udf.whitelist", "",
"Comma separated list of builtin udf names allowed in queries.\n" +
"An empty whitelist allows all builtin udfs to be executed. " +
" The udf black list takes precedence over udf white list"),
HIVE_SERVER2_BUILTIN_UDF_BLACKLIST("hive.server2.builtin.udf.blacklist", "",
"Comma separated list of udfs names. These udfs will not be allowed in queries." +
" The udf black list takes precedence over udf white list"),
HIVE_ALLOW_UDF_LOAD_ON_DEMAND("hive.allow.udf.load.on.demand", false,
"Whether enable loading UDFs from metastore on demand; this is mostly relevant for\n" +
"HS2 and was the default behavior before Hive 1.2. Off by default."),
HIVE_SERVER2_SESSION_CHECK_INTERVAL("hive.server2.session.check.interval", "6h",
new TimeValidator(TimeUnit.MILLISECONDS, 3000l, true, null, false),
"The check interval for session/operation timeout, which can be disabled by setting to zero or negative value."),
HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT("hive.server2.close.session.on.disconnect", true,
"Session will be closed when connection is closed. Set this to false to have session outlive its parent connection."),
HIVE_SERVER2_IDLE_SESSION_TIMEOUT("hive.server2.idle.session.timeout", "7d",
new TimeValidator(TimeUnit.MILLISECONDS),
"Session will be closed when it's not accessed for this duration, which can be disabled by setting to zero or negative value."),
HIVE_SERVER2_IDLE_OPERATION_TIMEOUT("hive.server2.idle.operation.timeout", "5d",
new TimeValidator(TimeUnit.MILLISECONDS),
"Operation will be closed when it's not accessed for this duration of time, which can be disabled by setting to zero value.\n" +
" With positive value, it's checked for operations in terminal state only (FINISHED, CANCELED, CLOSED, ERROR).\n" +
" With negative value, it's checked for all of the operations regardless of state."),
HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION("hive.server2.idle.session.check.operation", true,
"Session will be considered to be idle only if there is no activity, and there is no pending operation.\n" +
" This setting takes effect only if session idle timeout (hive.server2.idle.session.timeout) and checking\n" +
"(hive.server2.session.check.interval) are enabled."),
HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT("hive.server2.thrift.client.retry.limit", 1,"Number of retries upon " +
"failure of Thrift HiveServer2 calls"),
HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT("hive.server2.thrift.client.connect.retry.limit", 1,"Number of " +
"retries while opening a connection to HiveServe2"),
HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS("hive.server2.thrift.client.retry.delay.seconds", "1s",
new TimeValidator(TimeUnit.SECONDS), "Number of seconds for the HiveServer2 thrift client to wait between " +
"consecutive connection attempts. Also specifies the time to wait between retrying thrift calls upon failures"),
HIVE_SERVER2_THRIFT_CLIENT_USER("hive.server2.thrift.client.user", "anonymous","Username to use against thrift" +
" client"),
HIVE_SERVER2_THRIFT_CLIENT_PASSWORD("hive.server2.thrift.client.password", "anonymous","Password to use against " +
"thrift client"),
// ResultSet serialization settings
HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS("hive.server2.thrift.resultset.serialize.in.tasks", false,
"Whether we should serialize the Thrift structures used in JDBC ResultSet RPC in task nodes.\n " +
"We use SequenceFile and ThriftJDBCBinarySerDe to read and write the final results if this is true."),
// TODO: Make use of this config to configure fetch size
HIVE_SERVER2_THRIFT_RESULTSET_MAX_FETCH_SIZE("hive.server2.thrift.resultset.max.fetch.size",
10000, "Max number of rows sent in one Fetch RPC call by the server to the client."),
HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE("hive.server2.thrift.resultset.default.fetch.size", 1000,
"The number of rows sent in one Fetch RPC call by the server to the client, if not\n" +
"specified by the client."),
HIVE_SERVER2_XSRF_FILTER_ENABLED("hive.server2.xsrf.filter.enabled",false,
"If enabled, HiveServer2 will block any requests made to it over http " +
"if an X-XSRF-HEADER header is not present"),
HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist", "set,reset,dfs,add,list,delete,reload,compile",
"Comma separated list of non-SQL Hive commands users are authorized to execute"),
HIVE_SERVER2_JOB_CREDENTIAL_PROVIDER_PATH("hive.server2.job.credential.provider.path", "",
"If set, this configuration property should provide a comma-separated list of URLs that indicates the type and " +
"location of providers to be used by hadoop credential provider API. It provides HiveServer2 the ability to provide job-specific " +
"credential providers for jobs run using MR and Spark execution engines. This functionality has not been tested against Tez."),
HIVE_MOVE_FILES_THREAD_COUNT("hive.mv.files.thread", 15, new SizeValidator(0L, true, 1024L, true), "Number of threads"
+ " used to move files in move task. Set it to 0 to disable multi-threaded file moves. This parameter is also used by"
+ " MSCK to check tables."),
HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT("hive.load.dynamic.partitions.thread", 15,
new SizeValidator(1L, true, 1024L, true),
"Number of threads used to load dynamic partitions."),
// If this is set all move tasks at the end of a multi-insert query will only begin once all
// outputs are ready
HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES(
"hive.multi.insert.move.tasks.share.dependencies", false,
"If this is set all move tasks for tables/partitions (not directories) at the end of a\n" +
"multi-insert query will only begin once the dependencies for all these move tasks have been\n" +
"met.\n" +
"Advantages: If concurrency is enabled, the locks will only be released once the query has\n" +
" finished, so with this config enabled, the time when the table/partition is\n" +
" generated will be much closer to when the lock on it is released.\n" +
"Disadvantages: If concurrency is not enabled, with this disabled, the tables/partitions which\n" +
" are produced by this query and finish earlier will be available for querying\n" +
" much earlier. Since the locks are only released once the query finishes, this\n" +
" does not apply if concurrency is enabled."),
HIVE_INFER_BUCKET_SORT("hive.exec.infer.bucket.sort", false,
"If this is set, when writing partitions, the metadata will include the bucketing/sorting\n" +
"properties with which the data was written if any (this will not overwrite the metadata\n" +
"inherited from the table if the table is bucketed/sorted)"),
HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO(
"hive.exec.infer.bucket.sort.num.buckets.power.two", false,
"If this is set, when setting the number of reducers for the map reduce task which writes the\n" +
"final output files, it will choose a number which is a power of two, unless the user specifies\n" +
"the number of reducers to use using mapred.reduce.tasks. The number of reducers\n" +
"may be set to a power of two, only to be followed by a merge task meaning preventing\n" +
"anything from being inferred.\n" +
"With hive.exec.infer.bucket.sort set to true:\n" +
"Advantages: If this is not set, the number of buckets for partitions will seem arbitrary,\n" +
" which means that the number of mappers used for optimized joins, for example, will\n" +
" be very low. With this set, since the number of buckets used for any partition is\n" +
" a power of two, the number of mappers used for optimized joins will be the least\n" +
" number of buckets used by any partition being joined.\n" +
"Disadvantages: This may mean a much larger or much smaller number of reducers being used in the\n" +
" final map reduce job, e.g. if a job was originally going to take 257 reducers,\n" +
" it will now take 512 reducers, similarly if the max number of reducers is 511,\n" +
" and a job was going to use this many, it will now use 256 reducers."),
HIVEOPTLISTBUCKETING("hive.optimize.listbucketing", false,
"Enable list bucketing optimizer. Default value is false so that we disable it by default."),
    // Allow the TCP keepalive socket option for HiveServer, or a maximum timeout for the socket.
SERVER_READ_SOCKET_TIMEOUT("hive.server.read.socket.timeout", "10s",
new TimeValidator(TimeUnit.SECONDS),
"Timeout for the HiveServer to close the connection if no response from the client. By default, 10 seconds."),
SERVER_TCP_KEEP_ALIVE("hive.server.tcp.keepalive", true,
"Whether to enable TCP keepalive for the Hive Server. Keepalive will prevent accumulation of half-open connections."),
HIVE_DECODE_PARTITION_NAME("hive.decode.partition.name", false,
"Whether to show the unquoted partition names in query results."),
HIVE_EXECUTION_ENGINE("hive.execution.engine", "mr", new StringSet("mr", "tez", "spark"),
"Chooses execution engine. Options are: mr (Map reduce, default), tez, spark. While MR\n" +
"remains the default engine for historical reasons, it is itself a historical engine\n" +
"and is deprecated in Hive 2 line. It may be removed without further warning."),
HIVE_EXECUTION_MODE("hive.execution.mode", "container", new StringSet("container", "llap"),
"Chooses whether query fragments will run in container or in llap"),
HIVE_JAR_DIRECTORY("hive.jar.directory", null,
"This is the location hive in tez mode will look for to find a site wide \n" +
"installed hive instance."),
HIVE_USER_INSTALL_DIR("hive.user.install.directory", "/user/",
"If hive (in tez mode only) cannot find a usable hive jar in \"hive.jar.directory\", \n" +
"it will upload the hive jar to \"hive.user.install.directory/user.name\"\n" +
"and use it to run queries."),
// Vectorization enabled
HIVE_VECTORIZATION_ENABLED("hive.vectorized.execution.enabled", false,
"This flag should be set to true to enable vectorized mode of query execution.\n" +
"The default value is false."),
HIVE_VECTORIZATION_REDUCE_ENABLED("hive.vectorized.execution.reduce.enabled", true,
"This flag should be set to true to enable vectorized mode of the reduce-side of query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED("hive.vectorized.execution.reduce.groupby.enabled", true,
"This flag should be set to true to enable vectorized mode of the reduce-side GROUP BY query execution.\n" +
"The default value is true."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED("hive.vectorized.execution.mapjoin.native.enabled", true,
"This flag should be set to true to enable native (i.e. non-pass through) vectorization\n" +
"of queries using MapJoin.\n" +
"The default value is true."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED("hive.vectorized.execution.mapjoin.native.multikey.only.enabled", false,
"This flag should be set to true to restrict use of native vector map join hash tables to\n" +
"the MultiKey in queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED("hive.vectorized.execution.mapjoin.minmax.enabled", false,
"This flag should be set to true to enable vector map join hash tables to\n" +
"use max / max filtering for integer join queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD("hive.vectorized.execution.mapjoin.overflow.repeated.threshold", -1,
"The number of small table rows for a match in vector map join hash tables\n" +
"where we use the repeated field optimization in overflow vectorized row batch for join queries using MapJoin.\n" +
"A value of -1 means do use the join result optimization. Otherwise, threshold value can be 0 to maximum integer."),
HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED("hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled", false,
"This flag should be set to true to enable use of native fast vector map join hash tables in\n" +
"queries using MapJoin.\n" +
"The default value is false."),
HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL("hive.vectorized.groupby.checkinterval", 100000,
"Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed."),
HIVE_VECTORIZATION_GROUPBY_MAXENTRIES("hive.vectorized.groupby.maxentries", 1000000,
"Max number of entries in the vector group by aggregation hashtables. \n" +
"Exceeding this will trigger a flush irrelevant of memory pressure condition."),
HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1,
"Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded."),
HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED("hive.vectorized.execution.reducesink.new.enabled", true,
"This flag should be set to true to enable the new vectorization\n" +
"of queries using ReduceSink.\ni" +
"The default value is true."),
HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT("hive.vectorized.use.vectorized.input.format", true,
"This flag should be set to true to enable vectorizing with vectorized input file format capable SerDe.\n" +
"The default value is true."),
HIVE_VECTORIZATION_USE_VECTOR_DESERIALIZE("hive.vectorized.use.vector.serde.deserialize", true,
"This flag should be set to true to enable vectorizing rows using vector deserialize.\n" +
"The default value is true."),
HIVE_VECTORIZATION_USE_ROW_DESERIALIZE("hive.vectorized.use.row.serde.deserialize", false,
"This flag should be set to true to enable vectorizing using row deserialize.\n" +
"The default value is false."),
HIVE_VECTOR_ADAPTOR_USAGE_MODE("hive.vectorized.adaptor.usage.mode", "all", new StringSet("none", "chosen", "all"),
"Specifies the extent to which the VectorUDFAdaptor will be used for UDFs that do not have a cooresponding vectorized class.\n" +
"0. none : disable any usage of VectorUDFAdaptor\n" +
"1. chosen : use VectorUDFAdaptor for a small set of UDFs that were choosen for good performance\n" +
"2. all : use VectorUDFAdaptor for all UDFs"
),
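    // For example, enabling vectorized execution for a session while keeping the adaptor
    // restricted to the curated UDF set; the combination shown is illustrative, not a
    // recommended default.
    //
    //   SET hive.vectorized.execution.enabled=true;
    //   SET hive.vectorized.adaptor.usage.mode=chosen;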
    HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true, "This property has been extended to control "
        + "whether to check, convert, and normalize partition values to conform to their column types in "
        + "partition operations, including but not limited to insert, alter, describe, etc."),
HIVE_HADOOP_CLASSPATH("hive.hadoop.classpath", null,
"For Windows OS, we need to pass HIVE_HADOOP_CLASSPATH Java parameter while starting HiveServer2 \n" +
"using \"-hiveconf hive.hadoop.classpath=%HIVE_LIB%\"."),
HIVE_RPC_QUERY_PLAN("hive.rpc.query.plan", false,
"Whether to send the query plan via local resource or RPC"),
HIVE_AM_SPLIT_GENERATION("hive.compute.splits.in.am", true,
"Whether to generate the splits locally or in the AM (tez only)"),
HIVE_TEZ_GENERATE_CONSISTENT_SPLITS("hive.tez.input.generate.consistent.splits", true,
"Whether to generate consistent split locations when generating splits in the AM"),
HIVE_PREWARM_ENABLED("hive.prewarm.enabled", false, "Enables container prewarm for Tez/Spark (Hadoop 2 only)"),
HIVE_PREWARM_NUM_CONTAINERS("hive.prewarm.numcontainers", 10, "Controls the number of containers to prewarm for Tez/Spark (Hadoop 2 only)"),
HIVESTAGEIDREARRANGE("hive.stageid.rearrange", "none", new StringSet("none", "idonly", "traverse", "execution"), ""),
HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES("hive.explain.dependency.append.tasktype", false, ""),
HIVECOUNTERGROUP("hive.counters.group.name", "HIVE",
"The name of counter group for internal Hive variables (CREATED_FILE, FATAL_ERROR, etc.)"),
HIVE_QUOTEDID_SUPPORT("hive.support.quoted.identifiers", "column",
new StringSet("none", "column"),
"Whether to use quoted identifier. 'none' or 'column' can be used. \n" +
" none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" +
" column: implies column names can contain any character."
),
HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("hive.support.special.characters.tablename", true,
"This flag should be set to true to enable support for special characters in table names.\n"
+ "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n"
+ "The only supported special character right now is '/'. This flag applies only to quoted table names.\n"
+ "The default value is true."),
// role names are case-insensitive
USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "", false,
"Comma separated list of users who are in admin role for bootstrapping.\n" +
"More users can be added in ADMIN role later."),
HIVE_COMPAT("hive.compat", HiveCompat.DEFAULT_COMPAT_LEVEL,
"Enable (configurable) deprecated behaviors by setting desired level of backward compatibility.\n" +
"Setting to 0.12:\n" +
" Maintains division behavior: int / int = double"),
HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ("hive.convert.join.bucket.mapjoin.tez", false,
"Whether joins can be automatically converted to bucket map joins in hive \n" +
"when tez is used as the execution engine."),
HIVE_CHECK_CROSS_PRODUCT("hive.exec.check.crossproducts", true,
"Check if a plan contains a Cross Product. If there is one, output a warning to the Session's console."),
HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL("hive.localize.resource.wait.interval", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Time to wait for another thread to localize the same resource for hive-tez."),
HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS("hive.localize.resource.num.wait.attempts", 5,
"The number of attempts waiting for localizing a resource in hive-tez."),
TEZ_AUTO_REDUCER_PARALLELISM("hive.tez.auto.reducer.parallelism", false,
"Turn on Tez' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes\n" +
"and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as\n" +
"necessary."),
TEZ_LLAP_MIN_REDUCER_PER_EXECUTOR("hive.tez.llap.min.reducer.per.executor", 0.95f,
"If above 0, the min number of reducers for auto-parallelism for LLAP scheduling will\n" +
"be set to this fraction of the number of executors."),
TEZ_MAX_PARTITION_FACTOR("hive.tez.max.partition.factor", 2f,
"When auto reducer parallelism is enabled this factor will be used to over-partition data in shuffle edges."),
TEZ_MIN_PARTITION_FACTOR("hive.tez.min.partition.factor", 0.25f,
"When auto reducer parallelism is enabled this factor will be used to put a lower limit to the number\n" +
"of reducers that tez specifies."),
TEZ_OPTIMIZE_BUCKET_PRUNING(
"hive.tez.bucket.pruning", false,
"When pruning is enabled, filters on bucket columns will be processed by \n" +
"filtering the splits against a bitset of included buckets. This needs predicates \n"+
"produced by hive.optimize.ppd and hive.optimize.index.filters."),
TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT(
"hive.tez.bucket.pruning.compat", true,
"When pruning is enabled, handle possibly broken inserts due to negative hashcodes.\n" +
"This occasionally doubles the data scan cost, but is default enabled for safety"),
TEZ_DYNAMIC_PARTITION_PRUNING(
"hive.tez.dynamic.partition.pruning", true,
"When dynamic pruning is enabled, joins on partition keys will be processed by sending\n" +
"events from the processing vertices to the Tez application master. These events will be\n" +
"used to prune unnecessary partitions."),
TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE("hive.tez.dynamic.partition.pruning.max.event.size", 1*1024*1024L,
"Maximum size of events sent by processors in dynamic pruning. If this size is crossed no pruning will take place."),
TEZ_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE("hive.tez.dynamic.partition.pruning.max.data.size", 100*1024*1024L,
"Maximum total data size of events in dynamic pruning."),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION("hive.tez.dynamic.semijoin.reduction", true,
"When dynamic semijoin is enabled, shuffle joins will perform a leaky semijoin before shuffle. This " +
"requires hive.tez.dynamic.partition.pruning to be enabled."),
TEZ_MIN_BLOOM_FILTER_ENTRIES("hive.tez.min.bloom.filter.entries", 1000000L,
"Bloom filter should be of at min certain size to be effective"),
TEZ_MAX_BLOOM_FILTER_ENTRIES("hive.tez.max.bloom.filter.entries", 100000000L,
"Bloom filter should be of at max certain size to be effective"),
TEZ_BLOOM_FILTER_FACTOR("hive.tez.bloom.filter.factor", (float) 2.0,
"Bloom filter should be a multiple of this factor with nDV"),
TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION("hive.tez.bigtable.minsize.semijoin.reduction", 100000000L,
"Big table for runtime filteting should be of atleast this size"),
TEZ_DYNAMIC_SEMIJOIN_REDUCTION_THRESHOLD("hive.tez.dynamic.semijoin.reduction.threshold", (float) 0.50,
"Only perform semijoin optimization if the estimated benefit at or above this fraction of the target table"),
TEZ_SMB_NUMBER_WAVES(
"hive.tez.smb.number.waves",
(float) 0.5,
"The number of waves in which to run the SMB join. Account for cluster being occupied. Ideally should be 1 wave."),
TEZ_EXEC_SUMMARY(
"hive.tez.exec.print.summary",
false,
"Display breakdown of execution steps, for every query executed by the shell."),
TEZ_EXEC_INPLACE_PROGRESS(
"hive.tez.exec.inplace.progress",
true,
"Updates tez job execution progress in-place in the terminal when hive-cli is used."),
HIVE_SERVER2_INPLACE_PROGRESS(
"hive.server2.in.place.progress",
true,
"Allows hive server 2 to send progress bar update information. This is currently available"
+ " only if the execution engine is tez."),
SPARK_EXEC_INPLACE_PROGRESS("hive.spark.exec.inplace.progress", true,
"Updates spark job execution progress in-place in the terminal."),
TEZ_CONTAINER_MAX_JAVA_HEAP_FRACTION("hive.tez.container.max.java.heap.fraction", 0.8f,
"This is to override the tez setting with the same name"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MIN("hive.tez.task.scale.memory.reserve-fraction.min",
0.3f, "This is to override the tez setting tez.task.scale.memory.reserve-fraction"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION_MAX("hive.tez.task.scale.memory.reserve.fraction.max",
0.5f, "The maximum fraction of JVM memory which Tez will reserve for the processor"),
TEZ_TASK_SCALE_MEMORY_RESERVE_FRACTION("hive.tez.task.scale.memory.reserve.fraction",
-1f, "The customized fraction of JVM memory which Tez will reserve for the processor"),
// The default is different on the client and server, so it's null here.
LLAP_IO_ENABLED("hive.llap.io.enabled", null, "Whether the LLAP IO layer is enabled."),
LLAP_IO_NONVECTOR_WRAPPER_ENABLED("hive.llap.io.nonvector.wrapper.enabled", true,
"Whether the LLAP IO layer is enabled for non-vectorized queries that read inputs\n" +
"that can be vectorized"),
LLAP_IO_MEMORY_MODE("hive.llap.io.memory.mode", "cache",
new StringSet("cache", "none"),
"LLAP IO memory usage; 'cache' (the default) uses data and metadata cache with a\n" +
"custom off-heap allocator, 'none' doesn't use either (this mode may result in\n" +
"significant performance degradation)"),
LLAP_ALLOCATOR_MIN_ALLOC("hive.llap.io.allocator.alloc.min", "256Kb", new SizeValidator(),
"Minimum allocation possible from LLAP buddy allocator. Allocations below that are\n" +
"padded to minimum allocation. For ORC, should generally be the same as the expected\n" +
"compression buffer size, or next lowest power of 2. Must be a power of 2."),
LLAP_ALLOCATOR_MAX_ALLOC("hive.llap.io.allocator.alloc.max", "16Mb", new SizeValidator(),
"Maximum allocation possible from LLAP buddy allocator. For ORC, should be as large as\n" +
"the largest expected ORC compression buffer size. Must be a power of 2."),
@Deprecated
LLAP_IO_METADATA_FRACTION("hive.llap.io.metadata.fraction", 0.1f,
"Temporary setting for on-heap metadata cache fraction of xmx, set to avoid potential\n" +
"heap problems on very large datasets when on-heap metadata cache takes over\n" +
"everything. -1 managed metadata and data together (which is more flexible). This\n" +
"setting will be removed (in effect become -1) once ORC metadata cache is moved off-heap."),
LLAP_ALLOCATOR_ARENA_COUNT("hive.llap.io.allocator.arena.count", 8,
"Arena count for LLAP low-level cache; cache will be allocated in the steps of\n" +
"(size/arena_count) bytes. This size must be <= 1Gb and >= max allocation; if it is\n" +
"not the case, an adjusted size will be used. Using powers of 2 is recommended."),
LLAP_IO_MEMORY_MAX_SIZE("hive.llap.io.memory.size", "1Gb", new SizeValidator(),
"Maximum size for IO allocator or ORC low-level cache.", "hive.llap.io.cache.orc.size"),
LLAP_ALLOCATOR_DIRECT("hive.llap.io.allocator.direct", true,
"Whether ORC low-level cache should use direct allocation."),
LLAP_ALLOCATOR_MAPPED("hive.llap.io.allocator.mmap", false,
"Whether ORC low-level cache should use memory mapped allocation (direct I/O). \n" +
"This is recommended to be used along-side NVDIMM (DAX) or NVMe flash storage."),
LLAP_ALLOCATOR_MAPPED_PATH("hive.llap.io.allocator.mmap.path", "/tmp",
new WritableDirectoryValidator(),
"The directory location for mapping NVDIMM/NVMe flash storage into the ORC low-level cache."),
LLAP_USE_LRFU("hive.llap.io.use.lrfu", true,
"Whether ORC low-level cache should use LRFU cache policy instead of default (FIFO)."),
LLAP_LRFU_LAMBDA("hive.llap.io.lrfu.lambda", 0.01f,
"Lambda for ORC low-level cache LRFU cache policy. Must be in [0, 1]. 0 makes LRFU\n" +
"behave like LFU, 1 makes it behave like LRU, values in between balance accordingly."),
LLAP_CACHE_ALLOW_SYNTHETIC_FILEID("hive.llap.cache.allow.synthetic.fileid", false,
"Whether LLAP cache should use synthetic file ID if real one is not available. Systems\n" +
"like HDFS, Isilon, etc. provide a unique file/inode ID. On other FSes (e.g. local\n" +
"FS), the cache would not work by default because LLAP is unable to uniquely track the\n" +
"files; enabling this setting allows LLAP to generate file ID from the path, size and\n" +
"modification time, which is almost certain to identify file uniquely. However, if you\n" +
"use a FS without file IDs and rewrite files a lot (or are paranoid), you might want\n" +
"to avoid this setting."),
LLAP_CACHE_ENABLE_ORC_GAP_CACHE("hive.llap.orc.gap.cache", true,
"Whether LLAP cache for ORC should remember gaps in ORC compression buffer read\n" +
"estimates, to avoid re-reading the data that was read once and discarded because it\n" +
"is unneeded. This is only necessary for ORC files written before HIVE-9660."),
LLAP_IO_USE_FILEID_PATH("hive.llap.io.use.fileid.path", true,
"Whether LLAP should use fileId (inode)-based path to ensure better consistency for the\n" +
"cases of file overwrites. This is supported on HDFS."),
// Restricted to text for now as this is a new feature; only text files can be sliced.
LLAP_IO_ENCODE_ENABLED("hive.llap.io.encode.enabled", true,
"Whether LLAP should try to re-encode and cache data for non-ORC formats. This is used\n" +
"on LLAP Server side to determine if the infrastructure for that is initialized."),
LLAP_IO_ENCODE_FORMATS("hive.llap.io.encode.formats",
"org.apache.hadoop.mapred.TextInputFormat,",
"The table input formats for which LLAP IO should re-encode and cache data.\n" +
"Comma-separated list."),
LLAP_IO_ENCODE_ALLOC_SIZE("hive.llap.io.encode.alloc.size", "256Kb", new SizeValidator(),
"Allocation size for the buffers used to cache encoded data from non-ORC files. Must\n" +
"be a power of two between " + LLAP_ALLOCATOR_MIN_ALLOC + " and\n" +
LLAP_ALLOCATOR_MAX_ALLOC + "."),
LLAP_IO_ENCODE_VECTOR_SERDE_ENABLED("hive.llap.io.encode.vector.serde.enabled", true,
"Whether LLAP should use vectorized SerDe reader to read text data when re-encoding."),
LLAP_IO_ENCODE_VECTOR_SERDE_ASYNC_ENABLED("hive.llap.io.encode.vector.serde.async.enabled",
true,
"Whether LLAP should use async mode in vectorized SerDe reader to read text data."),
LLAP_IO_ENCODE_SLICE_ROW_COUNT("hive.llap.io.encode.slice.row.count", 100000,
"Row count to use to separate cache slices when reading encoded data from row-based\n" +
"inputs into LLAP cache, if this feature is enabled."),
LLAP_IO_ENCODE_SLICE_LRR("hive.llap.io.encode.slice.lrr", true,
"Whether to separate cache slices when reading encoded data from text inputs via MR\n" +
"MR LineRecordRedader into LLAP cache, if this feature is enabled. Safety flag."),
LLAP_ORC_ENABLE_TIME_COUNTERS("hive.llap.io.orc.time.counters", true,
"Whether to enable time counters for LLAP IO layer (time spent in HDFS, etc.)"),
LLAP_AUTO_ALLOW_UBER("hive.llap.auto.allow.uber", false,
"Whether or not to allow the planner to run vertices in the AM."),
LLAP_AUTO_ENFORCE_TREE("hive.llap.auto.enforce.tree", true,
"Enforce that all parents are in llap, before considering vertex"),
LLAP_AUTO_ENFORCE_VECTORIZED("hive.llap.auto.enforce.vectorized", true,
"Enforce that inputs are vectorized, before considering vertex"),
LLAP_AUTO_ENFORCE_STATS("hive.llap.auto.enforce.stats", true,
"Enforce that col stats are available, before considering vertex"),
LLAP_AUTO_MAX_INPUT("hive.llap.auto.max.input.size", 10*1024*1024*1024L,
"Check input size, before considering vertex (-1 disables check)"),
LLAP_AUTO_MAX_OUTPUT("hive.llap.auto.max.output.size", 1*1024*1024*1024L,
"Check output size, before considering vertex (-1 disables check)"),
LLAP_SKIP_COMPILE_UDF_CHECK("hive.llap.skip.compile.udf.check", false,
"Whether to skip the compile-time check for non-built-in UDFs when deciding whether to\n" +
"execute tasks in LLAP. Skipping the check allows executing UDFs from pre-localized\n" +
"jars in LLAP; if the jars are not pre-localized, the UDFs will simply fail to load."),
LLAP_ALLOW_PERMANENT_FNS("hive.llap.allow.permanent.fns", true,
"Whether LLAP decider should allow permanent UDFs."),
LLAP_EXECUTION_MODE("hive.llap.execution.mode", "none",
new StringSet("auto", "none", "all", "map", "only"),
"Chooses whether query fragments will run in container or in llap"),
LLAP_OBJECT_CACHE_ENABLED("hive.llap.object.cache.enabled", true,
"Cache objects (plans, hashtables, etc) in llap"),
LLAP_IO_DECODING_METRICS_PERCENTILE_INTERVALS("hive.llap.io.decoding.metrics.percentiles.intervals", "30",
"Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" +
"for percentile latency metrics on the LLAP daemon IO decoding time.\n" +
"hive.llap.queue.metrics.percentiles.intervals"),
LLAP_IO_THREADPOOL_SIZE("hive.llap.io.threadpool.size", 10,
"Specify the number of threads to use for low-level IO thread pool."),
LLAP_KERBEROS_PRINCIPAL(HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME, "",
"The name of the LLAP daemon's service principal."),
LLAP_KERBEROS_KEYTAB_FILE("hive.llap.daemon.keytab.file", "",
"The path to the Kerberos Keytab file containing the LLAP daemon's service principal."),
LLAP_ZKSM_KERBEROS_PRINCIPAL("hive.llap.zk.sm.principal", "",
"The name of the principal to use to talk to ZooKeeper for ZooKeeper SecretManager."),
LLAP_ZKSM_KERBEROS_KEYTAB_FILE("hive.llap.zk.sm.keytab.file", "",
"The path to the Kerberos Keytab file containing the principal to use to talk to\n" +
"ZooKeeper for ZooKeeper SecretManager."),
LLAP_WEBUI_SPNEGO_KEYTAB_FILE("hive.llap.webui.spnego.keytab", "",
"The path to the Kerberos Keytab file containing the LLAP WebUI SPNEGO principal.\n" +
"Typical value would look like /etc/security/keytabs/spnego.service.keytab."),
LLAP_WEBUI_SPNEGO_PRINCIPAL("hive.llap.webui.spnego.principal", "",
"The LLAP WebUI SPNEGO service principal. Configured similarly to\n" +
"hive.server2.webui.spnego.principal"),
LLAP_FS_KERBEROS_PRINCIPAL("hive.llap.task.principal", "",
"The name of the principal to use to run tasks. By default, the clients are required\n" +
"to provide tokens to access HDFS/etc."),
LLAP_FS_KERBEROS_KEYTAB_FILE("hive.llap.task.keytab.file", "",
"The path to the Kerberos Keytab file containing the principal to use to run tasks.\n" +
"By default, the clients are required to provide tokens to access HDFS/etc."),
LLAP_ZKSM_ZK_CONNECTION_STRING("hive.llap.zk.sm.connectionString", "",
"ZooKeeper connection string for ZooKeeper SecretManager."),
LLAP_ZK_REGISTRY_USER("hive.llap.zk.registry.user", "",
"In the LLAP ZooKeeper-based registry, specifies the username in the Zookeeper path.\n" +
"This should be the hive user or whichever user is running the LLAP daemon."),
LLAP_ZK_REGISTRY_NAMESPACE("hive.llap.zk.registry.namespace", null,
"In the LLAP ZooKeeper-based registry, overrides the ZK path namespace. Note that\n" +
"using this makes the path management (e.g. setting correct ACLs) your responsibility."),
// Note: do not rename to ..service.acl; Hadoop generates .hosts setting name from this,
// resulting in a collision with existing hive.llap.daemon.service.hosts and bizarre errors.
// These are read by Hadoop IPC, so you should check the usage and naming conventions (e.g.
// ".blocked" is a string hardcoded by Hadoop, and defaults are enforced elsewhere in Hive)
// before making changes or copy-pasting these.
LLAP_SECURITY_ACL("hive.llap.daemon.acl", "*", "The ACL for LLAP daemon."),
LLAP_SECURITY_ACL_DENY("hive.llap.daemon.acl.blocked", "", "The deny ACL for LLAP daemon."),
LLAP_MANAGEMENT_ACL("hive.llap.management.acl", "*", "The ACL for LLAP daemon management."),
LLAP_MANAGEMENT_ACL_DENY("hive.llap.management.acl.blocked", "",
"The deny ACL for LLAP daemon management."),
LLAP_REMOTE_TOKEN_REQUIRES_SIGNING("hive.llap.remote.token.requires.signing", "true",
new StringSet("false", "except_llap_owner", "true"),
"Whether the token returned from LLAP management API should require fragment signing.\n" +
"True by default; can be disabled to allow CLI to get tokens from LLAP in a secure\n" +
"cluster by setting it to true or 'except_llap_owner' (the latter returns such tokens\n" +
"to everyone except the user LLAP cluster is authenticating under)."),
// Hadoop DelegationTokenManager default is 1 week.
LLAP_DELEGATION_TOKEN_LIFETIME("hive.llap.daemon.delegation.token.lifetime", "14d",
new TimeValidator(TimeUnit.SECONDS),
"LLAP delegation token lifetime, in seconds if specified without a unit."),
LLAP_MANAGEMENT_RPC_PORT("hive.llap.management.rpc.port", 15004,
"RPC port for LLAP daemon management service."),
LLAP_WEB_AUTO_AUTH("hive.llap.auto.auth", false,
"Whether or not to set Hadoop configs to enable auth in LLAP web app."),
LLAP_DAEMON_RPC_NUM_HANDLERS("hive.llap.daemon.rpc.num.handlers", 5,
"Number of RPC handlers for LLAP daemon.", "llap.daemon.rpc.num.handlers"),
LLAP_DAEMON_WORK_DIRS("hive.llap.daemon.work.dirs", "",
"Working directories for the daemon. This should not be set if running as a YARN\n" +
"application via Slider. It must be set when not running via Slider on YARN. If the value\n" +
"is set when running as a Slider YARN application, the specified value will be used.",
"llap.daemon.work.dirs"),
LLAP_DAEMON_YARN_SHUFFLE_PORT("hive.llap.daemon.yarn.shuffle.port", 15551,
"YARN shuffle port for LLAP-daemon-hosted shuffle.", "llap.daemon.yarn.shuffle.port"),
LLAP_DAEMON_YARN_CONTAINER_MB("hive.llap.daemon.yarn.container.mb", -1,
"llap server yarn container size in MB. Used in LlapServiceDriver and package.py", "llap.daemon.yarn.container.mb"),
LLAP_DAEMON_QUEUE_NAME("hive.llap.daemon.queue.name", null,
"Queue name within which the llap slider application will run." +
" Used in LlapServiceDriver and package.py"),
// TODO Move the following 2 properties out of Configuration to a constant.
LLAP_DAEMON_CONTAINER_ID("hive.llap.daemon.container.id", null,
"ContainerId of a running LlapDaemon. Used to publish to the registry"),
LLAP_DAEMON_NM_ADDRESS("hive.llap.daemon.nm.address", null,
"NM Address host:rpcPort for the NodeManager on which the instance of the daemon is running.\n" +
"Published to the llap registry. Should never be set by users"),
LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED("hive.llap.daemon.shuffle.dir.watcher.enabled", false,
"TODO doc", "llap.daemon.shuffle.dir-watcher.enabled"),
LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS(
"hive.llap.daemon.am.liveness.heartbeat.interval.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Tez AM-LLAP heartbeat interval (milliseconds). This needs to be below the task timeout\n" +
"interval, but otherwise as high as possible to avoid unnecessary traffic.",
"llap.daemon.am.liveness.heartbeat.interval-ms"),
LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS(
"hive.llap.am.liveness.connection.timeout.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Amount of time to wait on connection failures to the AM from an LLAP daemon before\n" +
"considering the AM to be dead.", "llap.am.liveness.connection.timeout-millis"),
LLAP_DAEMON_AM_USE_FQDN("hive.llap.am.use.fqdn", false,
"Whether to use FQDN of the AM machine when submitting work to LLAP."),
// Not used yet - since the Writable RPC engine does not support this policy.
LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS(
"hive.llap.am.liveness.connection.sleep.between.retries.ms", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Sleep duration while waiting to retry connection failures to the AM from the daemon for\n" +
"the general keep-alive thread (milliseconds).",
"llap.am.liveness.connection.sleep-between-retries-millis"),
LLAP_DAEMON_TASK_SCHEDULER_TIMEOUT_SECONDS(
"hive.llap.task.scheduler.timeout.seconds", "60s",
new TimeValidator(TimeUnit.SECONDS),
"Amount of time to wait before failing the query when there are no llap daemons running\n" +
"(alive) in the cluster.", "llap.daemon.scheduler.timeout.seconds"),
LLAP_DAEMON_NUM_EXECUTORS("hive.llap.daemon.num.executors", 4,
"Number of executors to use in LLAP daemon; essentially, the number of tasks that can be\n" +
"executed in parallel.", "llap.daemon.num.executors"),
LLAP_MAPJOIN_MEMORY_OVERSUBSCRIBE_FACTOR("hive.llap.mapjoin.memory.oversubscribe.factor", 0.2f,
"Fraction of memory from hive.auto.convert.join.noconditionaltask.size that can be over subscribed\n" +
"by queries running in LLAP mode. This factor has to be from 0.0 to 1.0. Default is 20% over subscription.\n"),
LLAP_MEMORY_OVERSUBSCRIPTION_MAX_EXECUTORS_PER_QUERY("hive.llap.memory.oversubscription.max.executors.per.query", 3,
"Used along with hive.llap.mapjoin.memory.oversubscribe.factor to limit the number of executors from\n" +
"which memory for mapjoin can be borrowed. Default 3 (from 3 other executors\n" +
"hive.llap.mapjoin.memory.oversubscribe.factor amount of memory can be borrowed based on which mapjoin\n" +
"conversion decision will be made). This is only an upper bound. Lower bound is determined by number of\n" +
"executors and configured max concurrency."),
LLAP_MAPJOIN_MEMORY_MONITOR_CHECK_INTERVAL("hive.llap.mapjoin.memory.monitor.check.interval", 100000L,
"Check memory usage of mapjoin hash tables after every interval of this many rows. If map join hash table\n" +
"memory usage exceeds (hive.auto.convert.join.noconditionaltask.size * hive.hash.table.inflation.factor)\n" +
"when running in LLAP, tasks will get killed and not retried. Set the value to 0 to disable this feature."),
LLAP_DAEMON_AM_REPORTER_MAX_THREADS("hive.llap.daemon.am-reporter.max.threads", 4,
"Maximum number of threads to be used for AM reporter. If this is lower than number of\n" +
"executors in llap daemon, it would be set to number of executors at runtime.",
"llap.daemon.am-reporter.max.threads"),
LLAP_DAEMON_RPC_PORT("hive.llap.daemon.rpc.port", 0, "The LLAP daemon RPC port.",
"llap.daemon.rpc.port. A value of 0 indicates a dynamic port"),
LLAP_DAEMON_MEMORY_PER_INSTANCE_MB("hive.llap.daemon.memory.per.instance.mb", 4096,
"The total amount of memory to use for the executors inside LLAP (in megabytes).",
"llap.daemon.memory.per.instance.mb"),
LLAP_DAEMON_XMX_HEADROOM("hive.llap.daemon.xmx.headroom", "5%",
"The total amount of heap memory set aside by LLAP and not used by the executors. Can\n" +
"be specified as size (e.g. '512Mb'), or percentage (e.g. '5%'). Note that the latter is\n" +
"derived from the total daemon XMX, which can be different from the total executor\n" +
"memory if the cache is on-heap; although that's not the default configuration."),
LLAP_DAEMON_VCPUS_PER_INSTANCE("hive.llap.daemon.vcpus.per.instance", 4,
"The total number of vcpus to use for the executors inside LLAP.",
"llap.daemon.vcpus.per.instance"),
LLAP_DAEMON_NUM_FILE_CLEANER_THREADS("hive.llap.daemon.num.file.cleaner.threads", 1,
"Number of file cleaner threads in LLAP.", "llap.daemon.num.file.cleaner.threads"),
LLAP_FILE_CLEANUP_DELAY_SECONDS("hive.llap.file.cleanup.delay.seconds", "300s",
new TimeValidator(TimeUnit.SECONDS),
"How long to delay before cleaning up query files in LLAP (in seconds, for debugging).",
"llap.file.cleanup.delay-seconds"),
LLAP_DAEMON_SERVICE_HOSTS("hive.llap.daemon.service.hosts", null,
"Explicitly specified hosts to use for LLAP scheduling. Useful for testing. By default,\n" +
"YARN registry is used.", "llap.daemon.service.hosts"),
LLAP_DAEMON_SERVICE_REFRESH_INTERVAL("hive.llap.daemon.service.refresh.interval.sec", "60s",
new TimeValidator(TimeUnit.SECONDS),
"LLAP YARN registry service list refresh delay, in seconds.",
"llap.daemon.service.refresh.interval"),
LLAP_DAEMON_COMMUNICATOR_NUM_THREADS("hive.llap.daemon.communicator.num.threads", 10,
"Number of threads to use in LLAP task communicator in Tez AM.",
"llap.daemon.communicator.num.threads"),
LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS("hive.llap.daemon.download.permanent.fns", false,
"Whether LLAP daemon should localize the resources for permanent UDFs."),
LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS(
"hive.llap.task.scheduler.node.reenable.min.timeout.ms", "200ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Minimum time after which a previously disabled node will be re-enabled for scheduling,\n" +
"in milliseconds. This may be modified by an exponential back-off if failures persist.",
"llap.task.scheduler.node.re-enable.min.timeout.ms"),
LLAP_TASK_SCHEDULER_NODE_REENABLE_MAX_TIMEOUT_MS(
"hive.llap.task.scheduler.node.reenable.max.timeout.ms", "10000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Maximum time after which a previously disabled node will be re-enabled for scheduling,\n" +
"in milliseconds. This may be modified by an exponential back-off if failures persist.",
"llap.task.scheduler.node.re-enable.max.timeout.ms"),
LLAP_TASK_SCHEDULER_NODE_DISABLE_BACK_OFF_FACTOR(
"hive.llap.task.scheduler.node.disable.backoff.factor", 1.5f,
"Backoff factor on successive blacklists of a node due to some failures. Blacklist times\n" +
"start at the min timeout and go up to the max timeout based on this backoff factor.",
"llap.task.scheduler.node.disable.backoff.factor"),
LLAP_TASK_SCHEDULER_NUM_SCHEDULABLE_TASKS_PER_NODE(
"hive.llap.task.scheduler.num.schedulable.tasks.per.node", 0,
"The number of tasks the AM TaskScheduler will try allocating per node. 0 indicates that\n" +
"this should be picked up from the Registry. -1 indicates unlimited capacity; positive\n" +
"values indicate a specific bound.", "llap.task.scheduler.num.schedulable.tasks.per.node"),
LLAP_TASK_SCHEDULER_LOCALITY_DELAY(
"hive.llap.task.scheduler.locality.delay", "0ms",
new TimeValidator(TimeUnit.MILLISECONDS, -1l, true, Long.MAX_VALUE, true),
"Amount of time to wait before allocating a request which contains location information," +
" to a location other than the ones requested. Set to -1 for an infinite delay, 0" +
"for no delay."
),
LLAP_DAEMON_TASK_PREEMPTION_METRICS_INTERVALS(
"hive.llap.daemon.task.preemption.metrics.intervals", "30,60,300",
"Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" +
" for percentile latency metrics. Used by LLAP daemon task scheduler metrics for\n" +
" time taken to kill task (due to pre-emption) and useful time wasted by the task that\n" +
" is about to be preempted."
),
LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE("hive.llap.daemon.task.scheduler.wait.queue.size",
10, "LLAP scheduler maximum queue size.", "llap.daemon.task.scheduler.wait.queue.size"),
LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME(
"hive.llap.daemon.wait.queue.comparator.class.name",
"org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator",
"The priority comparator to use for LLAP scheduler prioroty queue. The built-in options\n" +
"are org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator and\n" +
".....FirstInFirstOutComparator", "llap.daemon.wait.queue.comparator.class.name"),
LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION(
"hive.llap.daemon.task.scheduler.enable.preemption", true,
"Whether non-finishable running tasks (e.g. a reducer waiting for inputs) should be\n" +
"preempted by finishable tasks inside LLAP scheduler.",
"llap.daemon.task.scheduler.enable.preemption"),
LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS(
"hive.llap.task.communicator.connection.timeout.ms", "16000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Connection timeout (in milliseconds) before a failure to an LLAP daemon from Tez AM.",
"llap.task.communicator.connection.timeout-millis"),
LLAP_TASK_COMMUNICATOR_LISTENER_THREAD_COUNT(
"hive.llap.task.communicator.listener.thread-count", 30,
"The number of task communicator listener threads."),
LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS(
"hive.llap.task.communicator.connection.sleep.between.retries.ms", "2000ms",
new TimeValidator(TimeUnit.MILLISECONDS),
"Sleep duration (in milliseconds) to wait before retrying on error when obtaining a\n" +
"connection to LLAP daemon from Tez AM.",
"llap.task.communicator.connection.sleep-between-retries-millis"),
LLAP_DAEMON_WEB_PORT("hive.llap.daemon.web.port", 15002, "LLAP daemon web UI port.",
"llap.daemon.service.port"),
LLAP_DAEMON_WEB_SSL("hive.llap.daemon.web.ssl", false,
"Whether LLAP daemon web UI should use SSL.", "llap.daemon.service.ssl"),
LLAP_CLIENT_CONSISTENT_SPLITS("hive.llap.client.consistent.splits", false,
"Whether to setup split locations to match nodes on which llap daemons are running, " +
"instead of using the locations provided by the split itself. If there is no llap daemon " +
"running, fall back to locations provided by the split. This is effective only if " +
"hive.execution.mode is llap"),
LLAP_VALIDATE_ACLS("hive.llap.validate.acls", true,
"Whether LLAP should reject permissive ACLs in some cases (e.g. its own management\n" +
"protocol or ZK paths), similar to how ssh refuses a key with bad access permissions."),
LLAP_DAEMON_OUTPUT_SERVICE_PORT("hive.llap.daemon.output.service.port", 15003,
"LLAP daemon output service port"),
LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT("hive.llap.daemon.output.stream.timeout", "120s",
new TimeValidator(TimeUnit.SECONDS),
"The timeout for the client to connect to LLAP output service and start the fragment\n" +
"output after sending the fragment. The fragment will fail if its output is not claimed."),
LLAP_DAEMON_OUTPUT_SERVICE_SEND_BUFFER_SIZE("hive.llap.daemon.output.service.send.buffer.size",
128 * 1024, "Send buffer size to be used by LLAP daemon output service"),
LLAP_DAEMON_OUTPUT_SERVICE_MAX_PENDING_WRITES("hive.llap.daemon.output.service.max.pending.writes",
8, "Maximum number of queued writes allowed per connection when sending data\n" +
" via the LLAP output service to external clients."),
LLAP_ENABLE_GRACE_JOIN_IN_LLAP("hive.llap.enable.grace.join.in.llap", false,
"Override if grace join should be allowed to run in llap."),
LLAP_HS2_ENABLE_COORDINATOR("hive.llap.hs2.coordinator.enabled", true,
"Whether to create the LLAP coordinator; since execution engine and container vs llap\n" +
"settings are both coming from job configs, we don't know at start whether this should\n" +
"be created. Default true."),
LLAP_DAEMON_LOGGER("hive.llap.daemon.logger", Constants.LLAP_LOGGER_NAME_QUERY_ROUTING,
new StringSet(Constants.LLAP_LOGGER_NAME_QUERY_ROUTING,
Constants.LLAP_LOGGER_NAME_RFA,
Constants.LLAP_LOGGER_NAME_CONSOLE),
"logger used for llap-daemons."),
SPARK_USE_OP_STATS("hive.spark.use.op.stats", true,
"Whether to use operator stats to determine reducer parallelism for Hive on Spark.\n" +
"If this is false, Hive will use source table stats to determine reducer\n" +
"parallelism for all first level reduce tasks, and the maximum reducer parallelism\n" +
"from all parents for all the rest (second level and onward) reducer tasks."),
SPARK_USE_TS_STATS_FOR_MAPJOIN("hive.spark.use.ts.stats.for.mapjoin", false,
"If this is set to true, mapjoin optimization in Hive/Spark will use statistics from\n" +
"TableScan operators at the root of operator tree, instead of parent ReduceSink\n" +
"operators of the Join operator."),
SPARK_CLIENT_FUTURE_TIMEOUT("hive.spark.client.future.timeout",
"60s", new TimeValidator(TimeUnit.SECONDS),
"Timeout for requests from Hive client to remote Spark driver."),
SPARK_JOB_MONITOR_TIMEOUT("hive.spark.job.monitor.timeout",
"60s", new TimeValidator(TimeUnit.SECONDS),
"Timeout for job monitor to get Spark job state."),
SPARK_RPC_CLIENT_CONNECT_TIMEOUT("hive.spark.client.connect.timeout",
"1000ms", new TimeValidator(TimeUnit.MILLISECONDS),
"Timeout for remote Spark driver in connecting back to Hive client."),
SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT("hive.spark.client.server.connect.timeout",
"90000ms", new TimeValidator(TimeUnit.MILLISECONDS),
"Timeout for handshake between Hive client and remote Spark driver. Checked by both processes."),
SPARK_RPC_SECRET_RANDOM_BITS("hive.spark.client.secret.bits", "256",
"Number of bits of randomness in the generated secret for communication between Hive client and remote Spark driver. " +
"Rounded down to the nearest multiple of 8."),
SPARK_RPC_MAX_THREADS("hive.spark.client.rpc.threads", 8,
"Maximum number of threads for remote Spark driver's RPC event loop."),
SPARK_RPC_MAX_MESSAGE_SIZE("hive.spark.client.rpc.max.size", 50 * 1024 * 1024,
"Maximum message size in bytes for communication between Hive client and remote Spark driver. Default is 50MB."),
SPARK_RPC_CHANNEL_LOG_LEVEL("hive.spark.client.channel.log.level", null,
"Channel logging level for remote Spark driver. One of {DEBUG, ERROR, INFO, TRACE, WARN}."),
SPARK_RPC_SASL_MECHANISM("hive.spark.client.rpc.sasl.mechanisms", "DIGEST-MD5",
"Name of the SASL mechanism to use for authentication."),
SPARK_RPC_SERVER_ADDRESS("hive.spark.client.rpc.server.address", "",
"The server address of HiverServer2 host to be used for communication between Hive client and remote Spark driver. " +
"Default is empty, which means the address will be determined in the same way as for hive.server2.thrift.bind.host." +
"This is only necessary if the host has mutiple network addresses and if a different network address other than " +
"hive.server2.thrift.bind.host is to be used."),
SPARK_RPC_SERVER_PORT("hive.spark.client.rpc.server.port", "", "A list of port ranges which can be used by RPC server " +
"with the format of 49152-49222,49228 and a random one is selected from the list. Default is empty, which randomly " +
"selects one port from all available ones."),
SPARK_DYNAMIC_PARTITION_PRUNING(
"hive.spark.dynamic.partition.pruning", false,
"When dynamic pruning is enabled, joins on partition keys will be processed by writing\n" +
"to a temporary HDFS file, and read later for removing unnecessary partitions."),
SPARK_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE(
"hive.spark.dynamic.partition.pruning.max.data.size", 100*1024*1024L,
"Maximum total data size in dynamic pruning."),
SPARK_USE_GROUPBY_SHUFFLE(
"hive.spark.use.groupby.shuffle", true,
"Spark groupByKey transformation has better performance but uses unbounded memory." +
"Turn this off when there is a memory issue."),
SPARK_JOB_MAX_TASKS("hive.spark.job.max.tasks", -1, "The maximum number of tasks a Spark job may have.\n" +
"If a Spark job contains more tasks than the maximum, it will be cancelled. A value of -1 means no limit."),
NWAYJOINREORDER("hive.reorder.nway.joins", true,
"Runs reordering of tables within single n-way join (i.e.: picks streamtable)"),
HIVE_MERGE_NWAY_JOINS("hive.merge.nway.joins", true,
"Merge adjacent joins into a single n-way join"),
HIVE_LOG_N_RECORDS("hive.log.every.n.records", 0L, new RangeValidator(0L, null),
"If value is greater than 0 logs in fixed intervals of size n rather than exponentially."),
HIVE_MSCK_PATH_VALIDATION("hive.msck.path.validation", "throw",
new StringSet("throw", "skip", "ignore"), "The approach msck should take with HDFS " +
"directories that are partition-like but contain unsupported characters. 'throw' (an " +
"exception) is the default; 'skip' will skip the invalid directories and still repair the" +
" others; 'ignore' will skip the validation (legacy behavior, causes bugs in many cases)"),
HIVE_MSCK_REPAIR_BATCH_SIZE(
"hive.msck.repair.batch.size", 0,
"Batch size for the msck repair command. If the value is greater than zero,\n "
+ "it will execute batch wise with the configured batch size. In case of errors while\n"
+ "adding unknown partitions the batch size is automatically reduced by half in the subsequent\n"
+ "retry attempt. The default value is zero which means it will execute directly (not batch wise)"),
HIVE_MSCK_REPAIR_BATCH_MAX_RETRIES("hive.msck.repair.batch.max.retries", 0,
"Maximum number of retries for the msck repair command when adding unknown partitions.\n "
+ "If the value is greater than zero it will retry adding unknown partitions until the maximum\n"
+ "number of attempts is reached or batch size is reduced to 0, whichever is earlier.\n"
+ "In each retry attempt it will reduce the batch size by a factor of 2 until it reaches zero.\n"
+ "If the value is set to zero it will retry until the batch size becomes zero as described above."),
HIVE_SERVER2_LLAP_CONCURRENT_QUERIES("hive.server2.llap.concurrent.queries", -1,
"The number of queries allowed in parallel via llap. Negative number implies 'infinite'."),
HIVE_TEZ_ENABLE_MEMORY_MANAGER("hive.tez.enable.memory.manager", true,
"Enable memory manager for tez"),
HIVE_HASH_TABLE_INFLATION_FACTOR("hive.hash.table.inflation.factor", (float) 2.0,
"Expected inflation factor between disk/in memory representation of hash tables"),
HIVE_LOG_TRACE_ID("hive.log.trace.id", "",
"Log tracing id that can be used by upstream clients for tracking respective logs. " +
"Truncated to " + LOG_PREFIX_LENGTH + " characters. Defaults to use auto-generated session id."),
HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list",
"hive.security.authenticator.manager,hive.security.authorization.manager," +
"hive.security.metastore.authorization.manager,hive.security.metastore.authenticator.manager," +
"hive.users.in.admin.role,hive.server2.xsrf.filter.enabled,hive.security.authorization.enabled," +
"hive.server2.authentication.ldap.baseDN," +
"hive.server2.authentication.ldap.url," +
"hive.server2.authentication.ldap.Domain," +
"hive.server2.authentication.ldap.groupDNPattern," +
"hive.server2.authentication.ldap.groupFilter," +
"hive.server2.authentication.ldap.userDNPattern," +
"hive.server2.authentication.ldap.userFilter," +
"hive.server2.authentication.ldap.groupMembershipKey," +
"hive.server2.authentication.ldap.userMembershipKey," +
"hive.server2.authentication.ldap.groupClassKey," +
"hive.server2.authentication.ldap.customLDAPQuery",
"Comma separated list of configuration options which are immutable at runtime"),
HIVE_CONF_HIDDEN_LIST("hive.conf.hidden.list",
METASTOREPWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname
// Adding the S3 credentials from Hadoop config to be hidden
+ ",fs.s3.awsAccessKeyId"
+ ",fs.s3.awsSecretAccessKey"
+ ",fs.s3n.awsAccessKeyId"
+ ",fs.s3n.awsSecretAccessKey"
+ ",fs.s3a.access.key"
+ ",fs.s3a.secret.key"
+ ",fs.s3a.proxy.password",
"Comma separated list of configuration options which should not be read by normal user like passwords"),
HIVE_CONF_INTERNAL_VARIABLE_LIST("hive.conf.internal.variable.list",
"hive.added.files.path,hive.added.jars.path,hive.added.archives.path",
"Comma separated list of variables which are used internally and should not be configurable."),
HIVE_QUERY_TIMEOUT_SECONDS("hive.query.timeout.seconds", "0s",
new TimeValidator(TimeUnit.SECONDS),
"Timeout for Running Query in seconds. A nonpositive value means infinite. " +
"If the query timeout is also set by thrift API call, the smaller one will be taken."),
HIVE_EXEC_INPUT_LISTING_MAX_THREADS("hive.exec.input.listing.max.threads", 0, new SizeValidator(0L, true, 1024L, true),
"Maximum number of threads that Hive uses to list file information from file systems (recommended > 1 for blobstore)."),
/* BLOBSTORE section */
HIVE_BLOBSTORE_SUPPORTED_SCHEMES("hive.blobstore.supported.schemes", "s3,s3a,s3n",
"Comma-separated list of supported blobstore schemes."),
HIVE_BLOBSTORE_USE_BLOBSTORE_AS_SCRATCHDIR("hive.blobstore.use.blobstore.as.scratchdir", false,
"Enable the use of scratch directories directly on blob storage systems (it may cause performance penalties)."),
HIVE_BLOBSTORE_OPTIMIZATIONS_ENABLED("hive.blobstore.optimizations.enabled", true,
"This parameter enables a number of optimizations when running on blobstores:\n" +
"(1) If hive.blobstore.use.blobstore.as.scratchdir is false, force the last Hive job to write to the blobstore.\n" +
"This is a performance optimization that forces the final FileSinkOperator to write to the blobstore.\n" +
"See HIVE-15121 for details.");
public final String varname;
public final String altName;
private final String defaultExpr;
public final String defaultStrVal;
public final int defaultIntVal;
public final long defaultLongVal;
public final float defaultFloatVal;
public final boolean defaultBoolVal;
private final Class<?> valClass;
private final VarType valType;
private final Validator validator;
private final String description;
private final boolean excluded;
private final boolean caseSensitive;
ConfVars(String varname, Object defaultVal, String description) {
this(varname, defaultVal, null, description, true, false, null);
}
ConfVars(String varname, Object defaultVal, String description, String altName) {
this(varname, defaultVal, null, description, true, false, altName);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description,
String altName) {
this(varname, defaultVal, validator, description, true, false, altName);
}
ConfVars(String varname, Object defaultVal, String description, boolean excluded) {
this(varname, defaultVal, null, description, true, excluded, null);
}
ConfVars(String varname, String defaultVal, boolean caseSensitive, String description) {
this(varname, defaultVal, null, description, caseSensitive, false, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description) {
this(varname, defaultVal, validator, description, true, false, null);
}
ConfVars(String varname, Object defaultVal, Validator validator, String description,
boolean caseSensitive, boolean excluded, String altName) {
this.varname = varname;
this.validator = validator;
this.description = description;
this.defaultExpr = defaultVal == null ? null : String.valueOf(defaultVal);
this.excluded = excluded;
this.caseSensitive = caseSensitive;
this.altName = altName;
if (defaultVal == null || defaultVal instanceof String) {
this.valClass = String.class;
this.valType = VarType.STRING;
this.defaultStrVal = SystemVariables.substitute((String)defaultVal);
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Integer) {
this.valClass = Integer.class;
this.valType = VarType.INT;
this.defaultStrVal = null;
this.defaultIntVal = (Integer)defaultVal;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Long) {
this.valClass = Long.class;
this.valType = VarType.LONG;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = (Long)defaultVal;
this.defaultFloatVal = -1;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Float) {
this.valClass = Float.class;
this.valType = VarType.FLOAT;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = (Float)defaultVal;
this.defaultBoolVal = false;
} else if (defaultVal instanceof Boolean) {
this.valClass = Boolean.class;
this.valType = VarType.BOOLEAN;
this.defaultStrVal = null;
this.defaultIntVal = -1;
this.defaultLongVal = -1;
this.defaultFloatVal = -1;
this.defaultBoolVal = (Boolean)defaultVal;
} else {
throw new IllegalArgumentException("Not supported type value " + defaultVal.getClass() +
" for name " + varname);
}
}
public boolean isType(String value) {
return valType.isType(value);
}
public Validator getValidator() {
return validator;
}
public String validate(String value) {
return validator == null ? null : validator.validate(value);
}
public String validatorDescription() {
return validator == null ? null : validator.toDescription();
}
public String typeString() {
String type = valType.typeString();
if (valType == VarType.STRING && validator != null) {
if (validator instanceof TimeValidator) {
type += "(TIME)";
}
}
return type;
}
public String getRawDescription() {
return description;
}
public String getDescription() {
String validator = validatorDescription();
if (validator != null) {
return validator + ".\n" + description;
}
return description;
}
public boolean isExcluded() {
return excluded;
}
public boolean isCaseSensitive() {
return caseSensitive;
}
@Override
public String toString() {
return varname;
}
private static String findHadoopBinary() {
String val = findHadoopHome();
      // if we can't find the Hadoop home, we can at least try /usr/bin/hadoop
val = (val == null ? File.separator + "usr" : val)
+ File.separator + "bin" + File.separator + "hadoop";
// Launch hadoop command file on windows.
return val;
}
private static String findYarnBinary() {
String val = findHadoopHome();
val = (val == null ? "yarn" : val + File.separator + "bin" + File.separator + "yarn");
return val;
}
private static String findHadoopHome() {
String val = System.getenv("HADOOP_HOME");
// In Hadoop 1.X and Hadoop 2.X HADOOP_HOME is gone and replaced with HADOOP_PREFIX
if (val == null) {
val = System.getenv("HADOOP_PREFIX");
}
return val;
}
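    // Rough illustration (added commentary, not from the original source): with HADOOP_HOME set to
    // /opt/hadoop, findHadoopBinary() above resolves to /opt/hadoop/bin/hadoop; with neither
    // HADOOP_HOME nor HADOOP_PREFIX set, it falls back to /usr/bin/hadoop, and findYarnBinary()
    // falls back to plain "yarn" on the PATH.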
public String getDefaultValue() {
return valType.defaultValueString(this);
}
public String getDefaultExpr() {
return defaultExpr;
}
private Set<String> getValidStringValues() {
if (validator == null || !(validator instanceof StringSet)) {
throw new RuntimeException(varname + " does not specify a list of valid values");
}
return ((StringSet)validator).getExpected();
}
enum VarType {
STRING {
@Override
void checkType(String value) throws Exception { }
@Override
String defaultValueString(ConfVars confVar) { return confVar.defaultStrVal; }
},
INT {
@Override
void checkType(String value) throws Exception { Integer.valueOf(value); }
},
LONG {
@Override
void checkType(String value) throws Exception { Long.valueOf(value); }
},
FLOAT {
@Override
void checkType(String value) throws Exception { Float.valueOf(value); }
},
BOOLEAN {
@Override
void checkType(String value) throws Exception { Boolean.valueOf(value); }
};
boolean isType(String value) {
try { checkType(value); } catch (Exception e) { return false; }
return true;
}
String typeString() { return name().toUpperCase();}
String defaultValueString(ConfVars confVar) { return confVar.defaultExpr; }
abstract void checkType(String value) throws Exception;
}
}
/**
* Writes the default ConfVars out to a byte array and returns an input
* stream wrapping that byte array.
*
* We need this in order to initialize the ConfVar properties
   * in the underlying Configuration object using the addResource(InputStream)
* method.
*
* It is important to use a LoopingByteArrayInputStream because it turns out
* addResource(InputStream) is broken since Configuration tries to read the
* entire contents of the same InputStream repeatedly without resetting it.
* LoopingByteArrayInputStream has special logic to handle this.
*/
private static synchronized InputStream getConfVarInputStream() {
if (confVarByteArray == null) {
try {
// Create a Hadoop configuration without inheriting default settings.
Configuration conf = new Configuration(false);
applyDefaultNonNullConfVars(conf);
ByteArrayOutputStream confVarBaos = new ByteArrayOutputStream();
conf.writeXml(confVarBaos);
confVarByteArray = confVarBaos.toByteArray();
} catch (Exception e) {
// We're pretty screwed if we can't load the default conf vars
throw new RuntimeException("Failed to initialize default Hive configuration variables!", e);
}
}
return new LoopingByteArrayInputStream(confVarByteArray);
}
public void verifyAndSet(String name, String value) throws IllegalArgumentException {
if (modWhiteListPattern != null) {
Matcher wlMatcher = modWhiteListPattern.matcher(name);
if (!wlMatcher.matches()) {
throw new IllegalArgumentException("Cannot modify " + name + " at runtime. "
+ "It is not in list of params that are allowed to be modified at runtime");
}
}
if (restrictList.contains(name)) {
throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list"
+ " of parameters that can't be modified at runtime");
}
String oldValue = name != null ? get(name) : null;
if (name == null || value == null || !value.equals(oldValue)) {
// When either name or value is null, the set method below will fail,
// and throw IllegalArgumentException
set(name, value);
if (isSparkRelatedConfig(name)) {
isSparkConfigUpdated = true;
}
}
}
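  // Behavior sketch (added commentary, not from the original source), assuming the default
  // restricted list and no modifiable-whitelist pattern:
  //   conf.verifyAndSet("hive.exec.parallel", "true");         // allowed; not Spark related
  //   conf.verifyAndSet("hive.users.in.admin.role", "admin");  // throws IllegalArgumentException (restricted)
  //   conf.verifyAndSet("hive.spark.client.rpc.threads", "4"); // allowed; marks Spark config as updated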
public boolean isHiddenConfig(String name) {
return hiddenSet.contains(name);
}
/**
   * Checks whether a Spark-related property has been updated. This covers Spark configurations,
   * RSC (remote Spark context) configurations and YARN configurations in Spark on YARN mode.
   * @param name the property name
   * @return true if the property is Spark related
*/
private boolean isSparkRelatedConfig(String name) {
boolean result = false;
if (name.startsWith("spark")) { // Spark property.
// for now we don't support changing spark app name on the fly
result = !name.equals("spark.app.name");
} else if (name.startsWith("yarn")) { // YARN property in Spark on YARN mode.
String sparkMaster = get("spark.master");
if (sparkMaster != null && sparkMaster.startsWith("yarn")) {
result = true;
}
} else if (name.startsWith("hive.spark")) { // Remote Spark Context property.
result = true;
} else if (name.equals("mapreduce.job.queuename")) {
      // a special property starting with mapreduce whose changes we also want to take effect
result = true;
}
return result;
}
public static int getIntVar(Configuration conf, ConfVars var) {
assert (var.valClass == Integer.class) : var.varname;
if (var.altName != null) {
return conf.getInt(var.varname, conf.getInt(var.altName, var.defaultIntVal));
}
return conf.getInt(var.varname, var.defaultIntVal);
}
public static void setIntVar(Configuration conf, ConfVars var, int val) {
assert (var.valClass == Integer.class) : var.varname;
conf.setInt(var.varname, val);
}
public int getIntVar(ConfVars var) {
return getIntVar(this, var);
}
public void setIntVar(ConfVars var, int val) {
setIntVar(this, var, val);
}
public static long getTimeVar(Configuration conf, ConfVars var, TimeUnit outUnit) {
return toTime(getVar(conf, var), getDefaultTimeUnit(var), outUnit);
}
public static void setTimeVar(Configuration conf, ConfVars var, long time, TimeUnit timeunit) {
assert (var.valClass == String.class) : var.varname;
conf.set(var.varname, time + stringFor(timeunit));
}
public long getTimeVar(ConfVars var, TimeUnit outUnit) {
return getTimeVar(this, var, outUnit);
}
public void setTimeVar(ConfVars var, long time, TimeUnit outUnit) {
setTimeVar(this, var, time, outUnit);
}
public static long getSizeVar(Configuration conf, ConfVars var) {
return toSizeBytes(getVar(conf, var));
}
public long getSizeVar(ConfVars var) {
return getSizeVar(this, var);
}
private static TimeUnit getDefaultTimeUnit(ConfVars var) {
TimeUnit inputUnit = null;
if (var.validator instanceof TimeValidator) {
inputUnit = ((TimeValidator)var.validator).getTimeUnit();
}
return inputUnit;
}
public static long toTime(String value, TimeUnit inputUnit, TimeUnit outUnit) {
String[] parsed = parseNumberFollowedByUnit(value.trim());
return outUnit.convert(Long.parseLong(parsed[0].trim()), unitFor(parsed[1].trim(), inputUnit));
}
public static long toSizeBytes(String value) {
String[] parsed = parseNumberFollowedByUnit(value.trim());
return Long.parseLong(parsed[0].trim()) * multiplierFor(parsed[1].trim());
}
private static String[] parseNumberFollowedByUnit(String value) {
char[] chars = value.toCharArray();
int i = 0;
for (; i < chars.length && (chars[i] == '-' || Character.isDigit(chars[i])); i++) {
}
return new String[] {value.substring(0, i), value.substring(i)};
}
public static TimeUnit unitFor(String unit, TimeUnit defaultUnit) {
unit = unit.trim().toLowerCase();
if (unit.isEmpty() || unit.equals("l")) {
if (defaultUnit == null) {
throw new IllegalArgumentException("Time unit is not specified");
}
return defaultUnit;
} else if (unit.equals("d") || unit.startsWith("day")) {
return TimeUnit.DAYS;
} else if (unit.equals("h") || unit.startsWith("hour")) {
return TimeUnit.HOURS;
} else if (unit.equals("m") || unit.startsWith("min")) {
return TimeUnit.MINUTES;
} else if (unit.equals("s") || unit.startsWith("sec")) {
return TimeUnit.SECONDS;
} else if (unit.equals("ms") || unit.startsWith("msec")) {
return TimeUnit.MILLISECONDS;
} else if (unit.equals("us") || unit.startsWith("usec")) {
return TimeUnit.MICROSECONDS;
} else if (unit.equals("ns") || unit.startsWith("nsec")) {
return TimeUnit.NANOSECONDS;
}
throw new IllegalArgumentException("Invalid time unit " + unit);
}
public static long multiplierFor(String unit) {
unit = unit.trim().toLowerCase();
if (unit.isEmpty() || unit.equals("b") || unit.equals("bytes")) {
return 1;
} else if (unit.equals("kb")) {
return 1024;
} else if (unit.equals("mb")) {
return 1024*1024;
} else if (unit.equals("gb")) {
return 1024*1024*1024;
} else if (unit.equals("tb")) {
return 1024*1024*1024*1024;
} else if (unit.equals("pb")) {
return 1024*1024*1024*1024*1024;
}
throw new IllegalArgumentException("Invalid size unit " + unit);
}
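  // Worked examples (added commentary, not from the original source), following the parsing
  // methods above:
  //   toSizeBytes("512kb")                                   -> 512L * 1024 = 524288
  //   toTime("90s", TimeUnit.SECONDS, TimeUnit.MILLISECONDS) -> 90000
  //   unitFor("", TimeUnit.SECONDS)                          -> SECONDS (falls back to the default unit)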
public static String stringFor(TimeUnit timeunit) {
switch (timeunit) {
case DAYS: return "day";
case HOURS: return "hour";
case MINUTES: return "min";
case SECONDS: return "sec";
case MILLISECONDS: return "msec";
case MICROSECONDS: return "usec";
case NANOSECONDS: return "nsec";
}
throw new IllegalArgumentException("Invalid timeunit " + timeunit);
}
public static long getLongVar(Configuration conf, ConfVars var) {
assert (var.valClass == Long.class) : var.varname;
if (var.altName != null) {
return conf.getLong(var.varname, conf.getLong(var.altName, var.defaultLongVal));
}
return conf.getLong(var.varname, var.defaultLongVal);
}
public static long getLongVar(Configuration conf, ConfVars var, long defaultVal) {
if (var.altName != null) {
return conf.getLong(var.varname, conf.getLong(var.altName, defaultVal));
}
return conf.getLong(var.varname, defaultVal);
}
public static void setLongVar(Configuration conf, ConfVars var, long val) {
assert (var.valClass == Long.class) : var.varname;
conf.setLong(var.varname, val);
}
public long getLongVar(ConfVars var) {
return getLongVar(this, var);
}
public void setLongVar(ConfVars var, long val) {
setLongVar(this, var, val);
}
public static float getFloatVar(Configuration conf, ConfVars var) {
assert (var.valClass == Float.class) : var.varname;
if (var.altName != null) {
return conf.getFloat(var.varname, conf.getFloat(var.altName, var.defaultFloatVal));
}
return conf.getFloat(var.varname, var.defaultFloatVal);
}
public static float getFloatVar(Configuration conf, ConfVars var, float defaultVal) {
if (var.altName != null) {
return conf.getFloat(var.varname, conf.getFloat(var.altName, defaultVal));
}
return conf.getFloat(var.varname, defaultVal);
}
public static void setFloatVar(Configuration conf, ConfVars var, float val) {
assert (var.valClass == Float.class) : var.varname;
conf.setFloat(var.varname, val);
}
public float getFloatVar(ConfVars var) {
return getFloatVar(this, var);
}
public void setFloatVar(ConfVars var, float val) {
setFloatVar(this, var, val);
}
public static boolean getBoolVar(Configuration conf, ConfVars var) {
assert (var.valClass == Boolean.class) : var.varname;
if (var.altName != null) {
return conf.getBoolean(var.varname, conf.getBoolean(var.altName, var.defaultBoolVal));
}
return conf.getBoolean(var.varname, var.defaultBoolVal);
}
public static boolean getBoolVar(Configuration conf, ConfVars var, boolean defaultVal) {
if (var.altName != null) {
return conf.getBoolean(var.varname, conf.getBoolean(var.altName, defaultVal));
}
return conf.getBoolean(var.varname, defaultVal);
}
public static void setBoolVar(Configuration conf, ConfVars var, boolean val) {
assert (var.valClass == Boolean.class) : var.varname;
conf.setBoolean(var.varname, val);
}
public boolean getBoolVar(ConfVars var) {
return getBoolVar(this, var);
}
public void setBoolVar(ConfVars var, boolean val) {
setBoolVar(this, var, val);
}
public static String getVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
return var.altName != null ? conf.get(var.varname, conf.get(var.altName, var.defaultStrVal))
: conf.get(var.varname, var.defaultStrVal);
}
public static String getVarWithoutType(Configuration conf, ConfVars var) {
return var.altName != null ? conf.get(var.varname, conf.get(var.altName, var.defaultExpr))
: conf.get(var.varname, var.defaultExpr);
}
public static String getTrimmedVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
if (var.altName != null) {
return conf.getTrimmed(var.varname, conf.getTrimmed(var.altName, var.defaultStrVal));
}
return conf.getTrimmed(var.varname, var.defaultStrVal);
}
public static String[] getTrimmedStringsVar(Configuration conf, ConfVars var) {
assert (var.valClass == String.class) : var.varname;
String[] result = conf.getTrimmedStrings(var.varname, (String[])null);
if (result != null) return result;
if (var.altName != null) {
result = conf.getTrimmedStrings(var.altName, (String[])null);
if (result != null) return result;
}
return org.apache.hadoop.util.StringUtils.getTrimmedStrings(var.defaultStrVal);
}
public static String getVar(Configuration conf, ConfVars var, String defaultVal) {
String ret = var.altName != null ? conf.get(var.varname, conf.get(var.altName, defaultVal))
: conf.get(var.varname, defaultVal);
return ret;
}
public static String getVar(Configuration conf, ConfVars var, EncoderDecoder<String, String> encoderDecoder) {
return encoderDecoder.decode(getVar(conf, var));
}
public String getLogIdVar(String defaultValue) {
String retval = getVar(ConfVars.HIVE_LOG_TRACE_ID);
if (retval.equals("")) {
l4j.info("Using the default value passed in for log id: " + defaultValue);
retval = defaultValue;
}
if (retval.length() > LOG_PREFIX_LENGTH) {
l4j.warn("The original log id prefix is " + retval + " has been truncated to "
+ retval.substring(0, LOG_PREFIX_LENGTH - 1));
retval = retval.substring(0, LOG_PREFIX_LENGTH - 1);
}
return retval;
}
public static void setVar(Configuration conf, ConfVars var, String val) {
assert (var.valClass == String.class) : var.varname;
conf.set(var.varname, val);
}
public static void setVar(Configuration conf, ConfVars var, String val,
EncoderDecoder<String, String> encoderDecoder) {
setVar(conf, var, encoderDecoder.encode(val));
}
public static ConfVars getConfVars(String name) {
return vars.get(name);
}
public static ConfVars getMetaConf(String name) {
return metaConfs.get(name);
}
public String getVar(ConfVars var) {
return getVar(this, var);
}
public void setVar(ConfVars var, String val) {
setVar(this, var, val);
}
public String getQueryString() {
return getQueryString(this);
}
public static String getQueryString(Configuration conf) {
return getVar(conf, ConfVars.HIVEQUERYSTRING, EncoderDecoderFactory.URL_ENCODER_DECODER);
}
public void setQueryString(String query) {
setQueryString(this, query);
}
public static void setQueryString(Configuration conf, String query) {
setVar(conf, ConfVars.HIVEQUERYSTRING, query, EncoderDecoderFactory.URL_ENCODER_DECODER);
}
public void logVars(PrintStream ps) {
for (ConfVars one : ConfVars.values()) {
ps.println(one.varname + "=" + ((get(one.varname) != null) ? get(one.varname) : ""));
}
}
public HiveConf() {
super();
initialize(this.getClass());
}
public HiveConf(Class<?> cls) {
super();
initialize(cls);
}
public HiveConf(Configuration other, Class<?> cls) {
super(other);
initialize(cls);
}
/**
* Copy constructor
*/
public HiveConf(HiveConf other) {
super(other);
hiveJar = other.hiveJar;
auxJars = other.auxJars;
isSparkConfigUpdated = other.isSparkConfigUpdated;
origProp = (Properties)other.origProp.clone();
restrictList.addAll(other.restrictList);
hiddenSet.addAll(other.hiddenSet);
modWhiteListPattern = other.modWhiteListPattern;
}
public Properties getAllProperties() {
return getProperties(this);
}
public static Properties getProperties(Configuration conf) {
Iterator<Map.Entry<String, String>> iter = conf.iterator();
Properties p = new Properties();
while (iter.hasNext()) {
Map.Entry<String, String> e = iter.next();
p.setProperty(e.getKey(), e.getValue());
}
return p;
}
private void initialize(Class<?> cls) {
hiveJar = (new JobConf(cls)).getJar();
// preserve the original configuration
origProp = getAllProperties();
// Overlay the ConfVars. Note that this ignores ConfVars with null values
addResource(getConfVarInputStream());
// Overlay hive-site.xml if it exists
if (hiveSiteURL != null) {
addResource(hiveSiteURL);
}
// if embedded metastore is to be used as per config so far
// then this is considered like the metastore server case
String msUri = this.getVar(HiveConf.ConfVars.METASTOREURIS);
if(HiveConfUtil.isEmbeddedMetaStore(msUri)){
setLoadMetastoreConfig(true);
}
// load hivemetastore-site.xml if this is metastore and file exists
if (isLoadMetastoreConfig() && hivemetastoreSiteUrl != null) {
addResource(hivemetastoreSiteUrl);
}
// load hiveserver2-site.xml if this is hiveserver2 and file exists
// metastore can be embedded within hiveserver2, in such cases
    // the conf params in hiveserver2-site.xml will override what is defined
// in hivemetastore-site.xml
if (isLoadHiveServer2Config() && hiveServer2SiteUrl != null) {
addResource(hiveServer2SiteUrl);
}
// Overlay the values of any system properties whose names appear in the list of ConfVars
applySystemProperties();
if ((this.get("hive.metastore.ds.retry.attempts") != null) ||
this.get("hive.metastore.ds.retry.interval") != null) {
l4j.warn("DEPRECATED: hive.metastore.ds.retry.* no longer has any effect. " +
"Use hive.hmshandler.retry.* instead");
}
// if the running class was loaded directly (through eclipse) rather than through a
// jar then this would be needed
if (hiveJar == null) {
hiveJar = this.get(ConfVars.HIVEJAR.varname);
}
if (auxJars == null) {
auxJars = StringUtils.join(FileUtils.getJarFilesByPath(this.get(ConfVars.HIVEAUXJARS.varname), this), ',');
}
if (getBoolVar(ConfVars.METASTORE_SCHEMA_VERIFICATION)) {
setBoolVar(ConfVars.METASTORE_AUTO_CREATE_ALL, false);
}
if (getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) {
List<String> trimmed = new ArrayList<String>();
for (Map.Entry<String,String> entry : this) {
String key = entry.getKey();
if (key == null || !key.startsWith("hive.")) {
continue;
}
ConfVars var = HiveConf.getConfVars(key);
if (var == null) {
var = HiveConf.getConfVars(key.trim());
if (var != null) {
trimmed.add(key);
}
}
if (var == null) {
l4j.warn("HiveConf of name " + key + " does not exist");
} else if (!var.isType(entry.getValue())) {
l4j.warn("HiveConf " + var.varname + " expects " + var.typeString() + " type value");
}
}
for (String key : trimmed) {
set(key.trim(), getRaw(key));
unset(key);
}
}
setupSQLStdAuthWhiteList();
// setup list of conf vars that are not allowed to change runtime
setupRestrictList();
hiddenSet.clear();
hiddenSet.addAll(HiveConfUtil.getHiddenSet(this));
}
/**
* If the config whitelist param for sql standard authorization is not set, set it up here.
*/
private void setupSQLStdAuthWhiteList() {
String whiteListParamsStr = getVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST);
if (whiteListParamsStr == null || whiteListParamsStr.trim().isEmpty()) {
// set the default configs in whitelist
whiteListParamsStr = getSQLStdAuthDefaultWhiteListPattern();
}
setVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST, whiteListParamsStr);
}
private static String getSQLStdAuthDefaultWhiteListPattern() {
// create the default white list from list of safe config params
// and regex list
String confVarPatternStr = Joiner.on("|").join(convertVarsToRegex(sqlStdAuthSafeVarNames));
String regexPatternStr = Joiner.on("|").join(sqlStdAuthSafeVarNameRegexes);
return regexPatternStr + "|" + confVarPatternStr;
}
/**
* @param paramList list of parameter strings
* @return list of parameter strings with "." replaced by "\."
*/
private static String[] convertVarsToRegex(String[] paramList) {
String[] regexes = new String[paramList.length];
for(int i=0; i<paramList.length; i++) {
regexes[i] = paramList[i].replace(".", "\\." );
}
return regexes;
}
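  // For example (added commentary, not from the original source):
  //   convertVarsToRegex(new String[] {"hive.exec.parallel"}) yields {"hive\\.exec\\.parallel"},
  // so literal dots in parameter names are not treated as regex wildcards when the whitelist
  // pattern is assembled.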
/**
   * Default list of modifiable config parameters for sql standard authorization.
* For internal use only.
*/
private static final String [] sqlStdAuthSafeVarNames = new String [] {
ConfVars.AGGR_JOIN_TRANSPOSE.varname,
ConfVars.BYTESPERREDUCER.varname,
ConfVars.CLIENT_STATS_COUNTERS.varname,
ConfVars.DEFAULTPARTITIONNAME.varname,
ConfVars.DROPIGNORESNONEXISTENT.varname,
ConfVars.HIVECOUNTERGROUP.varname,
ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname,
ConfVars.HIVEENFORCEBUCKETMAPJOIN.varname,
ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN.varname,
ConfVars.HIVEEXPREVALUATIONCACHE.varname,
ConfVars.HIVEQUERYRESULTFILEFORMAT.varname,
ConfVars.HIVEHASHTABLELOADFACTOR.varname,
ConfVars.HIVEHASHTABLETHRESHOLD.varname,
ConfVars.HIVEIGNOREMAPJOINHINT.varname,
ConfVars.HIVELIMITMAXROWSIZE.varname,
ConfVars.HIVEMAPREDMODE.varname,
ConfVars.HIVEMAPSIDEAGGREGATE.varname,
ConfVars.HIVEOPTIMIZEMETADATAQUERIES.varname,
ConfVars.HIVEROWOFFSET.varname,
ConfVars.HIVEVARIABLESUBSTITUTE.varname,
ConfVars.HIVEVARIABLESUBSTITUTEDEPTH.varname,
ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME.varname,
ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL.varname,
ConfVars.HIVE_CHECK_CROSS_PRODUCT.varname,
ConfVars.HIVE_CLI_TEZ_SESSION_ASYNC.varname,
ConfVars.HIVE_COMPAT.varname,
ConfVars.HIVE_CONCATENATE_CHECK_INDEX.varname,
ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY.varname,
ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION.varname,
ConfVars.HIVE_EXECUTION_ENGINE.varname,
ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname,
ConfVars.HIVE_EXIM_URI_SCHEME_WL.varname,
ConfVars.HIVE_FILE_MAX_FOOTER.varname,
ConfVars.HIVE_INSERT_INTO_MULTILEVEL_DIRS.varname,
ConfVars.HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS.varname,
ConfVars.HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES.varname,
ConfVars.HIVE_QUOTEDID_SUPPORT.varname,
ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES.varname,
ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS.varname,
ConfVars.HIVE_SCHEMA_EVOLUTION.varname,
ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname,
ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS.varname,
ConfVars.HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES.varname,
ConfVars.JOB_DEBUG_CAPTURE_STACKTRACES.varname,
ConfVars.JOB_DEBUG_TIMEOUT.varname,
ConfVars.LLAP_IO_ENABLED.varname,
ConfVars.LLAP_IO_USE_FILEID_PATH.varname,
ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname,
ConfVars.LLAP_EXECUTION_MODE.varname,
ConfVars.LLAP_AUTO_ALLOW_UBER.varname,
ConfVars.LLAP_AUTO_ENFORCE_TREE.varname,
ConfVars.LLAP_AUTO_ENFORCE_VECTORIZED.varname,
ConfVars.LLAP_AUTO_ENFORCE_STATS.varname,
ConfVars.LLAP_AUTO_MAX_INPUT.varname,
ConfVars.LLAP_AUTO_MAX_OUTPUT.varname,
ConfVars.LLAP_SKIP_COMPILE_UDF_CHECK.varname,
ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS.varname,
ConfVars.LLAP_ENABLE_GRACE_JOIN_IN_LLAP.varname,
ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname,
ConfVars.MAXCREATEDFILES.varname,
ConfVars.MAXREDUCERS.varname,
ConfVars.NWAYJOINREORDER.varname,
ConfVars.OUTPUT_FILE_EXTENSION.varname,
ConfVars.SHOW_JOB_FAIL_DEBUG_INFO.varname,
ConfVars.TASKLOG_DEBUG_TIMEOUT.varname,
ConfVars.HIVEQUERYID.varname,
};
/**
* Default list of regexes for config parameters that are modifiable with
* sql standard authorization enabled
*/
static final String [] sqlStdAuthSafeVarNameRegexes = new String [] {
"hive\\.auto\\..*",
"hive\\.cbo\\..*",
"hive\\.convert\\..*",
"hive\\.exec\\.dynamic\\.partition.*",
"hive\\.exec\\.max\\.dynamic\\.partitions.*",
"hive\\.exec\\.compress\\..*",
"hive\\.exec\\.infer\\..*",
"hive\\.exec\\.mode.local\\..*",
"hive\\.exec\\.orc\\..*",
"hive\\.exec\\.parallel.*",
"hive\\.explain\\..*",
"hive\\.fetch.task\\..*",
"hive\\.groupby\\..*",
"hive\\.hbase\\..*",
"hive\\.index\\..*",
"hive\\.index\\..*",
"hive\\.intermediate\\..*",
"hive\\.join\\..*",
"hive\\.limit\\..*",
"hive\\.log\\..*",
"hive\\.mapjoin\\..*",
"hive\\.merge\\..*",
"hive\\.optimize\\..*",
"hive\\.orc\\..*",
"hive\\.outerjoin\\..*",
"hive\\.parquet\\..*",
"hive\\.ppd\\..*",
"hive\\.prewarm\\..*",
"hive\\.server2\\.thrift\\.resultset\\.default\\.fetch\\.size",
"hive\\.server2\\.proxy\\.user",
"hive\\.skewjoin\\..*",
"hive\\.smbjoin\\..*",
"hive\\.stats\\..*",
"hive\\.strict\\..*",
"hive\\.tez\\..*",
"hive\\.vectorized\\..*",
"mapred\\.map\\..*",
"mapred\\.reduce\\..*",
"mapred\\.output\\.compression\\.codec",
"mapred\\.job\\.queuename",
"mapred\\.output\\.compression\\.type",
"mapred\\.min\\.split\\.size",
"mapreduce\\.job\\.reduce\\.slowstart\\.completedmaps",
"mapreduce\\.job\\.queuename",
"mapreduce\\.job\\.tags",
"mapreduce\\.input\\.fileinputformat\\.split\\.minsize",
"mapreduce\\.map\\..*",
"mapreduce\\.reduce\\..*",
"mapreduce\\.output\\.fileoutputformat\\.compress\\.codec",
"mapreduce\\.output\\.fileoutputformat\\.compress\\.type",
"oozie\\..*",
"tez\\.am\\..*",
"tez\\.task\\..*",
"tez\\.runtime\\..*",
"tez\\.queue\\.name",
};
/**
* Apply system properties to this object if the property name is defined in ConfVars
* and the value is non-null and not an empty string.
*/
private void applySystemProperties() {
Map<String, String> systemProperties = getConfSystemProperties();
for (Entry<String, String> systemProperty : systemProperties.entrySet()) {
this.set(systemProperty.getKey(), systemProperty.getValue());
}
}
/**
* This method returns a mapping from config variable name to its value for all config variables
* which have been set using System properties
*/
public static Map<String, String> getConfSystemProperties() {
Map<String, String> systemProperties = new HashMap<String, String>();
for (ConfVars oneVar : ConfVars.values()) {
if (System.getProperty(oneVar.varname) != null) {
if (System.getProperty(oneVar.varname).length() > 0) {
systemProperties.put(oneVar.varname, System.getProperty(oneVar.varname));
}
}
}
return systemProperties;
}
/**
* Overlays ConfVar properties with non-null values
*/
private static void applyDefaultNonNullConfVars(Configuration conf) {
for (ConfVars var : ConfVars.values()) {
String defaultValue = var.getDefaultValue();
if (defaultValue == null) {
// Don't override ConfVars with null values
continue;
}
conf.set(var.varname, defaultValue);
}
}
public Properties getChangedProperties() {
Properties ret = new Properties();
Properties newProp = getAllProperties();
for (Object one : newProp.keySet()) {
String oneProp = (String) one;
String oldValue = origProp.getProperty(oneProp);
if (!StringUtils.equals(oldValue, newProp.getProperty(oneProp))) {
ret.setProperty(oneProp, newProp.getProperty(oneProp));
}
}
return (ret);
}
public String getJar() {
return hiveJar;
}
/**
* @return the auxJars
*/
public String getAuxJars() {
return auxJars;
}
/**
* Set the auxiliary jars. Used for unit tests only.
* @param auxJars the auxJars to set.
*/
public void setAuxJars(String auxJars) {
this.auxJars = auxJars;
setVar(this, ConfVars.HIVEAUXJARS, auxJars);
}
public URL getHiveDefaultLocation() {
return hiveDefaultURL;
}
public static void setHiveSiteLocation(URL location) {
hiveSiteURL = location;
}
public static URL getHiveSiteLocation() {
return hiveSiteURL;
}
public static URL getMetastoreSiteLocation() {
return hivemetastoreSiteUrl;
}
public static URL getHiveServer2SiteLocation() {
return hiveServer2SiteUrl;
}
/**
* @return the user name set in hadoop.job.ugi param or the current user from System
* @throws IOException
*/
public String getUser() throws IOException {
try {
UserGroupInformation ugi = Utils.getUGI();
return ugi.getUserName();
} catch (LoginException le) {
throw new IOException(le);
}
}
public static String getColumnInternalName(int pos) {
return "_col" + pos;
}
public static int getPositionFromInternalName(String internalName) {
Pattern internalPattern = Pattern.compile("_col([0-9]+)");
Matcher m = internalPattern.matcher(internalName);
if (!m.matches()){
return -1;
} else {
return Integer.parseInt(m.group(1));
}
}
/**
   * Append a comma separated list of config vars to the restrict list.
   * @param restrictListStr comma separated list of configuration parameter names
*/
public void addToRestrictList(String restrictListStr) {
if (restrictListStr == null) {
return;
}
String oldList = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST);
if (oldList == null || oldList.isEmpty()) {
this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, restrictListStr);
} else {
this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, oldList + "," + restrictListStr);
}
setupRestrictList();
}
/**
* Set white list of parameters that are allowed to be modified
*
* @param paramNameRegex
*/
@LimitedPrivate(value = { "Currently only for use by HiveAuthorizer" })
public void setModifiableWhiteListRegex(String paramNameRegex) {
if (paramNameRegex == null) {
return;
}
modWhiteListPattern = Pattern.compile(paramNameRegex);
}
/**
* Add the HIVE_CONF_RESTRICTED_LIST values to restrictList,
* including HIVE_CONF_RESTRICTED_LIST itself
*/
private void setupRestrictList() {
String restrictListStr = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST);
restrictList.clear();
if (restrictListStr != null) {
for (String entry : restrictListStr.split(",")) {
restrictList.add(entry.trim());
}
}
String internalVariableListStr = this.getVar(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST);
if (internalVariableListStr != null) {
for (String entry : internalVariableListStr.split(",")) {
restrictList.add(entry.trim());
}
}
restrictList.add(ConfVars.HIVE_IN_TEST.varname);
restrictList.add(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname);
restrictList.add(ConfVars.HIVE_CONF_HIDDEN_LIST.varname);
restrictList.add(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST.varname);
}
/**
* Strips hidden config entries from configuration
*/
public void stripHiddenConfigurations(Configuration conf) {
HiveConfUtil.stripConfigurations(conf, hiddenSet);
}
/**
* @return true if HS2 webui is enabled
*/
public boolean isWebUiEnabled() {
return this.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_PORT) != 0;
}
/**
* @return true if HS2 webui query-info cache is enabled
*/
public boolean isWebUiQueryInfoCacheEnabled() {
return isWebUiEnabled() && this.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES) > 0;
}
public static boolean isLoadMetastoreConfig() {
return loadMetastoreConfig;
}
public static void setLoadMetastoreConfig(boolean loadMetastoreConfig) {
HiveConf.loadMetastoreConfig = loadMetastoreConfig;
}
public static boolean isLoadHiveServer2Config() {
return loadHiveServer2Config;
}
public static void setLoadHiveServer2Config(boolean loadHiveServer2Config) {
HiveConf.loadHiveServer2Config = loadHiveServer2Config;
}
public static class StrictChecks {
private static final String NO_LIMIT_MSG = makeMessage(
"Order by-s without limit", ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY);
private static final String NO_PARTITIONLESS_MSG = makeMessage(
"Queries against partitioned tables without a partition filter",
ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY);
private static final String NO_COMPARES_MSG = makeMessage(
"Unsafe compares between different types", ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY);
private static final String NO_CARTESIAN_MSG = makeMessage(
"Cartesian products", ConfVars.HIVE_STRICT_CHECKS_CARTESIAN);
private static final String NO_BUCKETING_MSG = makeMessage(
"Load into bucketed tables", ConfVars.HIVE_STRICT_CHECKS_BUCKETING);
private static String makeMessage(String what, ConfVars setting) {
return what + " are disabled for safety reasons. If you know what you are doing, please set"
+ setting.varname + " to false and that " + ConfVars.HIVEMAPREDMODE.varname + " is not"
+ " set to 'strict' to proceed. Note that if you may get errors or incorrect results if"
+ " you make a mistake while using some of the unsafe features.";
}
public static String checkNoLimit(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY) ? null : NO_LIMIT_MSG;
}
public static String checkNoPartitionFilter(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY)
? null : NO_PARTITIONLESS_MSG;
}
public static String checkTypeSafety(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY) ? null : NO_COMPARES_MSG;
}
public static String checkCartesian(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_CARTESIAN) ? null : NO_CARTESIAN_MSG;
}
public static String checkBucketing(Configuration conf) {
return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_BUCKETING) ? null : NO_BUCKETING_MSG;
}
private static boolean isAllowed(Configuration conf, ConfVars setting) {
String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, (String)null);
return (mode != null) ? !"strict".equals(mode) : !HiveConf.getBoolVar(conf, setting);
}
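    // Reading of isAllowed (added commentary, not from the original source): if hive.mapred.mode
    // is set explicitly, 'strict' forbids the construct and any other value allows it; otherwise
    // the corresponding hive.strict.checks.* flag decides, and the construct is allowed only when
    // that flag is false.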
}
public static String getNonMrEngines() {
String result = "";
for (String s : ConfVars.HIVE_EXECUTION_ENGINE.getValidStringValues()) {
if ("mr".equals(s)) continue;
if (!result.isEmpty()) {
result += ", ";
}
result += s;
}
return result;
}
public static String generateMrDeprecationWarning() {
return "Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. "
+ "Consider using a different execution engine (i.e. " + HiveConf.getNonMrEngines()
+ ") or using Hive 1.X releases.";
}
private static final Object reverseMapLock = new Object();
private static HashMap<String, ConfVars> reverseMap = null;
public static HashMap<String, ConfVars> getOrCreateReverseMap() {
// This should be called rarely enough; for now it's ok to just lock every time.
synchronized (reverseMapLock) {
if (reverseMap != null) return reverseMap;
}
HashMap<String, ConfVars> vars = new HashMap<>();
for (ConfVars val : ConfVars.values()) {
vars.put(val.varname.toLowerCase(), val);
if (val.altName != null && !val.altName.isEmpty()) {
vars.put(val.altName.toLowerCase(), val);
}
}
synchronized (reverseMapLock) {
if (reverseMap != null) return reverseMap;
reverseMap = vars;
return reverseMap;
}
}
}
|
[
"\"HIVE_CONF_DIR\"",
"\"HIVE_HOME\"",
"\"HADOOP_HOME\"",
"\"HADOOP_PREFIX\""
] |
[] |
[
"HADOOP_PREFIX",
"HADOOP_HOME",
"HIVE_CONF_DIR",
"HIVE_HOME"
] |
[]
|
["HADOOP_PREFIX", "HADOOP_HOME", "HIVE_CONF_DIR", "HIVE_HOME"]
|
java
| 4 | 0 | |
code/python/echomesh/base/Path.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.base import MakeEmptyProject
from echomesh.base import Platform
import getpass
import os
import os.path
import sys
ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES = True
# If this is True, echomesh uses its own external packages in preference to any
# you might have installed in your system path.
CODE_PATH = os.path.abspath(sys.path[0])
EXTERNAL_CODE_PATH = os.path.join(CODE_PATH, 'external')
ECHOMESH_PATH = os.path.dirname(os.path.dirname(CODE_PATH))
BINARY_PATH = os.path.join(ECHOMESH_PATH, 'bin', Platform.PLATFORM)
PROJECT_PATH = None
COMMAND_PATH = None
ASSET_PATH = None
_REQUIRED_DIRECTORIES = 'asset', 'cache', 'command', 'log'
def _possible_project(path):
for d in _REQUIRED_DIRECTORIES:
if not os.path.exists(os.path.join(path, d)):
return False
return True
def set_project_path(project_path=None, show_error=True, prompt=True):
original_path = os.path.abspath(os.path.expanduser(project_path or os.curdir))
path = original_path
global PROJECT_PATH, COMMAND_PATH, ASSET_PATH
while not _possible_project(path):
p = os.path.dirname(path)
if p != path:
path = p
continue
if prompt:
if MakeEmptyProject.ask_to_make_empty_project(original_path):
path = original_path
break
else:
PROJECT_PATH = None
return False
if show_error:
print("\nYour path %s isn't in an echomesh project." % original_path)
print("Defaulting to the echomesh path %s." % ECHOMESH_PATH)
path = ECHOMESH_PATH
break
PROJECT_PATH = path
COMMAND_PATH = os.path.join(path, 'command')
ASSET_PATH = os.path.join(path, 'asset')
os.chdir(path)
return True
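# Illustrative usage (added commentary; the paths below are hypothetical):
#   set_project_path('~/my_project')        # walks up until a directory containing asset/, cache/,
#                                           # command/ and log/ is found, then chdir()s into it
#   set_project_path('/tmp', prompt=False)  # no project found: prints a warning and falls back
#                                           # to ECHOMESH_PATH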
set_project_path()
def info():
return {
'Asset path': ASSET_PATH,
'Code path': CODE_PATH,
'Command path': COMMAND_PATH,
'External code path': EXTERNAL_CODE_PATH,
'Project path': PROJECT_PATH,
'echomesh path': ECHOMESH_PATH,
}
def fix_sys_path():
for path in EXTERNAL_CODE_PATH, BINARY_PATH:
if path not in sys.path:
if ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES:
sys.path.insert(1, path)
else:
sys.path.append(path)
_HOME_VARIABLE_FIXED = False
# HACK!
def fix_home_directory_environment_variable():
if Platform.PLATFORM == Platform.DEBIAN:
global _HOME_VARIABLE_FIXED
if not _HOME_VARIABLE_FIXED:
# If running as root, export user pi's home directory as $HOME.
if getpass.getuser() == 'root':
os.environ['HOME'] = '/home/pi'
_HOME_VARIABLE_FIXED = True
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
internal/federation/server.go
|
package federation
import (
"context"
"crypto/ed25519"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"math/big"
"net"
"net/http"
"os"
"path"
"sync"
"testing"
"time"
"github.com/gorilla/mux"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/complement/internal/b"
"github.com/matrix-org/complement/internal/docker"
)
// Server represents a federation server
type Server struct {
t *testing.T
// Default: true
UnexpectedRequestsAreErrors bool
Priv ed25519.PrivateKey
KeyID gomatrixserverlib.KeyID
ServerName string
certPath string
keyPath string
mux *mux.Router
srv *http.Server
directoryHandlerSetup bool
aliases map[string]string
rooms map[string]*ServerRoom
keyRing *gomatrixserverlib.KeyRing
}
// NewServer creates a new federation server with configured options.
func NewServer(t *testing.T, deployment *docker.Deployment, opts ...func(*Server)) *Server {
// generate signing key
_, priv, err := ed25519.GenerateKey(nil)
if err != nil {
t.Fatalf("federation.NewServer failed to generate ed25519 key: %s", err)
}
srv := &Server{
t: t,
Priv: priv,
KeyID: "ed25519:complement",
mux: mux.NewRouter(),
ServerName: docker.HostnameRunningComplement,
rooms: make(map[string]*ServerRoom),
aliases: make(map[string]string),
UnexpectedRequestsAreErrors: true,
}
fetcher := &basicKeyFetcher{
KeyFetcher: &gomatrixserverlib.DirectKeyFetcher{
Client: gomatrixserverlib.NewClient(
gomatrixserverlib.WithTransport(&docker.RoundTripper{Deployment: deployment}),
),
},
srv: srv,
}
srv.keyRing = &gomatrixserverlib.KeyRing{
KeyDatabase: &nopKeyDatabase{},
KeyFetchers: []gomatrixserverlib.KeyFetcher{
fetcher,
},
}
srv.mux.Use(func(h http.Handler) http.Handler {
// Return a json Content-Type header to all requests by default
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/json")
h.ServeHTTP(w, r)
})
})
srv.mux.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if srv.UnexpectedRequestsAreErrors {
t.Errorf("Server.UnexpectedRequestsAreErrors=true received unexpected request to server: %s %s", req.Method, req.URL.Path)
} else {
t.Logf("Server.UnexpectedRequestsAreErrors=false received unexpected request to server: %s %s", req.Method, req.URL.Path)
}
w.WriteHeader(404)
w.Write([]byte("complement: federation server is not listening for this path"))
})
// generate certs and an http.Server
httpServer, certPath, keyPath, err := federationServer("name", srv.mux)
if err != nil {
t.Fatalf("complement: unable to create federation server and certificates: %s", err.Error())
}
srv.certPath = certPath
srv.keyPath = keyPath
srv.srv = httpServer
for _, opt := range opts {
opt(srv)
}
return srv
}
// UserID returns the complete user ID for the given localpart
func (s *Server) UserID(localpart string) string {
return fmt.Sprintf("@%s:%s", localpart, s.ServerName)
}
// MakeAliasMapping will create a mapping of room alias to room ID on this server. Returns the alias.
// If this is the first time calling this function, a directory lookup handler will be added to
// handle alias requests over federation.
func (s *Server) MakeAliasMapping(aliasLocalpart, roomID string) string {
alias := fmt.Sprintf("#%s:%s", aliasLocalpart, s.ServerName)
s.aliases[alias] = roomID
HandleDirectoryLookups()(s)
return alias
}
// MustMakeRoom adds a room to this server so that it is accessible to other servers when queried over federation.
// The `events` will be added to this room. Returns the created room.
func (s *Server) MustMakeRoom(t *testing.T, roomVer gomatrixserverlib.RoomVersion, events []b.Event) *ServerRoom {
roomID := fmt.Sprintf("!%d:%s", len(s.rooms), s.ServerName)
room := &ServerRoom{
RoomID: roomID,
Version: roomVer,
State: make(map[string]*gomatrixserverlib.Event),
ForwardExtremities: make([]string, 0),
}
// sign all these events
for _, ev := range events {
signedEvent := s.MustCreateEvent(t, room, ev)
room.AddEvent(signedEvent)
}
s.rooms[roomID] = room
return room
}
// FederationClient returns a client which will sign requests using this server's key.
//
// The requests will be routed according to the deployment map in `deployment`.
func (s *Server) FederationClient(deployment *docker.Deployment) *gomatrixserverlib.FederationClient {
f := gomatrixserverlib.NewFederationClient(
gomatrixserverlib.ServerName(s.ServerName), s.KeyID, s.Priv,
gomatrixserverlib.WithTransport(&docker.RoundTripper{Deployment: deployment}),
)
return f
}
// MustCreateEvent creates and signs a new event at the current head of the given room.
// It does not, however, insert the event into the room; see ServerRoom.AddEvent for that.
func (s *Server) MustCreateEvent(t *testing.T, room *ServerRoom, ev b.Event) *gomatrixserverlib.Event {
t.Helper()
content, err := json.Marshal(ev.Content)
if err != nil {
t.Fatalf("MustCreateEvent: failed to marshal event content %s - %+v", err, ev.Content)
}
var unsigned []byte
if ev.Unsigned != nil {
unsigned, err = json.Marshal(ev.Unsigned)
if err != nil {
t.Fatalf("MustCreateEvent: failed to marshal event unsigned: %s - %+v", err, ev.Unsigned)
}
}
eb := gomatrixserverlib.EventBuilder{
Sender: ev.Sender,
Depth: int64(room.Depth + 1), // depth starts at 1
Type: ev.Type,
StateKey: ev.StateKey,
Content: content,
RoomID: room.RoomID,
PrevEvents: room.ForwardExtremities,
Unsigned: unsigned,
}
stateNeeded, err := gomatrixserverlib.StateNeededForEventBuilder(&eb)
if err != nil {
t.Fatalf("MustCreateEvent: failed to work out auth_events : %s", err)
}
eb.AuthEvents = room.AuthEvents(stateNeeded)
signedEvent, err := eb.Build(time.Now(), gomatrixserverlib.ServerName(s.ServerName), s.KeyID, s.Priv, room.Version)
if err != nil {
t.Fatalf("MustCreateEvent: failed to sign event: %s", err)
}
return signedEvent
}
// Mux returns this server's router so you can attach additional paths
func (s *Server) Mux() *mux.Router {
return s.mux
}
// Listen for federation server requests - call the returned function to gracefully close the server.
func (s *Server) Listen() (cancel func()) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
err := s.srv.ListenAndServeTLS(s.certPath, s.keyPath)
if err != nil && err != http.ErrServerClosed {
s.t.Logf("ListenFederationServer: ListenAndServeTLS failed: %s", err)
// Note that calling s.t.FailNow is not allowed from a goroutine other than the one running the test,
// so we only log here; tests will likely fail anyway if the server is not listening.
}
}()
return func() {
err := s.srv.Shutdown(context.Background())
if err != nil {
s.t.Fatalf("ListenFederationServer: failed to shutdown server: %s", err)
}
wg.Wait() // wait for the server to shutdown
}
}
// GetOrCreateCaCert is used to create the federation TLS cert.
// In addition, it is passed to homeserver containers to create TLS certs
// for the homeservers.
// This effectively acts as a test-only PKI.
func GetOrCreateCaCert() (*x509.Certificate, *rsa.PrivateKey, error) {
var tlsCACertPath, tlsCAKeyPath string
if os.Getenv("CI") == "true" {
// When in CI we create the cert dir in the root directory instead.
tlsCACertPath = path.Join("/ca", "ca.crt")
tlsCAKeyPath = path.Join("/ca", "ca.key")
} else {
wd, err := os.Getwd()
if err != nil {
return nil, nil, err
}
tlsCACertPath = path.Join(wd, "ca", "ca.crt")
tlsCAKeyPath = path.Join(wd, "ca", "ca.key")
if _, err := os.Stat(path.Join(wd, "ca")); os.IsNotExist(err) {
err = os.Mkdir(path.Join(wd, "ca"), 0770)
if err != nil {
return nil, nil, err
}
}
}
if _, err := os.Stat(tlsCACertPath); err == nil {
if _, err := os.Stat(tlsCAKeyPath); err == nil {
// We already created a CA cert, let's use that.
dat, err := ioutil.ReadFile(tlsCACertPath)
if err != nil {
return nil, nil, err
}
block, _ := pem.Decode([]byte(dat))
if block == nil || block.Type != "CERTIFICATE" {
return nil, nil, errors.New("ca.crt is not a valid pem encoded x509 cert")
}
caCerts, err := x509.ParseCertificates(block.Bytes)
if err != nil {
return nil, nil, err
}
if len(caCerts) != 1 {
return nil, nil, errors.New("ca.crt contains none or more than one cert")
}
caCert := caCerts[0]
dat, err = ioutil.ReadFile(tlsCAKeyPath)
if err != nil {
return nil, nil, err
}
block, _ = pem.Decode([]byte(dat))
if block == nil || block.Type != "RSA PRIVATE KEY" {
return nil, nil, errors.New("ca.key is not a valid pem encoded rsa private key")
}
priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return nil, nil, err
}
return caCert, priv, nil
}
}
// valid for 10 years
certificateDuration := time.Hour * 24 * 365 * 10
priv, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return nil, nil, err
}
notBefore := time.Now()
notAfter := notBefore.Add(certificateDuration)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, nil, err
}
caCert := x509.Certificate{
SerialNumber: serialNumber,
NotBefore: notBefore,
NotAfter: notAfter,
IsCA: true,
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature | x509.KeyUsageCRLSign,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
Subject: pkix.Name{
Organization: []string{"matrix.org"},
Country: []string{"GB"},
Province: []string{"London"},
Locality: []string{"London"},
StreetAddress: []string{"123 Street"},
PostalCode: []string{"12345"},
},
}
derBytes, err := x509.CreateCertificate(rand.Reader, &caCert, &caCert, &priv.PublicKey, priv)
if err != nil {
return nil, nil, err
}
certOut, err := os.Create(tlsCACertPath)
if err != nil {
return nil, nil, err
}
defer certOut.Close() // nolint: errcheck
if err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
return nil, nil, err
}
keyOut, err := os.OpenFile(tlsCAKeyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return nil, nil, err
}
defer keyOut.Close() // nolint: errcheck
err = pem.Encode(keyOut, &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(priv),
})
if err != nil {
return nil, nil, err
}
return &caCert, priv, nil
}
// federationServer creates a federation server with the given handler
func federationServer(name string, h http.Handler) (*http.Server, string, string, error) {
var derBytes []byte
srv := &http.Server{
Addr: ":8448",
Handler: h,
}
tlsCertPath := path.Join(os.TempDir(), "complement.crt")
tlsKeyPath := path.Join(os.TempDir(), "complement.key")
certificateDuration := time.Hour
priv, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return nil, "", "", err
}
notBefore := time.Now()
notAfter := notBefore.Add(certificateDuration)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, "", "", err
}
template := x509.Certificate{
SerialNumber: serialNumber,
NotBefore: notBefore,
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
Subject: pkix.Name{
Organization: []string{"matrix.org"},
Country: []string{"GB"},
Province: []string{"London"},
Locality: []string{"London"},
StreetAddress: []string{"123 Street"},
PostalCode: []string{"12345"},
},
}
host := docker.HostnameRunningComplement
if ip := net.ParseIP(host); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, host)
}
if os.Getenv("COMPLEMENT_CA") == "true" {
// COMPLEMENT_CA is set: sign the federation certificate with the shared test CA so homeservers can validate it
var ca *x509.Certificate
var caPrivKey *rsa.PrivateKey
ca, caPrivKey, err = GetOrCreateCaCert()
if err != nil {
return nil, "", "", err
}
derBytes, err = x509.CreateCertificate(rand.Reader, &template, ca, &priv.PublicKey, caPrivKey)
if err != nil {
return nil, "", "", err
}
} else {
derBytes, err = x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
return nil, "", "", err
}
}
certOut, err := os.Create(tlsCertPath)
if err != nil {
return nil, "", "", err
}
defer certOut.Close() // nolint: errcheck
if err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
return nil, "", "", err
}
keyOut, err := os.OpenFile(tlsKeyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return nil, "", "", err
}
defer keyOut.Close() // nolint: errcheck
err = pem.Encode(keyOut, &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(priv),
})
if err != nil {
return nil, "", "", err
}
return srv, tlsCertPath, tlsKeyPath, nil
}
type nopKeyDatabase struct {
gomatrixserverlib.KeyFetcher
}
func (d *nopKeyDatabase) StoreKeys(ctx context.Context, results map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.PublicKeyLookupResult) error {
return nil
}
func (d *nopKeyDatabase) FetchKeys(
ctx context.Context,
requests map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.Timestamp) (
map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.PublicKeyLookupResult, error,
) {
return nil, nil
}
func (d *nopKeyDatabase) FetcherName() string {
return "nopKeyDatabase"
}
type basicKeyFetcher struct {
gomatrixserverlib.KeyFetcher
srv *Server
}
func (f *basicKeyFetcher) FetchKeys(
ctx context.Context,
requests map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.Timestamp) (
map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.PublicKeyLookupResult, error,
) {
result := make(map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.PublicKeyLookupResult, len(requests))
for req := range requests {
if string(req.ServerName) == f.srv.ServerName && req.KeyID == f.srv.KeyID {
publicKey := f.srv.Priv.Public().(ed25519.PublicKey)
result[req] = gomatrixserverlib.PublicKeyLookupResult{
ValidUntilTS: gomatrixserverlib.AsTimestamp(time.Now().Add(24 * time.Hour)),
ExpiredTS: gomatrixserverlib.PublicKeyNotExpired,
VerifyKey: gomatrixserverlib.VerifyKey{
Key: gomatrixserverlib.Base64Bytes(publicKey),
},
}
} else {
return f.KeyFetcher.FetchKeys(ctx, requests)
}
}
return result, nil
}
func (f *basicKeyFetcher) FetcherName() string {
return "basicKeyFetcher"
}
| environment: ["\"CI\"", "\"COMPLEMENT_CA\""] | variablearg: [] | constarg: ["COMPLEMENT_CA", "CI"] | variableargjson: [] | constargjson: ["COMPLEMENT_CA", "CI"] | lang: go | constargcount: 2 | variableargcount: 0 |
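The federation Server above is driven entirely through its exported API (NewServer, Listen, FederationClient, MustMakeRoom, MakeAliasMapping, UserID). The sketch below is a hypothetical Complement-style test showing how those pieces fit together; the deployment value, the room version, and the event fields and names are illustrative assumptions, not taken from the dataset row.
// exampleFederationUsage is a hypothetical sketch of the Server API above.
// Assumptions: `deployment` is provided by the surrounding test harness, and
// gomatrixserverlib.RoomVersionV6 plus the create-event content are illustrative.
func exampleFederationUsage(t *testing.T, deployment *docker.Deployment) {
	srv := NewServer(t, deployment, func(s *Server) {
		// Log unexpected inbound federation requests instead of failing the test.
		s.UnexpectedRequestsAreErrors = false
	})
	cancel := srv.Listen() // serve TLS on :8448 using the generated certificate
	defer cancel()         // gracefully shut the server down when the test ends

	alice := srv.UserID("alice") // "@alice:<this server's name>"

	// Seed a room so remote homeservers can fetch it over federation.
	emptyStateKey := ""
	room := srv.MustMakeRoom(t, gomatrixserverlib.RoomVersionV6, []b.Event{{
		Type:     "m.room.create",
		StateKey: &emptyStateKey,
		Sender:   alice,
		Content:  map[string]interface{}{"creator": alice},
	}})
	srv.MakeAliasMapping("example", room.RoomID)

	// A federation client that signs requests with this server's ed25519 key
	// and routes them through the container deployment.
	fedClient := srv.FederationClient(deployment)
	_ = fedClient
}
Starting Listen before pointing a homeserver at this server matters: the homeserver dials back over federation, and any path without a registered handler falls through to the NotFoundHandler shown above.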
core/pkg/deployer/deployer.go
|
package deployer
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
)
var (
certPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
tokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
)
type metadata struct {
Name string `json:"name"`
}
type spec struct {
Image string `json:"image"`
}
// App CRD spec
type app struct {
ApiVersion string `json:"apiVersion"`
Kind string `json:"kind"`
Metadata metadata `json:"metadata"`
Spec spec `json:"spec"`
}
// Patch request config
type patch struct {
Spec spec `json:"spec"`
}
// List of available image tags
type tagsList struct {
Name string
Tags []string
}
type Deployer struct {
Address string
token string
caCertPool *x509.CertPool
podClient v1.PodInterface
}
func NewDeployer() *Deployer {
d := Deployer{
Address: "https://" + os.Getenv("KUBERNETES_SERVICE_HOST") + "/apis/app.charon.cr/v1alpha2/namespaces/default/apps/",
}
d.setToken()
d.setCertPool()
d.createPodClient()
return &d
}
func (d *Deployer) setToken() {
read, err := ioutil.ReadFile(tokenPath)
if err != nil {
log.Fatal(fmt.Errorf("Cannot read token, %w\n", err))
}
d.token = "Bearer " + string(read)
}
func (d *Deployer) setCertPool() {
caCert, err := ioutil.ReadFile(certPath)
if err != nil {
log.Fatal(fmt.Errorf("Cannot get cert, %w\n", err))
}
d.caCertPool = x509.NewCertPool()
d.caCertPool.AppendCertsFromPEM(caCert)
}
func (d *Deployer) createPodClient() {
// Create the in-cluster config
config, err := rest.InClusterConfig()
if err != nil {
log.Fatal(fmt.Errorf("Failed to create in-cluster config: %w", err))
}
// Create clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
log.Fatal(fmt.Errorf("Failed to create clientset: %w", err))
}
d.podClient = clientset.CoreV1().Pods(corev1.NamespaceDefault)
}
func (d *Deployer) createNewCR(name string, img string) {
// Create updated json config for the App
newApp := app{
ApiVersion: "app.charon.cr/v1alpha2",
Kind: "App",
Metadata: metadata{
Name: name,
},
Spec: spec{
Image: img,
},
}
// Create HTTP client
httpcli := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: d.caCertPool,
},
},
}
reqBody, err := json.Marshal(newApp)
if err != nil {
log.Fatal(fmt.Errorf("Failed to create cr spec: %v\n %w\n", newApp, err))
}
// Send request to create App
req, err := http.NewRequest("POST", d.Address, bytes.NewReader(reqBody))
if err != nil {
log.Fatal(fmt.Errorf("Failed to send create request: %w\n", err))
}
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Authorization", d.token)
resp, err := httpcli.Do(req)
if err != nil {
log.Fatal(fmt.Errorf("Failed to create cr; %w\n", err))
}
defer resp.Body.Close()
}
func (d *Deployer) SendPatch(name string, img string) {
registryName := os.Getenv("REGISTRY")
img = registryName + img
updApp, err := d.podClient.Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
d.createNewCR(name, img)
fmt.Println("Created new CR")
return
}
// Create HTTP client
httpcli := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: d.caCertPool,
},
},
}
// If exists, send patch to app cr
newApp := patch{
Spec: spec{
Image: img,
},
}
reqBody, err := json.Marshal(newApp)
if err != nil {
log.Fatal(fmt.Errorf("Failed to create cr spec: %v\n %w\n", newApp, err))
}
req, err := http.NewRequest("PATCH", d.Address, bytes.NewReader(reqBody))
if err != nil {
log.Fatal(fmt.Errorf("Failed to send patch; %w\n", err))
}
req.Header.Add("Content-Type", "application/merge-patch+json")
req.Header.Add("Accept", "application/json")
req.Header.Add("Authorization", d.token)
resp, err := httpcli.Do(req)
if err != nil {
log.Fatal(fmt.Errorf("Patch failed: %v", err))
}
defer resp.Body.Close()
// Update pod
updApp.Spec.Containers[0].Image = img
_, updErr := d.podClient.Update(context.Background(), updApp, metav1.UpdateOptions{})
if updErr != nil {
log.Fatal(fmt.Errorf("Update failed: %v", updErr))
}
}
func (d *Deployer) GetPreviousVersion(name string) string {
registryAddr := "http://" + os.Getenv("REGISTRY") + "v2/" + name + "/tags/list"
resp, err := http.Get(registryAddr)
if err != nil {
err = fmt.Errorf("Failed to get image tags: %w", err)
log.Fatal(err)
}
defer resp.Body.Close()
respBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal(err)
}
var tl = tagsList{}
err = json.Unmarshal(respBytes, &tl)
if err != nil {
err = fmt.Errorf("Failed to parse body: %s\n %w", resp.Body, err)
log.Fatal(err)
}
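// Note: this assumes the registry reports at least two tags; Tags[1] is treated as the previous version.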
return tl.Tags[1]
}
| environment: ["\"KUBERNETES_SERVICE_HOST\"", "\"REGISTRY\"", "\"REGISTRY\""] | variablearg: [] | constarg: ["KUBERNETES_SERVICE_HOST", "REGISTRY"] | variableargjson: [] | constargjson: ["KUBERNETES_SERVICE_HOST", "REGISTRY"] | lang: go | constargcount: 2 | variableargcount: 0 |
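The Deployer above assumes it runs inside a Kubernetes pod: it reads the service-account token and CA certificate from the standard paths and expects KUBERNETES_SERVICE_HOST and REGISTRY to be set. A hypothetical caller might look like the sketch below; the module import path, app name, and image tags are illustrative assumptions.
// Hypothetical rollout/rollback helper built on the Deployer API above
// (NewDeployer, SendPatch, GetPreviousVersion).
package main

import (
	"log"

	"example.com/charon/core/pkg/deployer" // assumed module path for the package above
)

func main() {
	d := deployer.NewDeployer() // loads the in-cluster token, CA pool, and pod client

	// Create or patch the "webapp" App CR so it points at the new image tag;
	// SendPatch prepends the REGISTRY address to the image name.
	d.SendPatch("webapp", "webapp:1.2.3")

	// Roll back to the previously pushed tag reported by the registry.
	prev := d.GetPreviousVersion("webapp")
	log.Printf("previous image tag for webapp: %s", prev)
	d.SendPatch("webapp", "webapp:"+prev)
}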